initial project setup with README and ignore
This commit is contained in:
8
app/routes/__init__.py
Normal file
8
app/routes/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Routes package."""
|
||||
|
||||
from .optimization import router as optimization_router
|
||||
from .health import router as health_router
|
||||
from .cache import router as cache_router
|
||||
from .ml_admin import router as ml_router, web_router as ml_web_router
|
||||
|
||||
__all__ = ["optimization_router", "health_router", "cache_router", "ml_router", "ml_web_router"]
|
||||
79
app/routes/cache.py
Normal file
79
app/routes/cache.py
Normal file
@@ -0,0 +1,79 @@
|
||||
"""Cache management API endpoints."""
|
||||
|
||||
import logging
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from typing import Dict, Any
|
||||
|
||||
from app.services import cache
|
||||
|
||||
# Module-level logger for cache-management endpoints.
logger = logging.getLogger(__name__)

# All cache-management endpoints are served under /api/v1/cache.
router = APIRouter(prefix="/api/v1/cache", tags=["Cache Management"])
|
||||
|
||||
|
||||
@router.get("/stats", response_model=Dict[str, Any])
|
||||
async def get_cache_stats():
|
||||
"""
|
||||
Get cache statistics.
|
||||
|
||||
Returns:
|
||||
- hits: Number of cache hits
|
||||
- misses: Number of cache misses
|
||||
- sets: Number of cache writes
|
||||
- total_keys: Current number of cached route keys
|
||||
- enabled: Whether Redis cache is enabled
|
||||
"""
|
||||
try:
|
||||
stats = cache.get_stats()
|
||||
# Calculate hit rate
|
||||
total_requests = stats.get("hits", 0) + stats.get("misses", 0)
|
||||
if total_requests > 0:
|
||||
stats["hit_rate"] = round(stats.get("hits", 0) / total_requests * 100, 2)
|
||||
else:
|
||||
stats["hit_rate"] = 0.0
|
||||
return stats
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting cache stats: {e}")
|
||||
raise HTTPException(status_code=500, detail="Internal server error")
|
||||
|
||||
|
||||
@router.get("/keys")
|
||||
async def list_cache_keys(pattern: str = "routes:*"):
|
||||
"""
|
||||
List cache keys matching pattern.
|
||||
|
||||
- **pattern**: Redis key pattern (default: "routes:*")
|
||||
"""
|
||||
try:
|
||||
keys = cache.get_keys(pattern)
|
||||
return {
|
||||
"pattern": pattern,
|
||||
"count": len(keys),
|
||||
"keys": keys[:100] # Limit to first 100 for response size
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error listing cache keys: {e}")
|
||||
raise HTTPException(status_code=500, detail="Internal server error")
|
||||
|
||||
|
||||
@router.delete("/clear")
|
||||
async def clear_cache(pattern: str = "routes:*"):
|
||||
"""
|
||||
Clear cache keys matching pattern.
|
||||
|
||||
- **pattern**: Redis key pattern to delete (default: "routes:*")
|
||||
|
||||
[WARN] **Warning**: This will delete cached route optimizations!
|
||||
"""
|
||||
try:
|
||||
deleted_count = cache.delete(pattern)
|
||||
logger.info(f"Cleared {deleted_count} cache keys matching pattern: {pattern}")
|
||||
return {
|
||||
"pattern": pattern,
|
||||
"deleted_count": deleted_count,
|
||||
"message": f"Cleared {deleted_count} cache keys"
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing cache: {e}")
|
||||
raise HTTPException(status_code=500, detail="Internal server error")
|
||||
|
||||
98
app/routes/health.py
Normal file
98
app/routes/health.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""Professional health check endpoints."""
|
||||
|
||||
import time
|
||||
import logging
|
||||
import sys
|
||||
from typing import Optional
|
||||
from datetime import datetime
|
||||
from fastapi import APIRouter, Request
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
# Module-level logger for health endpoints.
logger = logging.getLogger(__name__)

# All health endpoints are served under /api/v1/health.
router = APIRouter(prefix="/api/v1/health", tags=["Health"])

# Wall-clock time captured at import; used to compute service uptime.
start_time = time.time()
|
||||
|
||||
|
||||
class HealthResponse(BaseModel):
    """Health check response model."""
    # "healthy" or "unhealthy" (see health_check below).
    status: str = Field(..., description="Service status")
    # Seconds since process start; 0.0 when the check itself failed.
    uptime_seconds: float = Field(..., description="Service uptime in seconds")
    version: str = Field("2.0.0", description="API version")
    # ISO 8601 with a trailing "Z" (UTC).
    timestamp: str = Field(..., description="Health check timestamp (ISO 8601)")
    # Propagated from request.state when middleware sets it; else None.
    request_id: Optional[str] = Field(None, description="Request ID for tracing")
|
||||
|
||||
|
||||
@router.get("/", response_model=HealthResponse)
|
||||
async def health_check(request: Request):
|
||||
"""
|
||||
Health check endpoint.
|
||||
|
||||
Returns the current health status of the API service including:
|
||||
- Service status (healthy/unhealthy)
|
||||
- Uptime in seconds
|
||||
- API version
|
||||
- Timestamp
|
||||
"""
|
||||
try:
|
||||
uptime = time.time() - start_time
|
||||
request_id = getattr(request.state, "request_id", None)
|
||||
|
||||
return HealthResponse(
|
||||
status="healthy",
|
||||
uptime_seconds=round(uptime, 2),
|
||||
version="2.0.0",
|
||||
timestamp=datetime.utcnow().isoformat() + "Z",
|
||||
request_id=request_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Health check failed: {e}", exc_info=True)
|
||||
request_id = getattr(request.state, "request_id", None)
|
||||
|
||||
return HealthResponse(
|
||||
status="unhealthy",
|
||||
uptime_seconds=0.0,
|
||||
version="2.0.0",
|
||||
timestamp=datetime.utcnow().isoformat() + "Z",
|
||||
request_id=request_id
|
||||
)
|
||||
|
||||
|
||||
@router.get("/ready")
|
||||
async def readiness_check(request: Request):
|
||||
"""
|
||||
Readiness check endpoint for load balancers.
|
||||
|
||||
Returns 200 if the service is ready to accept requests.
|
||||
"""
|
||||
try:
|
||||
# Check if critical services are available
|
||||
# Add your service health checks here
|
||||
|
||||
return {
|
||||
"status": "ready",
|
||||
"timestamp": datetime.utcnow().isoformat() + "Z",
|
||||
"request_id": getattr(request.state, "request_id", None)
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Readiness check failed: {e}")
|
||||
return {
|
||||
"status": "not_ready",
|
||||
"timestamp": datetime.utcnow().isoformat() + "Z",
|
||||
"request_id": getattr(request.state, "request_id", None)
|
||||
}
|
||||
|
||||
|
||||
@router.get("/live")
|
||||
async def liveness_check(request: Request):
|
||||
"""
|
||||
Liveness check endpoint for container orchestration.
|
||||
|
||||
Returns 200 if the service is alive.
|
||||
"""
|
||||
return {
|
||||
"status": "alive",
|
||||
"timestamp": datetime.utcnow().isoformat() + "Z",
|
||||
"request_id": getattr(request.state, "request_id", None)
|
||||
}
|
||||
286
app/routes/ml_admin.py
Normal file
286
app/routes/ml_admin.py
Normal file
@@ -0,0 +1,286 @@
|
||||
"""
|
||||
ML Admin API - rider-api
|
||||
|
||||
Endpoints:
|
||||
GET /api/v1/ml/status - DB record count, quality trend, model info
|
||||
GET /api/v1/ml/config - Current active hyperparameters (ML-tuned + defaults)
|
||||
POST /api/v1/ml/train - Trigger hypertuning immediately
|
||||
POST /api/v1/ml/reset - Reset config to factory defaults
|
||||
GET /api/v1/ml/reports - List past tuning reports
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
from fastapi import APIRouter, HTTPException, Body, Request
|
||||
from fastapi.responses import FileResponse, PlainTextResponse
|
||||
from typing import Optional
|
||||
|
||||
# Module-level logger for ML admin endpoints.
logger = logging.getLogger(__name__)

# JSON admin API for the ML hypertuner, served under /api/v1/ml.
router = APIRouter(
    prefix="/api/v1/ml",
    tags=["ML Hypertuner"],
    responses={
        500: {"description": "Internal server error"}
    }
)

# Separate, unprefixed router for the HTML monitoring dashboard page.
web_router = APIRouter(
    tags=["ML Monitor Web Dashboard"]
)
|
||||
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# GET /ml-ops
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@web_router.get("/ml-ops", summary="Visual ML monitoring dashboard")
|
||||
def ml_dashboard():
|
||||
"""Returns the beautiful HTML dashboard for visualizing ML progress."""
|
||||
path = os.path.join(os.getcwd(), "app/templates/ml_dashboard.html")
|
||||
if not os.path.isfile(path):
|
||||
raise HTTPException(status_code=404, detail=f"Dashboard template not found at {path}")
|
||||
return FileResponse(path)
|
||||
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# GET /status
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.get("/status", summary="ML system status & quality trend")
|
||||
def ml_status():
|
||||
"""
|
||||
Returns:
|
||||
- How many assignment events are logged
|
||||
- Recent quality score trend (avg / min / max over last 20 calls)
|
||||
- Whether the model has been trained
|
||||
- Current hyperparameter source (ml_tuned vs defaults)
|
||||
"""
|
||||
from app.services.ml.ml_data_collector import get_collector
|
||||
from app.services.ml.ml_hypertuner import get_hypertuner
|
||||
|
||||
try:
|
||||
collector = get_collector()
|
||||
tuner = get_hypertuner()
|
||||
|
||||
record_count = collector.count_records()
|
||||
quality_trend = collector.get_recent_quality_trend(last_n=50)
|
||||
model_info = tuner.get_model_info()
|
||||
|
||||
from app.services.ml.behavior_analyzer import get_analyzer
|
||||
b_analyzer = get_analyzer()
|
||||
|
||||
from app.config.dynamic_config import get_config
|
||||
cfg = get_config()
|
||||
|
||||
return {
|
||||
"status": "ok",
|
||||
"db_records": record_count,
|
||||
"ready_to_train": record_count >= 30,
|
||||
"quality_trend": quality_trend,
|
||||
"hourly_stats": collector.get_hourly_stats(),
|
||||
"quality_histogram": collector.get_quality_histogram(),
|
||||
"strategy_comparison": collector.get_strategy_comparison(),
|
||||
"zone_stats": collector.get_zone_stats(),
|
||||
"behavior": b_analyzer.get_info() if hasattr(b_analyzer, 'get_info') else {},
|
||||
"config": cfg.get_all(),
|
||||
"model": model_info,
|
||||
"message": (
|
||||
f"Collecting data - need {max(0, 30 - record_count)} more records to train."
|
||||
if record_count < 30
|
||||
else "Ready to train! Call POST /api/v1/ml/train"
|
||||
if not model_info["model_trained"]
|
||||
else "Model trained and active."
|
||||
)
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Status failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# GET /config
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.get("/config", summary="Current active hyperparameter values")
|
||||
def ml_config():
|
||||
"""
|
||||
Returns every hyperparameter currently in use by the system.
|
||||
Values marked 'ml_tuned' were set by the ML model.
|
||||
Values marked 'default' are factory defaults (not yet tuned).
|
||||
"""
|
||||
from app.config.dynamic_config import get_config, DEFAULTS
|
||||
|
||||
try:
|
||||
cfg = get_config()
|
||||
all_values = cfg.get_all()
|
||||
cached_keys = set(cfg._cache.keys())
|
||||
|
||||
annotated = {}
|
||||
for k, v in all_values.items():
|
||||
annotated[k] = {
|
||||
"value": v,
|
||||
"source": "ml_tuned" if k in cached_keys else "default",
|
||||
}
|
||||
|
||||
return {
|
||||
"status": "ok",
|
||||
"hyperparameters": annotated,
|
||||
"total_params": len(annotated),
|
||||
"ml_tuned_count": sum(1 for x in annotated.values() if x["source"] == "ml_tuned"),
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Config fetch failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
@router.patch("/config", summary="Update specific ML configuration defaults")
|
||||
def ml_config_patch(payload: dict = Body(...)):
|
||||
"""Allows updating any active parameter via JSON overrides. e.g. \{ \"ml_strategy\": \"balanced\" \}"""
|
||||
from app.config.dynamic_config import get_config
|
||||
try:
|
||||
cfg = get_config()
|
||||
cfg.set_bulk(payload, source="ml_admin")
|
||||
return {"status": "ok"}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Config patch failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# POST /train
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.post("/train", summary="Trigger XGBoost training + Optuna hyperparameter search")
|
||||
def ml_train(
|
||||
n_trials: int = Body(default=100, embed=True, ge=10, le=500,
|
||||
description="Number of Optuna trials (10500)"),
|
||||
min_records: int = Body(default=30, embed=True, ge=10,
|
||||
description="Minimum DB records required")
|
||||
):
|
||||
"""
|
||||
Runs the full hypertuning pipeline:
|
||||
1. Load logged assignment data from DB
|
||||
2. Train XGBoost surrogate model
|
||||
3. Run Optuna TPE search ({n_trials} trials)
|
||||
4. Write optimal params to DynamicConfig
|
||||
|
||||
The AssignmentService picks up new params within 5 minutes (auto-reload).
|
||||
"""
|
||||
from app.services.ml.ml_hypertuner import get_hypertuner
|
||||
|
||||
try:
|
||||
logger.info(f"[ML API] Hypertuning triggered: n_trials={n_trials}, min_records={min_records}")
|
||||
tuner = get_hypertuner()
|
||||
result = tuner.run(n_trials=n_trials, min_training_records=min_records)
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Training failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# POST /reset
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.post("/reset", summary="Reset all hyperparameters to factory defaults")
|
||||
def ml_reset():
|
||||
"""
|
||||
Wipes all ML-tuned config values and reverts every parameter to the
|
||||
original hardcoded defaults. Useful if the model produced bad results.
|
||||
"""
|
||||
from app.config.dynamic_config import get_config
|
||||
|
||||
try:
|
||||
get_config().reset_to_defaults()
|
||||
return {
|
||||
"status": "ok",
|
||||
"message": "All hyperparameters reset to factory defaults.",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Reset failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# POST /strategy
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.post("/strategy", summary="Change the AI Optimization Prompt/Strategy")
|
||||
def ml_strategy(strategy: str = Body(default="balanced", embed=True)):
|
||||
"""
|
||||
Changes the mathematical objective of the AI.
|
||||
Choices: 'balanced', 'fuel_saver', 'aggressive_speed', 'zone_strict'
|
||||
|
||||
Historical data is NOT wiped. Instead, the AI dynamically recalculates
|
||||
the quality score of all past events using the new strategy rules.
|
||||
"""
|
||||
from app.config.dynamic_config import get_config
|
||||
import sqlite3
|
||||
|
||||
valid = ["balanced", "fuel_saver", "aggressive_speed", "zone_strict"]
|
||||
if strategy not in valid:
|
||||
raise HTTPException(400, f"Invalid strategy. Choose from {valid}")
|
||||
|
||||
try:
|
||||
get_config().set("ml_strategy", strategy)
|
||||
|
||||
return {
|
||||
"status": "ok",
|
||||
"message": f"Strategy changed to '{strategy}'. Historical AI data will be mathematically repurposed to train towards this new goal.",
|
||||
"strategy": strategy
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Strategy change failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# GET /reports
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.get("/reports", summary="List past hypertuning reports")
|
||||
def ml_reports():
|
||||
"""Returns the last 10 tuning reports (JSON files in ml_data/reports/)."""
|
||||
try:
|
||||
report_dir = "ml_data/reports"
|
||||
if not os.path.isdir(report_dir):
|
||||
return {"status": "ok", "reports": [], "message": "No reports yet."}
|
||||
|
||||
files = sorted(
|
||||
[f for f in os.listdir(report_dir) if f.endswith(".json")],
|
||||
reverse=True
|
||||
)[:10]
|
||||
|
||||
reports = []
|
||||
for fname in files:
|
||||
path = os.path.join(report_dir, fname)
|
||||
try:
|
||||
with open(path) as f:
|
||||
reports.append(json.load(f))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return {"status": "ok", "reports": reports, "count": len(reports)}
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Reports fetch failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# GET /export
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@router.get("/export", summary="Export all records as CSV")
|
||||
def ml_export():
|
||||
"""Generates a CSV string containing all rows in the assignment_ml_log table."""
|
||||
try:
|
||||
from app.services.ml.ml_data_collector import get_collector
|
||||
csv_data = get_collector().export_csv()
|
||||
response = PlainTextResponse(content=csv_data, media_type="text/csv")
|
||||
response.headers["Content-Disposition"] = 'attachment; filename="ml_export.csv"'
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"[ML API] Export failed: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
364
app/routes/optimization.py
Normal file
364
app/routes/optimization.py
Normal file
@@ -0,0 +1,364 @@
|
||||
"""Provider payload optimization endpoints."""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from fastapi import APIRouter, Request, Depends, status, HTTPException, Query
|
||||
|
||||
from app.controllers.route_controller import RouteController
|
||||
from app.core.exceptions import APIException
|
||||
from app.core.arrow_utils import save_optimized_route_parquet
|
||||
import os
|
||||
|
||||
# Module-level logger for optimization endpoints.
logger = logging.getLogger(__name__)

# All optimization endpoints are served under /api/v1/optimization.
router = APIRouter(
    prefix="/api/v1/optimization",
    tags=["Route Optimization"],
    responses={
        400: {"description": "Bad request - Invalid input parameters"},
        422: {"description": "Validation error - Request validation failed"},
        500: {"description": "Internal server error"}
    }
)
|
||||
|
||||
def get_route_controller() -> RouteController:
    """Dependency injection for route controller.

    A fresh controller is built per request (no shared state).
    """
    controller = RouteController()
    return controller
|
||||
|
||||
|
||||
# Legacy single-route endpoint removed; provider flow only.
|
||||
@router.post(
    "/createdeliveries",
    status_code=status.HTTP_200_OK,
    summary="Optimize provider payload (forwarding paused)",
    description="""
    Accepts the provider's orders array, reorders it using greedy nearest-neighbor, adds only:
    - step (1..N)
    - previouskms (distance from previous stop in km)
    - cumulativekms (total distance so far in km)
    - actualkms (direct pickup-to-delivery distance)

    Forwarding is temporarily paused: returns the optimized array in the response.
    """,
    responses={
        200: {
            "description": "Upstream response",
            "content": {
                "application/json": {
                    "example": {"code": 200, "details": [], "message": "Success", "status": True}
                }
            }
        }
    }
)
async def provider_optimize_forward(
    body: list[dict],
    controller: RouteController = Depends(get_route_controller)
):
    """
    Accept provider JSON array, reorder by greedy nearest-neighbor, annotate each item with:
    - step (1..N)
    - previouskms (km from previous point)
    - cumulativekms (km so far)
    - actualkms (pickup to delivery distance)
    Then forward the optimized array to the external API and return only its response.

    Raises:
        APIException: propagated unchanged from the controller.
        HTTPException: 500 for any other unexpected failure.
    """
    try:
        # Upstream deliveries endpoint the optimized payload is forwarded to.
        url = "https://jupiter.nearle.app/live/api/v1/deliveries/createdeliveries"
        result = await controller.optimize_and_forward_provider_payload(body, url)

        # Performance Logging: Save a Parquet Snapshot (Async-friendly backup)
        # NOTE(review): the snapshot persists `body`, not `result` — this only
        # captures the optimized route if the controller annotates `body`
        # in place; confirm against RouteController before relying on it.
        try:
            os.makedirs("data/snapshots", exist_ok=True)
            snapshot_path = f"data/snapshots/route_{int(time.time())}.parquet"
            save_optimized_route_parquet(body, snapshot_path)
            logger.info(f"Apache Arrow: Snapshot saved to {snapshot_path}")
        except Exception as e:
            # Snapshotting is best-effort; never fail the request over it.
            logger.warning(f"Could not save Arrow snapshot: {e}")

        return result
    except APIException:
        # Domain errors already carry proper status codes; re-raise untouched.
        raise
    except Exception as e:
        logger.error(f"Unexpected error in provider_optimize_forward: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
|
||||
|
||||
|
||||
@router.get(
    "/createdeliveries",
    summary="Usage info for provider optimize forward"
)
async def provider_optimize_forward_info():
    """Return usage info; this endpoint accepts POST only for processing.

    Fix: the advertised path previously pointed at a non-existent
    "/provider-optimize-forward" route; this handler actually lives at
    /api/v1/optimization/createdeliveries (see the POST handler above it
    in this module).
    """
    return {
        "message": "Use POST with a JSON array of orders to optimize and forward.",
        "method": "POST",
        "path": "/api/v1/optimization/createdeliveries"
    }
|
||||
|
||||
|
||||
@router.post(
    "/riderassign",
    status_code=status.HTTP_200_OK,
    summary="Assign created orders to active riders",
    description="""
    Assigns orders to riders based on kitchen preferences, proximity, and load.

    - If a payload of orders is provided, processes those.
    - If payload is empty, fetches all 'created' orders from the external API.
    - Fetches active riders and matches them.
    """,
    responses={
        200: {
            "description": "Assignment Result",
            "content": {
                "application/json": {
                    "example": {"code": 200, "details": {"1234": [{"orderid": "..."}]}, "message": "Success", "status": True}
                }
            }
        }
    }
)
async def assign_orders_to_riders(
    request: Request,
    body: list[dict] = None,
    resuffle: bool = Query(False),
    reshuffle: bool = Query(False),
    rehuffle: bool = Query(False),
    hypertuning_params: str = None
):
    """
    Smart assignment of orders to riders.

    Pipeline: fetch riders/pricing -> resolve orders -> assign -> optimize
    each rider's route in parallel -> annotate orders (rider info, distances,
    ETAs) -> group into zones -> return flat + zoned views.

    Args:
        request: Used to inspect raw query params for reshuffle flags.
        body: Optional orders payload; when empty, 'created' orders are
            fetched from the external API instead.
        resuffle / reshuffle / rehuffle: Three accepted spellings of the
            same retry flag (kept for caller compatibility).
        hypertuning_params: Optional per-request ML strategy override; only
            honored if it names a valid strategy.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    from app.services.rider.get_active_riders import fetch_active_riders, fetch_created_orders, fetch_rider_pricing
    from app.services.core.assignment_service import AssignmentService
    from app.services.routing.route_optimizer import RouteOptimizer
    from app.services.routing.realistic_eta_calculator import RealisticETACalculator
    from datetime import datetime, timedelta
    from dateutil.parser import parse as parse_date
    import asyncio

    eta_calculator = RealisticETACalculator()

    try:
        # Check if any variant is present in query params (flag-style) or explicitly true
        q_params = request.query_params
        do_reshuffle = any(k in q_params for k in ["reshuffle", "resuffle", "rehuffle"]) or \
                       resuffle or reshuffle or rehuffle

        # 1. Fetch Riders and Pricing (concurrently — independent calls)
        riders_task = fetch_active_riders()
        pricing_task = fetch_rider_pricing()

        riders, pricing = await asyncio.gather(riders_task, pricing_task)

        # Determine pricing (Default: 30 base + 2.5/km)
        fuel_charge = 2.5
        base_pay = 30.0
        if pricing:
            # Only shift 1's pricing row is consulted — presumably the
            # default shift; TODO confirm against the pricing API.
            shift_1 = next((p for p in pricing if p.get("shiftid") == 1), None)
            if shift_1:
                fuel_charge = float(shift_1.get("fuelcharge", 2.5))
                base_pay = float(shift_1.get("basepay") or shift_1.get("base_pay") or 30.0)

        # 2. Determine Orders Source
        orders = body
        if not orders:
            logger.info("No payload provided, fetching created orders from external API.")
            orders = await fetch_created_orders()
        else:
            logger.info(f"Processing {len(orders)} orders from payload.")

        # Nothing to assign even after the fallback fetch: exit early.
        if not orders:
            return {
                "code": 200,
                "details": {},
                "message": "No orders found to assign.",
                "status": True,
                "meta": {
                    "active_riders_count": len(riders)
                }
            }

        # 3. Run Assignment (AssignmentService)
        # -- Per-request strategy override --
        from app.config.dynamic_config import get_config
        _cfg = get_config()
        _original_strategy = None

        valid_strategies = ["balanced", "fuel_saver", "aggressive_speed", "zone_strict"]
        if hypertuning_params and hypertuning_params in valid_strategies:
            # NOTE(review): mutates DynamicConfig's private _cache directly,
            # which affects concurrent requests until restored below —
            # confirm this is acceptable / consider a request-scoped override.
            _original_strategy = _cfg.get("ml_strategy", "balanced")
            _cfg._cache["ml_strategy"] = hypertuning_params
            logger.info(f"[HYPERTUNE] Per-request strategy override: {hypertuning_params}")

        service = AssignmentService()
        assignments, unassigned_orders = await service.assign_orders(
            riders=riders,
            orders=orders,
            fuel_charge=fuel_charge,
            base_pay=base_pay,
            reshuffle=do_reshuffle
        )

        # Restore original strategy after this call
        if _original_strategy is not None:
            _cfg._cache["ml_strategy"] = _original_strategy

        if do_reshuffle:
            logger.info("[RESHUFFLE] Retry mode active - exploring alternative rider assignments.")

        # 4. Optimize Routes for Each Rider and Flatten Response
        optimizer = RouteOptimizer()
        flat_orders_list = []

        # Prepare tasks for parallel execution
        # We need to store context (rider_id) to map results back
        optimization_tasks = []
        task_contexts = []

        for rider_id, rider_orders in assignments.items():
            if not rider_orders:
                continue

            # Align with createdeliveries model: Always optimize from the Pickup/Kitchen location.
            # This prevents route reversal if the rider is on the "far" side of the deliveries.
            # The rider's current location (rlat/rlon) is ignored for sequence optimization
            # to ensure the logical flow (Kitchen -> Stop 1 -> Stop 2 -> Stop 3) is followed.
            start_coords = None

            # Add to task list
            optimization_tasks.append(
                optimizer.optimize_provider_payload(rider_orders, start_coords=start_coords)
            )
            task_contexts.append(rider_id)

        total_assigned = 0

        # Execute all optimizations in parallel
        # This dramatically reduces time from Sum(RiderTimes) to Max(RiderTime)
        if optimization_tasks:
            results = await asyncio.gather(*optimization_tasks)

            # Create a lookup for rider details
            rider_info_map = {}
            for r in riders:
                # Use string conversion for robust ID matching
                r_id = str(r.get("userid") or r.get("_id", ""))
                if r_id:
                    rider_info_map[r_id] = {
                        "name": r.get("username", ""),
                        "contactno": r.get("contactno", "")
                    }

            # Process results matching them back to riders
            # (gather preserves order, so zip pairs each result with its rider)
            for stored_rider_id, optimized_route in zip(task_contexts, results):
                r_id_str = str(stored_rider_id)
                r_info = rider_info_map.get(r_id_str, {})
                rider_name = r_info.get("name", "")
                rider_contact = r_info.get("contactno", "")

                # Calculate total distance for this rider
                total_rider_kms = 0
                if optimized_route:
                    # Usually the last order has the max cumulative kms if steps are 1..N
                    # NOTE(review): bare except below falls back to summing
                    # per-order distances when cumulativekms is malformed;
                    # narrowing to (TypeError, ValueError) would be safer.
                    try:
                        total_rider_kms = max([float(o.get("cumulativekms", 0)) for o in optimized_route])
                    except:
                        total_rider_kms = sum([float(o.get("actualkms", o.get("kms", 0))) for o in optimized_route])

                for order in optimized_route:
                    # Annotate each order in place with its rider's details.
                    order["userid"] = stored_rider_id
                    order["username"] = rider_name
                    # Populate the specific fields requested by the user
                    order["rider"] = rider_name
                    order["ridercontactno"] = rider_contact
                    order["riderkms"] = str(round(total_rider_kms, 2))

                    # --- DYNAMIC ETA COMPUTATION -----------------------------
                    # Try various cases and names for pickup slot
                    pickup_slot_str = (
                        order.get("pickupSlot") or
                        order.get("pickupslot") or
                        order.get("pickup_slot") or
                        order.get("pickuptime")
                    )

                    if pickup_slot_str:
                        # Find the actual travel distance for THIS specific order
                        # cumulativekms represents distance from pickup to this delivery stop
                        dist_km = float(order.get("cumulativekms") or order.get("actualkms", order.get("kms", 0)))
                        step = int(order.get("step", 1))
                        order_type = order.get("ordertype", "Economy")

                        try:
                            # Robust date parsing (handles almost any format magically)
                            pickup_time = parse_date(str(pickup_slot_str))

                            eta_mins = eta_calculator.calculate_eta(
                                distance_km=dist_km,
                                is_first_order=(step == 1),
                                order_type=order_type,
                                time_of_day="normal"
                            )
                            expected_time = pickup_time + timedelta(minutes=eta_mins)

                            # Format output as requested: "2026-03-24 08:25 AM"
                            order["expectedDeliveryTime"] = expected_time.strftime("%Y-%m-%d %I:%M %p")
                            order["transitMinutes"] = eta_mins
                            order["calculationDistanceKm"] = round(dist_km, 2)
                        except Exception as e:
                            # ETA is best-effort; an unparsable slot only drops the ETA fields.
                            logger.warning(f"Could not calculate ETA from pickupSlot '{pickup_slot_str}': {e}")
                    # ---------------------------------------------------------

                    flat_orders_list.append(order)
                total_assigned += len(optimized_route)

        # 5. Zone Processing
        from app.services.routing.zone_service import ZoneService
        zone_service = ZoneService()
        zone_data = zone_service.group_by_zones(flat_orders_list, unassigned_orders, fuel_charge=fuel_charge, base_pay=base_pay)

        zones_structure = zone_data["detailed_zones"]
        zone_analysis = zone_data["zone_analysis"]

        return {
            "code": 200,
            "zone_summary": zone_analysis,  # High-level zone metrics
            "zones": zones_structure,  # Detailed data
            "details": flat_orders_list,  # Flat list
            "message": "Success",
            "status": True,
            "meta": {
                "total_orders": len(orders),
                "utilized_riders": len([rid for rid, rl in assignments.items() if rl]),
                "active_riders_pool": len(riders),
                "assigned_orders": total_assigned,
                "unassigned_orders": len(unassigned_orders),
                "total_profit": round(sum(z["total_profit"] for z in zone_analysis), 2),
                "fuel_charge_base": fuel_charge,
                "unassigned_details": [
                    {
                        "orderid": o.get("orderid") or o.get("_id"),
                        "reason": o.get("unassigned_reason", "Unknown capacity/proximity issue")
                    } for o in unassigned_orders
                ],
                "distribution_summary": {rid: len(rl) for rid, rl in assignments.items() if rl},
                "resuffle_mode": do_reshuffle,
                "hypertuning_params": hypertuning_params or "default"
            }
        }

    except Exception as e:
        logger.error(f"Error in rider assignment: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error during assignment")

    finally:
        # -- Fire ML training trigger (non-blocking) -----------------------
        # Runs AFTER response is ready. Every 10th call kicks off a
        # background thread that retrains the model. API is never blocked.
        try:
            from app.main import trigger_training_if_due
            trigger_training_if_due()
        except Exception:
            pass  # Never crash the endpoint due to ML trigger
|
||||
Reference in New Issue
Block a user