This commit is contained in:
2026-04-07 12:44:06 +05:30
parent 7f81fc64c1
commit 5dd4196014
49 changed files with 2795 additions and 0 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

118
pages/mailer.py Normal file
View File

@@ -0,0 +1,118 @@
import streamlit as st
import pandas as pd
from datetime import date, datetime
from zoneinfo import ZoneInfo
from app_core.services.mailer_service import MailerService
def render_page():
    """Render the Mailer page: scheduler status, send metrics, a manual
    trigger for the daily report, and a table of recent email logs."""
    # Gate: only authenticated users may view this page.
    if st.session_state.get("auth_user") is None:
        st.warning("Please login to continue.")
        st.stop()
    st.markdown("## Mailer")
    st.caption("Automated daily email sending logs and status.")
    service = MailerService()
    ist = ZoneInfo("Asia/Kolkata")
    st.markdown("### Scheduler Status")
    # Check if scheduler container is running by checking if daily report was sent today
    # NOTE(review): service.recent_logs(limit=1000) is fetched several times on
    # this page; consider fetching once and reusing.
    today_logs = [log for log in service.recent_logs(limit=1000) if log.get('date_for') == str(date.today())]
    daily_report_sent_today = any('Daily Report' in str(log.get('subject', '')) for log in today_logs)
    if daily_report_sent_today:
        st.success("Scheduler is running - Daily report already sent today")
    else:
        # Check if it's past 8 PM IST today
        now_ist = datetime.now(ist)
        eight_pm_today = now_ist.replace(hour=20, minute=0, second=0, microsecond=0)
        if now_ist >= eight_pm_today:
            # Past today's send time but nothing logged yet.
            st.warning("Scheduler is running - Waiting for next scheduled run (8:00 PM IST daily)")
        else:
            next_run_ist = eight_pm_today
            st.success(f"Scheduler is running - Next report will be sent at {next_run_ist.strftime('%B %d, %Y at %I:%M %p IST')}")
    st.markdown("---")
    # Show system status
    st.markdown("### System Status")
    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric("Total Emails Sent", len([log for log in service.recent_logs(limit=1000) if log.get('status') == 'sent']))
    with col2:
        failed_count = len([log for log in service.recent_logs(limit=1000) if log.get('status') == 'failed'])
        st.metric("Failed Sends", failed_count, delta=f"-{failed_count}" if failed_count > 0 else None)
    with col3:
        # NOTE(review): recomputes the same today_logs list built above.
        today_logs = [log for log in service.recent_logs(limit=1000) if log.get('date_for') == str(date.today())]
        st.metric("Today's Sends", len(today_logs))
    st.markdown("---")
    # Manual trigger section
    st.markdown("### Manual Controls")
    mcol1, mcol2, mcol3 = st.columns([2, 2, 3])
    with mcol1:
        target_date = st.date_input("Report Date", value=date.today())
    with mcol2:
        st.write("")  # Spacer
        force_resend = st.checkbox("Force Resend", value=True, help="Send the report even if it was already sent for this date.")
    with mcol3:
        st.write("")  # Spacer
        if st.button("Send Report Now", type="primary", use_container_width=True):
            with st.spinner(f"Sending report for {target_date}..."):
                try:
                    # Imported lazily so the page renders even if the report
                    # module is heavy to import.
                    from app_core.services.daily_report import main as run_daily_report
                    # Pass the selected date and force flag
                    result = run_daily_report(for_date=str(target_date), force=force_resend)
                    if result == 0:
                        st.success(f"Report for {target_date} sent successfully!")
                        st.rerun()
                    else:
                        st.error(f"Failed to send report (exit code: {result})")
                except Exception as e:
                    st.error(f"Error: {str(e)}")
    st.caption("Select a date to manually trigger or re-trigger the daily report email. This is useful for reconciliations.")
    st.markdown("---")
    # Show email logs table
    st.markdown("### Email Logs")
    logs = service.recent_logs(limit=100)
    if not logs:
        st.info("No email logs yet. Automated emails will appear here once sent.")
    else:
        df_logs = pd.DataFrame(logs)
        # Raw DB column name -> display label.
        col_map = {
            "id": "ID",
            "sent_at": "Sent At",
            "recipients": "Recipients",
            "subject": "Subject",
            "status": "Status",
            "error": "Error",
            "date_for": "Report Date",
        }
        df_logs = df_logs[["id", "sent_at", "date_for", "recipients", "subject", "status", "error"]]
        df_logs = df_logs.rename(columns=col_map)
        # Add status styling
        def style_status(val):
            if val == 'sent':
                return 'background-color: #D1FAE5; color: #065F46; font-weight: 600;'
            elif val == 'failed':
                return 'background-color: #FEE2E2; color: #991B1B; font-weight: 600;'
            return ''
        styled_logs = df_logs.style.map(style_status, subset=['Status'])
        st.dataframe(styled_logs, use_container_width=True, height=400)
    # trigger reload

189
pages/mappings.py Normal file
View File

@@ -0,0 +1,189 @@
import streamlit as st
import pandas as pd
from app_core.services.mappings_service import MappingsService
from app_core.config.settings import STORES
def render_page():
    """Render the Triumph Debtor Mappings page.

    Tab 1 lets the user filter, search, inline-edit and delete existing
    POS-code -> Triumph-debtor-code mappings; tab 2 creates a new mapping.
    Requires an authenticated user in session state.
    """
    # Gate: only authenticated users may view this page.
    if st.session_state.get("auth_user") is None:
        st.warning("Please login to continue.")
        st.stop()
    st.markdown("""
    <style>
    .stApp { font-size: 1.05rem; }
    [data-testid="stDataEditor"] { font-size: 1.05rem !important; }
    h2 { font-weight: 700 !important; letter-spacing: -0.02em !important; }
    h3 { font-weight: 600 !important; color: #6366f1 !important; margin-top: 1.2rem !important; }
    .store-pill {
        display: inline-block;
        padding: 4px 14px;
        border-radius: 20px;
        font-size: 0.85rem;
        font-weight: 600;
        margin: 3px 4px;
        background: linear-gradient(135deg, #6366f1, #8b5cf6);
        color: white;
    }
    </style>
    """, unsafe_allow_html=True)
    st.markdown("## 📋 Triumph Debtor Mappings")
    st.caption("Manage POS account sale mappings to Triumph debtor codes — filtered by store.")
    service = MappingsService()
    all_mappings = service.get_all_mappings()
    # Store labels from config — used only for the "Add New" dropdown
    store_labels = [s["label"] for s in STORES]
    # BUG FIX: the mandatory-store validation previously compared against a
    # different literal ("— Select a Store —") than the placeholder actually
    # offered in the dropdown ("Select a Store"), so the check could never
    # fire and a mapping could be created with the placeholder as its outlet.
    # Sharing one constant between the option list and the check closes that gap.
    store_placeholder = "Select a Store"
    tab1, tab2 = st.tabs(["🔍 View & Search", " Add New Mapping"])
    # ── TAB 1: View & Edit ────────────────────────────────────────────────────
    with tab1:
        st.markdown("### 🔍 Current Mappings")
        if not all_mappings:
            st.info("No mappings found. Use the ' Add New Mapping' tab to create one.")
        else:
            # Build dataframe from raw DB values
            data = [
                {
                    "ID": m.id,
                    "POS Code": m.code or "",
                    "Account Name": m.name or "",
                    "Triumph Code": m.dbmacc or "",
                    "Outlet": (m.outlet or "").strip(),
                    "Created At": m.created_at.strftime("%Y-%m-%d %H:%M") if m.created_at else "",
                    "Updated At": m.updated_at.strftime("%Y-%m-%d %H:%M") if m.updated_at else "",
                }
                for m in all_mappings
            ]
            df_full = pd.DataFrame(data)
            # Distinct outlet names actually in DB (not the config list)
            distinct_outlets = sorted([
                o for o in df_full["Outlet"].dropna().unique().tolist() if o.strip()
            ])
            f1, f2 = st.columns([1, 2])
            with f1:
                selected_store = st.selectbox(
                    "🏪 Filter by Store",
                    options=["All Stores"] + distinct_outlets,
                    index=0,
                )
            with f2:
                search_query = st.text_input(
                    "🔎 Search",
                    placeholder="POS Code, Account Name, or Triumph Code…",
                )
            # Apply store filter, then free-text search across key columns.
            df = df_full.copy()
            if selected_store != "All Stores":
                df = df[df["Outlet"] == selected_store]
            if search_query:
                q = search_query
                df = df[
                    df["POS Code"].str.contains(q, case=False, na=False) |
                    df["Account Name"].str.contains(q, case=False, na=False) |
                    df["Triumph Code"].str.contains(q, case=False, na=False)
                ]
            store_label = selected_store if selected_store != "All Stores" else "all stores"
            st.caption(f"Showing **{len(df)}** mapping(s) for **{store_label}**.")
            st.markdown("#### 📝 Edit Mappings")
            st.caption("Double-click any editable cell to modify. Changes are saved when you press Enter.")
            st.data_editor(
                df,
                hide_index=True,
                use_container_width=True,
                num_rows="dynamic",
                disabled=["ID", "Created At", "Updated At"],
                column_config={
                    "ID": st.column_config.NumberColumn(format="%d", width="small"),
                    "POS Code": st.column_config.TextColumn(max_chars=50, width="medium"),
                    "Account Name": st.column_config.TextColumn(max_chars=255, width="large"),
                    "Triumph Code": st.column_config.TextColumn(max_chars=50, width="medium"),
                    "Outlet": st.column_config.TextColumn(max_chars=255, width="large"),
                    "Created At": st.column_config.TextColumn(width="medium"),
                    "Updated At": st.column_config.TextColumn(width="medium"),
                },
                key="mapping_editor_v2",
            )
            # Persist edits/deletes reported by the data editor; positional
            # indices refer to the filtered dataframe shown above.
            if st.session_state.get("mapping_editor_v2"):
                edited_rows = st.session_state.mapping_editor_v2.get("edited_rows", {})
                deleted_rows = st.session_state.mapping_editor_v2.get("deleted_rows", [])
                if edited_rows or deleted_rows:
                    changes_made = False
                    for idx, patch in edited_rows.items():
                        mapping_id = df.iloc[idx]["ID"]
                        row = df.iloc[idx]
                        # Fall back to the current value for fields not in the patch.
                        new_code = patch.get("POS Code", row["POS Code"])
                        new_name = patch.get("Account Name", row["Account Name"])
                        new_triumph = patch.get("Triumph Code", row["Triumph Code"])
                        new_outlet = patch.get("Outlet", row["Outlet"])
                        if service.update_mapping(mapping_id, new_code, new_name, new_triumph, new_outlet):
                            changes_made = True
                    for idx in deleted_rows:
                        if service.delete_mapping(df.iloc[idx]["ID"]):
                            changes_made = True
                    if changes_made:
                        st.toast("✅ Mappings updated and synced!", icon="🚀")
                        st.rerun()
    # ── TAB 2: Add New ────────────────────────────────────────────────────────
    with tab2:
        st.markdown("### Create New Mapping")
        st.caption("All fields are mandatory.")
        with st.form("new_mapping_form", clear_on_submit=True):
            c1, c2 = st.columns(2)
            with c1:
                new_code = st.text_input("POS Code", placeholder="e.g. 0273",
                                         help="Unique identifier from your POS system.")
                new_name = st.text_input("Account Sale Name", placeholder="e.g. Suriya",
                                         help="The name as it appears on account invoices.")
            with c2:
                new_triumph = st.text_input("Triumph Debtor Code (DBMACC#)", placeholder="e.g. SURI0273",
                                            help="The debtor code in Triumph ERP.")
                new_outlet = st.selectbox(
                    "Store / Outlet",
                    options=[store_placeholder] + store_labels,
                    index=0,
                    help="Select the store this mapping belongs to.",
                )
            st.markdown("<br>", unsafe_allow_html=True)
            if st.form_submit_button("Create Mapping", type="primary", use_container_width=True):
                # Reject blank fields and the unselected-store placeholder.
                if not all([new_code.strip(), new_name.strip(), new_triumph.strip()]) or new_outlet == store_placeholder:
                    st.error("⚠️ All fields are required — including selecting a store.")
                else:
                    service.create_mapping(new_code.strip(), new_name.strip(), new_triumph.strip(), new_outlet)
                    st.success(f"✅ Mapping for **{new_name}** created under **{new_outlet}**!")
                    st.balloons()
                    st.rerun()
        st.markdown("---")
        with st.expander("📖 Field definitions"):
            st.write("""
            - **POS Code** — Unique identifier from your POS system.
            - **Account Name** — Name used on account sales invoices.
            - **Triumph Code (DBMACC#)** — Corresponding debtor code in Triumph ERP.
            - **Store / Outlet** — Store this mapping is assigned to.

            *Any change here is immediately picked up by the background event processor.*
            """)
# Allow running this page directly (e.g. `streamlit run pages/mappings.py`).
if __name__ == "__main__":
    render_page()

380
pages/see_logs.py Normal file
View File

@@ -0,0 +1,380 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from datetime import datetime, date, timedelta
from app_core.db.database import engine
from sqlalchemy import text
from app_core.ui.layout import render_store_selector
@st.cache_data(ttl=300)  # Cache for 5 minutes
def _load_available_dates(tenant_id: int, days_back: int = 60):
    """Load available dates with data for the tenant.

    Returns a DataFrame with columns ``d`` (datetime64, newest first) and
    ``c`` (row count per date) from the "tenantpostings" table.

    NOTE(review): *days_back* is currently unused — the SQL hard-codes a
    60-day window; confirm whether the parameter should drive the interval.
    """
    with engine.connect() as conn:
        dates_df = pd.read_sql(
            'SELECT "created_at"::date AS d, COUNT(*) AS c\n'
            'FROM "tenantpostings"\n'
            'WHERE "created_at" >= (CURRENT_DATE - INTERVAL \'60 days\') AND "tenant_id" = %(t)s\n'
            'GROUP BY d\n'
            'ORDER BY d DESC',
            conn,
            params={"t": tenant_id},
        )
    # Normalize 'd' to datetime64 so callers can use the .dt accessor.
    if not pd.api.types.is_datetime64_any_dtype(dates_df['d']):
        dates_df['d'] = pd.to_datetime(dates_df['d'], errors='coerce')
    return dates_df
@st.cache_data(ttl=300)  # Cache for 5 minutes
def _load_daily_data(tenant_id: int, target_date: date):
    """Load daily data for a specific tenant and date.

    Fetches up to 10000 rows for *target_date*, then de-duplicates rows that
    carry a triumph_event, keeping the newest (highest id) row per
    (processing_type, triumph_event) pair.
    """
    day_sql = (
        'SELECT * FROM "tenantpostings" '
        'WHERE "created_at"::date = %(d)s AND "tenant_id" = %(t)s '
        'ORDER BY "id" DESC '
        'LIMIT 10000'
    )
    with engine.connect() as conn:
        df = pd.read_sql(day_sql, conn, params={"d": target_date, "t": tenant_id})
    # De-duplicate by triumph_event to avoid logical doubling
    if not df.empty and 'triumph_event' in df.columns:
        # Rows with a blank/NaN event are exempt from de-duplication.
        has_event = df['triumph_event'].fillna('').astype(str).str.strip() != ''
        df_with_events = df[has_event].sort_values(['processing_type', 'triumph_event', 'id'], ascending=[True, True, False]).drop_duplicates(subset=['processing_type', 'triumph_event'], keep='first')
        df_no_events = df[~has_event]
        df = pd.concat([df_with_events, df_no_events]).sort_values('id', ascending=False)
    return df
@st.cache_data(ttl=300)  # Cache for 5 minutes
def _load_trend_data(tenant_id: int, days_back: int = 30):
    """Load trend data for charts.

    Returns a DataFrame with columns ``d`` (datetime64, oldest first) and
    ``total`` (sum of total_amount for JOURNAL rows per date).

    NOTE(review): *days_back* is currently unused — the SQL hard-codes a
    30-day window; confirm whether the parameter should drive the interval.
    """
    with engine.connect() as conn:
        totals_agg = pd.read_sql(
            'SELECT "created_at"::date AS d, SUM("total_amount") AS total\n'
            'FROM "tenantpostings"\n'
            'WHERE "created_at" >= (CURRENT_DATE - INTERVAL \'30 days\') AND "tenant_id" = %(t)s\n'
            " AND UPPER(COALESCE(\"processing_type\", '')) = 'JOURNAL'\n"
            'GROUP BY d\n'
            'ORDER BY d ASC',
            conn,
            params={"t": tenant_id},
        )
    # Normalize 'd' to datetime64 so callers can use the .dt accessor.
    if not pd.api.types.is_datetime64_any_dtype(totals_agg['d']):
        totals_agg['d'] = pd.to_datetime(totals_agg['d'], errors='coerce')
    return totals_agg
def _normalize_name(name: str) -> str:
return "".join(ch for ch in name.lower() if ch.isalnum())
def _build_display_map(df: pd.DataFrame) -> dict[str, str]:
overrides = {
"triumph_status": "Status",
"triumph_event": "Event",
"outlet_name": "Outlet Name",
"tenant_id": "Tenant ID",
"processing_type": "Processing Type",
"total_amount": "Total Amount",
"created_at": "Date",
"updated_at": "Updated At",
"id": "SNo",
}
display_map: dict[str, str] = {}
used: set[str] = set()
for col in df.columns:
key = col.lower()
if key in overrides:
display_name = overrides[key]
else:
# Convert snake_case to Title Case
display_name = col.replace("_", " ").title()
# Ensure unique display names
final_name = display_name
counter = 1
while final_name in used:
final_name = f"{display_name} ({counter})"
counter += 1
display_map[col] = final_name
used.add(final_name)
return display_map
def _pick_existing_columns(df: pd.DataFrame, names: list[str]) -> list[str]:
"""Pick columns that exist in the DataFrame from a list of names."""
found = []
for name in names:
if name in df.columns:
found.append(name)
return found
def _format_date_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Format date columns to show only date part"""
df_formatted = df.copy()
for col in df_formatted.columns:
if 'created_at' in col.lower() or 'date' in col.lower():
if pd.api.types.is_datetime64_any_dtype(df_formatted[col]):
df_formatted[col] = df_formatted[col].dt.date
else:
# Try to convert to datetime first
try:
df_formatted[col] = pd.to_datetime(df_formatted[col]).dt.date
except:
pass
return df_formatted
def _journal_total(frame: pd.DataFrame) -> float:
"""Sum total_amount for JOURNAL rows only."""
if frame is None or frame.empty or 'total_amount' not in frame.columns:
return 0.0
# We assume 'frame' is already de-duplicated by triumph_event at load time
if 'processing_type' in frame.columns:
mask = frame['processing_type'].astype(str).str.upper() == 'JOURNAL'
frame = frame[mask]
return float(frame['total_amount'].sum()) if not frame.empty else 0.0
def _stat_card(title: str, value: str, color: str, icon: str) -> str:
    """Return an HTML snippet for a hoverable KPI card.

    *color* is a hex string like "#059669"; it is suffixed with two hex
    digits (15/20/30) to derive translucent background/border/shadow tints.
    """
    return f"""
    <div style="
        background: {color}15;
        border: 1px solid {color}30;
        border-radius: 12px;
        padding: 16px;
        text-align: center;
        box-shadow: 0 4px 12px {color}20;
        transition: transform 0.2s ease, box-shadow 0.2s ease;
        cursor: pointer;
    " onmouseover="this.style.transform='translateY(-2px)'; this.style.boxShadow='0 8px 20px {color}30';"
    onmouseout="this.style.transform='translateY(0px)'; this.style.boxShadow='0 4px 12px {color}20';">
        <div style="font-size: 24px; margin-bottom: 8px;">{icon}</div>
        <div style="font-size: 24px; font-weight: 700; color: {color}; margin-bottom: 4px;">{value}</div>
        <div style="font-size: 14px; color: #64748b; font-weight: 600;">{title}</div>
    </div>
    """
def render_page():
    """Render the analytics dashboard: KPI cards, a 14-day JOURNAL sales
    trend chart, and a searchable detail table for the selected store/date."""
    # Gate: only authenticated users may view this page.
    if st.session_state.get("auth_user") is None:
        st.warning("Please login to continue.")
        st.stop()
    # Store selector (must be chosen before loading analytics)
    tenant_id, store_label = render_store_selector()
    if not tenant_id:
        st.info("Please choose a store to view analytics.")
        return
    st.markdown("## 📊 Dashboard")
    # Date picker for selecting any date
    picker_col1, _ = st.columns([1, 3])
    with picker_col1:
        selected_date = st.date_input("Report date", value=date.today(), max_value=date.today())
    st.markdown("---")
    # Target date preference is today, but we will fall back to most recent date with data
    today = date.today()
    # Database connection with caching
    try:
        # Find most recent available dates with data (last 60 days)
        dates_df = _load_available_dates(tenant_id)
        if dates_df.empty:
            st.warning("No data available in the last 60 days.")
            return
        # Prefer the user-selected date if present; else pick the most recent date
        available_dates = list(dates_df['d'].dt.date)
        if selected_date in available_dates:
            date_shown = selected_date
        else:
            date_shown = available_dates[0]
        # Comparison dates: the most recent prior dates (up to two)
        prior_dates = [d for d in available_dates if d < date_shown]
        compare_date = prior_dates[0] if prior_dates else None
        compare_date2 = prior_dates[1] if len(prior_dates) > 1 else None
        # Load frames using cached functions
        df = _load_daily_data(tenant_id, date_shown)
        df_compare = _load_daily_data(tenant_id, compare_date) if compare_date else pd.DataFrame()
        df_compare2 = _load_daily_data(tenant_id, compare_date2) if compare_date2 else pd.DataFrame()
        if date_shown == selected_date:
            st.success(f"📅 Showing data for {date_shown.strftime('%B %d, %Y')} ({len(df):,} records)")
        else:
            st.info(f"📅 Showing most recent data: {date_shown.strftime('%B %d, %Y')} ({len(df):,} records)")
    except Exception as e:
        st.error(f"Database connection failed: {str(e)}")
        return
    # Calculate key metrics (Total Amount uses JOURNAL only)
    total_amount = _journal_total(df)
    total_transactions = len(df)
    success_count = len(df[df['triumph_status'] == 'success']) if 'triumph_status' in df.columns else 0
    failed_count = len(df[df['triumph_status'] == 'failed']) if 'triumph_status' in df.columns else 0
    pending_count = len(df[df['triumph_status'] == 'pending']) if 'triumph_status' in df.columns else 0
    # Status summary cards
    st.markdown("### 📈 Today's Overview")
    col1, col2, col3, col4, col5 = st.columns(5)
    with col1:
        st.markdown(_stat_card("Total Amount", f"${total_amount:,.2f}", "#059669", "💰"), unsafe_allow_html=True)
    with col2:
        st.markdown(_stat_card("Transactions", f"{total_transactions:,}", "#2563EB", "📊"), unsafe_allow_html=True)
    with col3:
        # NOTE(review): the icon argument is an empty string here and below —
        # possibly lost glyphs; confirm the intended icons.
        st.markdown(_stat_card("Success", f"{success_count:,}", "#059669", ""), unsafe_allow_html=True)
    with col4:
        st.markdown(_stat_card("Failed", f"{failed_count:,}", "#DC2626", ""), unsafe_allow_html=True)
    with col5:
        st.markdown(_stat_card("Pending", f"{pending_count:,}", "#D97706", ""), unsafe_allow_html=True)
    st.markdown("---")
    # Build simple frames map for totals lookup
    # NOTE(review): frames_by_date is not referenced later in this function;
    # the locals() checks are also redundant since both names are always bound.
    frames_by_date = {date_shown: df}
    if 'df_compare' in locals() and compare_date:
        frames_by_date[compare_date] = df_compare
    if 'df_compare2' in locals() and 'compare_date2' in locals() and compare_date2:
        frames_by_date[compare_date2] = df_compare2
    # Stock-like trend line: last 14 days JOURNAL totals, with last 3 days highlighted
    st.markdown("### 📈 Sales Trend")
    totals_agg = _load_trend_data(tenant_id)
    last14 = totals_agg.tail(14).copy() if not totals_agg.empty else pd.DataFrame(columns=['d','total'])
    if not last14.empty:
        x_labels = last14['d'].dt.strftime('%b %d')
        fig_line = go.Figure()
        fig_line.add_trace(
            go.Scatter(
                x=x_labels,
                y=last14['total'],
                mode='lines+markers',
                name='Sales',
                line=dict(color="#2563EB", width=2.6),
                marker=dict(size=4, color="#2563EB"),
                line_shape='spline',
                hovertemplate="%{x}<br>$%{y:,.2f}<extra></extra>",
            )
        )
        # Highlight last 3 points
        last3 = last14.tail(3).reset_index(drop=True)
        colors = ["#94A3B8", "#DC2626", "#16A34A"]  # old->gray, prev->red, latest->green
        labels = ["Prev-2", "Prev", "Latest"]
        for i in range(len(last3)):
            fig_line.add_trace(
                go.Scatter(
                    x=[last3['d'].dt.strftime('%b %d').iloc[i]],
                    y=[last3['total'].iloc[i]],
                    mode='markers',
                    name=labels[i],
                    marker=dict(color=colors[i], size=9, symbol='circle'),
                    hovertemplate=f"{labels[i]}: %{{x}}<br>$%{{y:,.2f}}<extra></extra>",
                )
            )
        # Profit/Loss vs previous day
        # NOTE(review): both arrow strings are empty — likely lost glyphs
        # (e.g. up/down arrows); confirm the intended characters.
        if len(last3) >= 2 and last3['total'].iloc[1] != 0:
            shown_total = float(last3['total'].iloc[2]) if len(last3) == 3 else float(last3['total'].iloc[-1])
            prev_total = float(last3['total'].iloc[-2])
            delta = (shown_total - prev_total) / prev_total * 100.0
            arrow = '' if delta >= 0 else ''
            color = '#16A34A' if delta >= 0 else '#DC2626'
            fig_line.add_annotation(
                x=1, y=1.1, xref='paper', yref='paper', showarrow=False,
                text=f"{arrow} {delta:.1f}% vs {last3['d'].dt.strftime('%b %d').iloc[-2]}",
                font=dict(color=color, size=14), align='right'
            )
        fig_line.update_layout(
            height=320,
            showlegend=True,
            yaxis_title="Total Amount ($)",
            xaxis_title=None,
            margin=dict(t=30, b=30, l=30, r=20),
            plot_bgcolor='white',
            hovermode='x unified'
        )
        fig_line.update_yaxes(showgrid=True, gridcolor='#E5E7EB', zeroline=False)
        fig_line.update_xaxes(showgrid=False, zeroline=False)
        st.plotly_chart(fig_line, use_container_width=True)
    st.markdown("---")
    # Data table section
    st.markdown("### 📋 Detailed Data")
    # Minimal columns for default view
    minimal_names = [
        "id",
        "created_at",
        "outlet_name",
        "processing_type",
        "total_amount",
        "triumph_status",
        "triumph_event",
    ]
    minimal_cols = _pick_existing_columns(df, minimal_names)
    # Controls row: search only
    q = st.text_input("Search", placeholder="Type to filter rows across all columns")
    # Filter data based on search
    if q:
        mask = df.astype(str).apply(lambda x: x.str.contains(q, case=False, na=False)).any(axis=1)
        df_filtered = df[mask]
    else:
        df_filtered = df.copy()
    # Always use minimal columns
    display_cols = minimal_cols if minimal_cols else list(df_filtered.columns[:8])
    # Build display names
    display_map = _build_display_map(df_filtered)
    # Format the display dataframe
    df_display = df_filtered[display_cols].copy()
    df_display.columns = [display_map.get(col, col) for col in display_cols]
    # Format date columns
    df_display = _format_date_columns(df_display)
    # Format numeric columns
    for col in df_display.columns:
        if 'amount' in col.lower() and df_display[col].dtype in ['float64', 'int64']:
            df_display[col] = df_display[col].apply(lambda x: f"${x:,.2f}" if pd.notna(x) else "")
    # Always apply status styling
    if 'Status' in df_display.columns:
        def style_status(val):
            if val == 'success':
                return 'background-color: #D1FAE5; color: #065F46; font-weight: 600;'
            elif val == 'failed':
                return 'background-color: #FEE2E2; color: #991B1B; font-weight: 600;'
            elif val == 'pending':
                return 'background-color: #FEF3C7; color: #92400E; font-weight: 600;'
            return ''
        styled_df = df_display.style.map(style_status, subset=['Status'])
        st.dataframe(styled_df, use_container_width=True, height=400)
    else:
        st.dataframe(df_display, use_container_width=True, height=400)
    # Download button
    # NOTE(review): this two-step pattern (button, then download_button on the
    # rerun) requires two clicks; a single st.download_button would be simpler.
    if st.button("📥 Download Today's Data as CSV", type="primary"):
        csv = df_filtered.to_csv(index=False)
        st.download_button(
            label="Download CSV",
            data=csv,
            file_name=f"workolik_data_{date_shown.strftime('%Y%m%d')}.csv",
            mime="text/csv"
        )

285
pages/see_payload.py Normal file
View File

@@ -0,0 +1,285 @@
import streamlit as st
import pandas as pd
from sqlalchemy import text
from app_core.db.database import engine
from app_core.ui.layout import render_store_selector
@st.cache_data(ttl=300)  # Cache for 5 minutes
def _load_tenant_data(tenant_id: int, limit: int = 10000):
    """Load data for a specific tenant with caching.

    Returns up to *limit* most-recent rows (ordered by id descending)
    from the "tenantpostings" table.
    """
    with engine.connect() as conn:
        df = pd.read_sql(
            text('SELECT * FROM "tenantpostings" WHERE "tenant_id" = :t ORDER BY "id" DESC LIMIT :limit'),
            conn,
            params={"t": tenant_id, "limit": limit},
        )
    return df
def _detect_status_column(df: pd.DataFrame) -> str | None:
candidates = ["status", "state", "result", "triumph_status"]
lower_map = {c.lower(): c for c in df.columns}
for key in candidates:
if key in lower_map:
return lower_map[key]
for c in df.columns:
if "status" in c.lower():
return c
return None
def _normalize_name(name: str) -> str:
return "".join(ch for ch in name.lower() if ch.isalnum())
def _build_display_map(df: pd.DataFrame) -> dict[str, str]:
overrides = {
"triumph_status": "Status",
"triumph_event": "Event",
"outlet_name": "Outlet Name",
"tenant_id": "Tenant ID",
"processing_type": "Processing Type",
"total_amount": "Total Amount",
"created_at": "Date",
"updated_at": "Updated At",
"id": "SNo",
}
display_map: dict[str, str] = {}
used: set[str] = set()
for col in df.columns:
key = col.lower()
if key in overrides:
label = overrides[key]
else:
label = col.replace("_", " ").title()
base = label
suffix = 2
while label in used:
label = f"{base} {suffix}"
suffix += 1
used.add(label)
display_map[col] = label
return display_map
def _format_status_with_emoji(styler: "pd.io.formats.style.Styler", df: pd.DataFrame, status_col: str | None) -> "pd.io.formats.style.Styler":
    """Attach a value formatter to the status column of *styler*.

    NOTE(review): every branch currently returns the value unchanged
    (f"{v}") — the emoji prefixes this function is named for appear to be
    missing; confirm the intended prefix per status bucket.
    """
    # No-op when there is no detectable status column.
    if status_col is None or status_col not in df.columns:
        return styler
    def fmt(val):
        v = str(val)
        v_lower = v.lower()
        if any(k in v_lower for k in ["success", "ok", "completed", "done", "active"]):
            return f"{v}"
        if any(k in v_lower for k in ["fail", "error", "dead", "invalid"]):
            return f"{v}"
        if any(k in v_lower for k in ["pending", "queue", "waiting", "processing"]):
            return f"{v}"
        return v
    return styler.format({status_col: fmt})
def _badge_status_cells(styler: "pd.io.formats.style.Styler", df: pd.DataFrame, status_col: str | None) -> "pd.io.formats.style.Styler":
if status_col is None or status_col not in df.columns:
return styler
def badge(val):
v = str(val).lower()
bg = "#E2E8F0"; color = "#0F172A"
if any(k in v for k in ["success", "ok", "completed", "done", "active"]):
bg = "#E6F7EE"; color = "#166534"
elif any(k in v for k in ["fail", "error", "dead", "invalid"]):
bg = "#FDECEC"; color = "#991B1B"
elif any(k in v for k in ["pending", "queue", "waiting", "processing"]):
bg = "#FEF5E6"; color = "#92400E"
return f"background-color: {bg}; color:{color}; border-radius: 999px; padding: 4px 8px;"
return styler.map(badge, subset=pd.IndexSlice[:, [status_col]])
def _zebra_style(df: pd.DataFrame) -> "pd.io.formats.style.Styler":
df2 = df.reset_index(drop=True)
def zebra(row: pd.Series):
return ["background-color: rgba(2,6,23,0.03);" if (row.name % 2 == 0) else ""] * len(row)
styler = df2.style.apply(zebra, axis=1)
styler = styler.set_table_styles([
{"selector": "th", "props": "position: sticky; top: 0; background: #F0F6FF; color:#0F172A; font-weight:700;"},
{"selector": "tbody td", "props": "border-top: 1px solid rgba(15,23,42,0.06);"},
{"selector": "table", "props": "border-collapse: separate; border-spacing: 0;"},
])
styler = styler.hide(axis="index")
return styler
def _format_two_decimals_for_amounts(styler: "pd.io.formats.style.Styler", df: pd.DataFrame) -> "pd.io.formats.style.Styler":
    """Format numeric amount-like columns in *styler* with two decimal places.

    A column qualifies when its normalized name matches one of the known
    amount aliases AND its dtype is numeric; otherwise the styler is
    returned unchanged.
    """
    amount_aliases = {"totalamount", "total_amount", "amount", "totalamounts", "totalamounttotals"}
    targets = [
        c for c in df.columns
        if _normalize_name(c) in amount_aliases and pd.api.types.is_numeric_dtype(df[c])
    ]
    if not targets:
        return styler
    return styler.format(formatter="{:.2f}", subset=pd.IndexSlice[:, targets])
def _format_date_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Format date columns to show only date part"""
df_formatted = df.copy()
for col in df_formatted.columns:
if 'created_at' in col.lower() or 'date' in col.lower():
if pd.api.types.is_datetime64_any_dtype(df_formatted[col]):
df_formatted[col] = df_formatted[col].dt.date
else:
# Try to convert to datetime first
try:
df_formatted[col] = pd.to_datetime(df_formatted[col]).dt.date
except:
pass
return df_formatted
def _pick_existing_columns(df: pd.DataFrame, names: list[str]) -> list[str]:
lower_map = {c.lower(): c for c in df.columns}
picked = []
for n in names:
if n.lower() in lower_map:
picked.append(lower_map[n.lower()])
return picked
def _stat_card(title: str, value: int | str, color: str, emoji: str) -> str:
    """Return an HTML snippet for a small hoverable stat card.

    Hover animation comes from the page-level `.stat-card` CSS injected by
    render_page; *color* styles the value text.
    """
    return f"""
    <div class=\"stat-card\" style=\"display:flex;align-items:center;gap:12px;padding:14px 16px;border-radius:14px;background:#fff;border:1px solid rgba(15,23,42,0.06);box-shadow:0 10px 24px rgba(2,6,23,0.08);transition:transform .15s ease, box-shadow .15s ease;\">
        <div style=\"font-size:20px;\">{emoji}</div>
        <div>
            <div style=\"font-size:12px;color:#64748b;\">{title}</div>
            <div style=\"font-size:20px;font-weight:800;color:{color};\">{value}</div>
        </div>
    </div>
    """
def render_page():
    """Render the DataHub page: status stat cards, a searchable paginated
    table of tenant postings, and a CSV download of the filtered rows."""
    # Gate: only authenticated users may view this page.
    if st.session_state.get("auth_user") is None:
        st.warning("Please login to continue.")
        st.stop()
    # Store selector (required before loading data view)
    tenant_id, _ = render_store_selector()
    if not tenant_id:
        st.info("Please choose a store to view data.")
        return
    st.markdown(
        """
        <style>
        .stat-card:hover{transform:translateY(-2px);box-shadow:0 16px 36px rgba(2,6,23,0.12)}
        .stat-row{margin-bottom:14px;}
        .block-after-stats{margin-top:10px;}
        </style>
        """,
        unsafe_allow_html=True,
    )
    st.title("DataHub")
    st.caption("Inspect data from Warehouse.")
    st.info("Connected to database ✅.")
    df = _load_tenant_data(tenant_id)
    status_col_global = _detect_status_column(df)
    if status_col_global:
        s = df[status_col_global].astype(str).str.lower()
        # NOTE(review): a pandas Series has no 'str_contains' attribute, so
        # hasattr(s, 'str_contains') is always False and the .str.contains
        # fallback always runs — these ternaries can be simplified to plain
        # s.str.contains(...).sum() calls.
        ok = s.str_contains("success|ok|completed|done|active").sum() if hasattr(s, 'str_contains') else s.str.contains("success|ok|completed|done|active").sum()
        bad = s.str_contains("fail|error|dead|invalid").sum() if hasattr(s, 'str_contains') else s.str.contains("fail|error|dead|invalid").sum()
        pend = s.str_contains("pending|queue|waiting|processing").sum() if hasattr(s, 'str_contains') else s.str.contains("pending|queue|waiting|processing").sum()
        total = len(df)
        st.markdown('<div class="stat-row">', unsafe_allow_html=True)
        c1, c2, c3, c4 = st.columns([1,1,1,2])
        # NOTE(review): the emoji argument is empty in these cards — possibly
        # lost glyphs; confirm the intended icons.
        with c1: st.markdown(_stat_card("Success", ok, "#166534", ""), unsafe_allow_html=True)
        with c2: st.markdown(_stat_card("Failed", bad, "#991B1B", ""), unsafe_allow_html=True)
        with c3: st.markdown(_stat_card("Pending", pend, "#92400E", ""), unsafe_allow_html=True)
        with c4: st.caption(f"Total rows: {total}")
        st.markdown('</div>', unsafe_allow_html=True)
    # Default (minimal) column set for the table view.
    minimal_names = [
        "id",
        "created_at",
        "outlet_name",
        "processing_type",
        "total_amount",
        "triumph_status",
        "triumph_event",
    ]
    minimal_cols = _pick_existing_columns(df, minimal_names)
    # Controls row: search only
    q = st.text_input("Search", placeholder="Type to filter rows across all columns")
    # Apply global search
    filtered = df
    if q:
        q_lower = q.lower()
        filtered = filtered[filtered.apply(lambda r: r.astype(str).str.lower().str.contains(q_lower).any(), axis=1)]
    # Always use minimal columns
    visible_cols = minimal_cols
    if visible_cols:
        filtered = filtered[visible_cols]
    # Pagination (moved below the table; small controls)
    total_rows = len(filtered)
    default_page_size = 25
    total_pages = max(1, (total_rows + default_page_size - 1) // default_page_size)
    page_num_state_key = "payload_page_num"
    if page_num_state_key not in st.session_state:
        st.session_state[page_num_state_key] = 1
    start = (st.session_state[page_num_state_key] - 1) * default_page_size
    end = start + default_page_size
    page_df = filtered.iloc[start:end]
    # Build display names and style
    display_map = _build_display_map(page_df)
    display_df = page_df.rename(columns=display_map)
    # Format date columns
    display_df = _format_date_columns(display_df)
    status_col_original = _detect_status_column(page_df)
    # display_map.get(None) is simply None when no status column was found.
    status_col_display = display_map.get(status_col_original)
    styled = _zebra_style(display_df)
    styled = _format_two_decimals_for_amounts(styled, display_df)
    # Always apply status badges
    if status_col_display:
        styled = _format_status_with_emoji(styled, display_df, status_col_display)
        styled = _badge_status_cells(styled, display_df, status_col_display)
        # NOTE(review): amount formatting is applied a second time here —
        # harmless but redundant.
        styled = _format_two_decimals_for_amounts(styled, display_df)
        styled = styled.set_table_styles([
            {"selector": "th", "props": "position: sticky; top: 0; background: #F0F6FF; color:#0F172A; font-weight:700;"},
            {"selector": "tbody td", "props": "border-top: 1px solid rgba(15,23,42,0.06);"},
            {"selector": "table", "props": "border-collapse: separate; border-spacing: 0;"},
        ]).hide(axis="index")
    st.dataframe(styled, use_container_width=True, height=520)
    # Bottom pagination controls
    p1, p2, p3 = st.columns([1, 2, 1])
    with p1:
        st.caption(f"Showing {len(page_df)} of {total_rows} rows")
    with p2:
        st.caption("Page")
        st.session_state[page_num_state_key] = st.number_input(
            " ", min_value=1, max_value=total_pages, value=st.session_state[page_num_state_key], step=1, label_visibility="collapsed")
    with p3:
        # Export the full filtered set (all pages), with display headers.
        download_df = filtered.rename(columns=_build_display_map(filtered))
        st.download_button(
            "Download filtered CSV",
            data=download_df.to_csv(index=False).encode("utf-8"),
            file_name="tenantpostings_filtered.csv",
            use_container_width=True,
        )