Push the rest
@@ -1,59 +1,68 @@
APP_NAME=otel-bi-backend
# ---------------------------------------------------------------------------
# OTel BI Backend — local development (without Docker)
# Copy to .env and fill in your values.
# Run services from the backend/ directory so pydantic-settings finds .env.
# ---------------------------------------------------------------------------

APP_ENV=dev
LOG_LEVEL=INFO
API_HOST=0.0.0.0
API_PORT=8000

# ============================================================
# Go analytics service (same image, ROLE=analytics)
# Set this to wherever the analytics container is reachable.
# ============================================================
ANALYTICS_SERVICE_URL=http://localhost:8080

# MSSQL — required when ROLE=analytics
# go-mssqldb DSN: sqlserver://user:pass@host:port?database=name&...
AW_MSSQL_DSN=sqlserver://sa:YourStrongPassword123!@localhost:1433?database=AdventureWorksDW2022&TrustServerCertificate=true&ApplicationIntent=ReadOnly
WWI_MSSQL_DSN=sqlserver://sa:YourStrongPassword123!@localhost:1433?database=WideWorldImportersDW&TrustServerCertificate=true&ApplicationIntent=ReadOnly

# ============================================================
# PostgreSQL — write store for derived data
# ============================================================
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DATABASE=otel_bi
POSTGRES_USERNAME=otel_bi
POSTGRES_PASSWORD=otel_bi_dev
# prefer for dev, require for production
POSTGRES_SSLMODE=prefer

# Optional: override the generated connection URL directly
# POSTGRES_CONNECTION_STRING=postgresql+psycopg://otel_bi:otel_bi_dev@localhost:5432/otel_bi?sslmode=prefer

# ============================================================
# Frontend JWT validation
# Validates the Bearer token the browser sends on every request.
# ============================================================
# Set false to disable auth entirely (dev only)
REQUIRE_FRONTEND_AUTH=false

# When REQUIRE_FRONTEND_AUTH=true, fill in your OIDC provider:
# FRONTEND_JWT_ISSUER_URL=https://your-idp.example.com/realms/your-realm
# FRONTEND_JWT_AUDIENCE=your-api-audience
# FRONTEND_JWT_JWKS_URL=https://your-idp.example.com/realms/your-realm/protocol/openid-connect/certs
# FRONTEND_REQUIRED_SCOPES=openid profile

# ============================================================
# Frontend OIDC runtime config (served to the SPA via GET /api/config,
# NOT baked into the JS bundle)
# ============================================================
# FRONTEND_OIDC_CLIENT_ID=otel-bi-frontend
# FRONTEND_OIDC_SCOPE=openid profile email

CORS_ORIGINS=http://localhost:5173

MSSQL_HOST=localhost
MSSQL_PORT=1433
MSSQL_USERNAME=readonly_user
MSSQL_PASSWORD=readonly_password
MSSQL_DRIVER=ODBC Driver 18 for SQL Server
MSSQL_TRUST_SERVER_CERTIFICATE=false

WWI_DATABASE=WorldWideImporters
AW_DATABASE=AdventureWorks2022DWH
# Optional direct URLs (override generated URLs):
# WWI_CONNECTION_STRING=mssql+pyodbc://user:pass@host:1433/WorldWideImporters?driver=ODBC+Driver+18+for+SQL+Server&ApplicationIntent=ReadOnly
# AW_CONNECTION_STRING=mssql+pyodbc://user:pass@host:1433/AdventureWorks2022DWH?driver=ODBC+Driver+18+for+SQL+Server&ApplicationIntent=ReadOnly

POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DATABASE=otel_bi_app
POSTGRES_USERNAME=otel_bi_app
POSTGRES_PASSWORD=otel_bi_app
POSTGRES_SSLMODE=require
# Optional direct URL:
# POSTGRES_CONNECTION_STRING=postgresql+psycopg://otel_bi_app:otel_bi_app@localhost:5432/otel_bi_app?sslmode=prefer
POSTGRES_REQUIRED=true

QUERY_SERVICE_URL=http://localhost:8101
ANALYTICS_SERVICE_URL=http://localhost:8102
PERSISTENCE_SERVICE_URL=http://localhost:8103
REQUEST_TIMEOUT_SECONDS=20
REQUIRE_FRONTEND_AUTH=true
FRONTEND_JWT_ISSUER_URL=https://<your-idp-domain>/realms/<your-realm>
FRONTEND_JWT_AUDIENCE=otel-bi-api
FRONTEND_JWT_JWKS_URL=https://<your-idp-domain>/realms/<your-realm>/protocol/openid-connect/certs
FRONTEND_JWT_ALGORITHM=RS256
FRONTEND_REQUIRED_SCOPES=openid profile email
FRONTEND_CLOCK_SKEW_SECONDS=30
INTERNAL_SERVICE_AUTH_ENABLED=true
INTERNAL_SERVICE_SHARED_SECRET=replace-with-strong-random-secret-min-32-bytes
INTERNAL_SERVICE_TOKEN_TTL_SECONDS=120
INTERNAL_SERVICE_TOKEN_AUDIENCE=bi-internal
INTERNAL_SERVICE_ALLOWED_ISSUERS=api-gateway
INTERNAL_TOKEN_CLOCK_SKEW_SECONDS=15
# ============================================================
# Reports — filesystem path for generated XLSX + PDF files
# Mount a K8s CSI / SMB PVC here in production.
# ============================================================
REPORT_OUTPUT_DIR=/tmp/otel-bi-reports

# ============================================================
# OpenTelemetry
# ============================================================
OTEL_SERVICE_NAME=otel-bi-backend
OTEL_SERVICE_NAMESPACE=final-thesis
OTEL_COLLECTOR_ENDPOINT=http://localhost:4318
# K8s + Alloy example:
# OTEL_COLLECTOR_ENDPOINT=http://alloy.monitoring.svc.cluster.local:4318
OTEL_EXPORT_TIMEOUT_MS=10000

FORECAST_HORIZON_DAYS=30
DEFAULT_HISTORY_DAYS=365
RANKING_DEFAULT_TOP_N=10
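The INTERNAL_SERVICE_* block above describes a shared-secret handshake for service-to-service calls: the gateway mints a short-lived signed token, and downstream services check its signature, issuer, audience, and expiry, with a small clock-skew allowance. The actual implementation lives elsewhere in the repo; what follows is only a minimal Go sketch of that flow using github.com/golang-jwt/jwt/v5 (which appears in go.sum further down). The function names and the HS256 signing choice are assumptions for illustration, not the project's code.

package main

import (
    "fmt"
    "time"

    "github.com/golang-jwt/jwt/v5"
)

// mintInternalToken signs a short-lived HS256 token carrying the issuer and
// audience that the env file configures. Hypothetical helper, for illustration.
func mintInternalToken(secret []byte, issuer, audience string, ttl time.Duration) (string, error) {
    now := time.Now()
    claims := jwt.RegisteredClaims{
        Issuer:    issuer,
        Audience:  jwt.ClaimStrings{audience},
        IssuedAt:  jwt.NewNumericDate(now),
        ExpiresAt: jwt.NewNumericDate(now.Add(ttl)),
    }
    return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

// verifyInternalToken checks the signature, algorithm, audience, issuer, and
// expiry, tolerating the configured clock skew. Also hypothetical.
func verifyInternalToken(token string, secret []byte, issuer, audience string, skew time.Duration) error {
    _, err := jwt.Parse(token,
        func(t *jwt.Token) (interface{}, error) { return secret, nil },
        jwt.WithValidMethods([]string{"HS256"}),
        jwt.WithAudience(audience),
        jwt.WithIssuer(issuer),
        jwt.WithLeeway(skew),
    )
    return err
}

func main() {
    // Values mirror the .env defaults above.
    secret := []byte("replace-with-strong-random-secret-min-32-bytes")
    tok, err := mintInternalToken(secret, "api-gateway", "bi-internal", 120*time.Second)
    if err != nil {
        panic(err)
    }
    fmt.Println(verifyInternalToken(tok, secret, "api-gateway", "bi-internal", 15*time.Second)) // <nil>
}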
backend/Dockerfile (new file, 73 lines)
@@ -0,0 +1,73 @@
# syntax=docker/dockerfile:1.7

# ---------------------------------------------------------------------------
# Stage 1 — Build Go analytics service
# ---------------------------------------------------------------------------
FROM rockylinux/rockylinux:10 AS go-build

RUN dnf install -y golang && dnf clean all

WORKDIR /src
COPY analytics/go.mod analytics/go.sum ./
RUN go mod download
COPY analytics/ .
RUN CGO_ENABLED=0 GOOS=linux go build -o /analytics-server ./cmd/server

# ---------------------------------------------------------------------------
# Stage 2 — Python base
# ---------------------------------------------------------------------------
FROM rockylinux/rockylinux:10 AS base

RUN dnf install -y python3 && dnf clean all

ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    UV_COMPILE_BYTECODE=1 \
    UV_LINK_MODE=copy \
    UV_PROJECT_ENVIRONMENT=/app/.venv

COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv

WORKDIR /app

# ---------------------------------------------------------------------------
# Stage 3 — Python dependencies
# ---------------------------------------------------------------------------
FROM base AS deps
COPY pyproject.toml uv.lock* ./
RUN --mount=type=cache,target=/root/.cache/uv \
    uv sync --frozen --no-install-project --no-dev

# ---------------------------------------------------------------------------
# Stage 4 — Python application
# ---------------------------------------------------------------------------
FROM deps AS app-build
COPY app/ ./app/
RUN --mount=type=cache,target=/root/.cache/uv \
    uv sync --frozen --no-dev

# ---------------------------------------------------------------------------
# Stage 5 — Final image (Python + Go binary)
# ---------------------------------------------------------------------------
FROM base AS final

RUN groupadd --gid 10001 appgroup && \
    useradd --uid 10001 --gid 10001 --no-create-home --shell /sbin/nologin appuser

COPY --from=go-build /analytics-server /usr/local/bin/analytics-server
COPY --from=app-build --chown=appuser:appgroup /app /app

USER appuser
WORKDIR /app

ENV PATH="/app/.venv/bin:$PATH" \
    ROLE=api

EXPOSE 8000 8080

ENTRYPOINT ["/bin/sh", "-c", "\
    if [ \"$ROLE\" = 'analytics' ]; then \
        /usr/local/bin/analytics-server; \
    else \
        uvicorn app.main:app --host 0.0.0.0 --port 8000 --no-access-log; \
    fi"]
backend/analytics/cmd/server/main.go (new file, 154 lines)
@@ -0,0 +1,154 @@
package main

import (
    "context"
    "fmt"
    "log/slog"
    "net/http"
    "os"
    "os/signal"
    "syscall"
    "time"

    "otel-bi-analytics/internal/config"
    "otel-bi-analytics/internal/db"
    "otel-bi-analytics/internal/handler"
    "otel-bi-analytics/internal/scheduler"

    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
    "go.opentelemetry.io/otel/propagation"
    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
    slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
        Level: slog.LevelInfo,
    })))

    cfg := config.Load()
    ctx := context.Background()

    shutdown, err := setupOtel(ctx, cfg)
    if err != nil {
        slog.Error("failed to set up OTel", "err", err)
        os.Exit(1)
    }

    awDB, err := db.Open(ctx, cfg.AWConnStr, "aw")
    if err != nil {
        slog.Error("failed to connect to AW MSSQL", "err", err)
        os.Exit(1)
    }
    defer awDB.Close()

    wwiDB, err := db.Open(ctx, cfg.WWIConnStr, "wwi")
    if err != nil {
        slog.Error("failed to connect to WWI MSSQL", "err", err)
        os.Exit(1)
    }
    defer wwiDB.Close()

    pgPool, err := db.OpenPostgres(ctx, cfg.PostgresDSN)
    if err != nil {
        slog.Error("failed to connect to PostgreSQL", "err", err)
        os.Exit(1)
    }
    defer pgPool.Close()

    sched := scheduler.New(awDB, wwiDB, pgPool, cfg.DefaultTopN)
    sched.Start()
    defer sched.Stop()

    mux := http.NewServeMux()
    h := handler.New(awDB, wwiDB, pgPool, sched, cfg.DefaultTopN, cfg.ForecastHorizonDays, cfg.DefaultHistoryDays)
    h.RegisterRoutes(mux)

    srv := &http.Server{
        Addr:         fmt.Sprintf(":%d", cfg.Port),
        Handler:      otelhttp.NewHandler(mux, "analytics-service"),
        ReadTimeout:  60 * time.Second,
        WriteTimeout: 120 * time.Second,
        IdleTimeout:  120 * time.Second,
    }

    done := make(chan struct{})
    go func() {
        quit := make(chan os.Signal, 1)
        signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
        <-quit
        slog.Info("shutting down")

        ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
        defer cancel()
        _ = srv.Shutdown(ctx)
        shutdown(ctx)
        close(done)
    }()

    slog.Info("analytics service started", "port", cfg.Port)
    if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
        slog.Error("server error", "err", err)
        os.Exit(1)
    }
    <-done
    slog.Info("shutdown complete")
}

func setupOtel(ctx context.Context, cfg config.Config) (func(context.Context), error) {
    res, err := resource.New(ctx,
        resource.WithAttributes(
            semconv.ServiceName(cfg.OtelServiceName),
            semconv.ServiceNamespace(cfg.OtelServiceNamespace),
        ),
    )
    if err != nil {
        return nil, fmt.Errorf("create OTel resource: %w", err)
    }

    traceExporter, err := otlptracehttp.New(ctx,
        otlptracehttp.WithEndpointURL(cfg.OtelCollectorEndpoint+"/v1/traces"),
        otlptracehttp.WithInsecure(),
    )
    if err != nil {
        return nil, fmt.Errorf("create OTLP trace exporter: %w", err)
    }

    tp := sdktrace.NewTracerProvider(
        sdktrace.WithBatcher(traceExporter),
        sdktrace.WithResource(res),
    )
    otel.SetTracerProvider(tp)
    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
        propagation.TraceContext{},
        propagation.Baggage{},
    ))

    metricExporter, err := otlpmetrichttp.New(ctx,
        otlpmetrichttp.WithEndpointURL(cfg.OtelCollectorEndpoint+"/v1/metrics"),
        otlpmetrichttp.WithInsecure(),
    )
    if err != nil {
        return nil, fmt.Errorf("create OTLP metric exporter: %w", err)
    }

    mp := sdkmetric.NewMeterProvider(
        sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter, sdkmetric.WithInterval(15*time.Second))),
        sdkmetric.WithResource(res),
    )
    otel.SetMeterProvider(mp)

    return func(ctx context.Context) {
        if err := tp.Shutdown(ctx); err != nil {
            slog.Error("trace provider shutdown error", "err", err)
        }
        if err := mp.Shutdown(ctx); err != nil {
            slog.Error("metric provider shutdown error", "err", err)
        }
    }, nil
}
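main.go leans on internal/config, which is not part of this excerpt. As a hedged sketch only, the Load() it implies could look like the following; the struct fields are inferred from their use in main.go and the .env file above, while the ANALYTICS_PORT and POSTGRES_CONNECTION_STRING mappings are guesses, not the project's actual wiring.

// Hypothetical internal/config/config.go, reconstructed from usage.
package config

import (
    "os"
    "strconv"
)

type Config struct {
    Port                  int
    AWConnStr             string
    WWIConnStr            string
    PostgresDSN           string
    DefaultTopN           int
    ForecastHorizonDays   int
    DefaultHistoryDays    int
    OtelServiceName       string
    OtelServiceNamespace  string
    OtelCollectorEndpoint string
}

// envStr returns the environment variable or a default when unset.
func envStr(key, def string) string {
    if v := os.Getenv(key); v != "" {
        return v
    }
    return def
}

// envInt parses an integer environment variable, falling back on error.
func envInt(key string, def int) int {
    if v := os.Getenv(key); v != "" {
        if n, err := strconv.Atoi(v); err == nil {
            return n
        }
    }
    return def
}

// Load reads configuration from the environment (see the .env example above).
func Load() Config {
    return Config{
        Port:                  envInt("ANALYTICS_PORT", 8080), // variable name assumed
        AWConnStr:             envStr("AW_MSSQL_DSN", ""),
        WWIConnStr:            envStr("WWI_MSSQL_DSN", ""),
        PostgresDSN:           envStr("POSTGRES_CONNECTION_STRING", ""), // mapping assumed
        DefaultTopN:           envInt("RANKING_DEFAULT_TOP_N", 10),
        ForecastHorizonDays:   envInt("FORECAST_HORIZON_DAYS", 30),
        DefaultHistoryDays:    envInt("DEFAULT_HISTORY_DAYS", 365),
        OtelServiceName:       envStr("OTEL_SERVICE_NAME", "otel-bi-analytics"),
        OtelServiceNamespace:  envStr("OTEL_SERVICE_NAMESPACE", "final-thesis"),
        OtelCollectorEndpoint: envStr("OTEL_COLLECTOR_ENDPOINT", "http://localhost:4318"),
    }
}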
backend/analytics/go.mod (new file, 51 lines)
@@ -0,0 +1,51 @@
module otel-bi-analytics

go 1.25.0

require (
    github.com/jackc/pgx/v5 v5.7.2
    github.com/microsoft/go-mssqldb v1.7.2
    github.com/robfig/cron/v3 v3.0.1
    github.com/xuri/excelize/v2 v2.8.1
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
    go.opentelemetry.io/otel v1.43.0
    go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
    go.opentelemetry.io/otel/metric v1.43.0
    go.opentelemetry.io/otel/sdk v1.43.0
    go.opentelemetry.io/otel/sdk/metric v1.43.0
    go.opentelemetry.io/otel/trace v1.43.0
)

require (
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/cenkalti/backoff/v5 v5.0.3 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/go-logr/logr v1.4.3 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
    github.com/golang-sql/sqlexp v0.1.0 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
    github.com/jackc/puddle/v2 v2.2.2 // indirect
    github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
    github.com/richardlehane/mscfb v1.0.4 // indirect
    github.com/richardlehane/msoleps v1.0.3 // indirect
    github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 // indirect
    github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 // indirect
    go.opentelemetry.io/auto/sdk v1.2.1 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
    go.opentelemetry.io/proto/otlp v1.10.0 // indirect
    golang.org/x/crypto v0.49.0 // indirect
    golang.org/x/net v0.52.0 // indirect
    golang.org/x/sync v0.20.0 // indirect
    golang.org/x/sys v0.42.0 // indirect
    golang.org/x/text v0.35.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
    google.golang.org/grpc v1.80.0 // indirect
    google.golang.org/protobuf v1.36.11 // indirect
)
backend/analytics/go.sum (new file, 128 lines)
@@ -0,0 +1,128 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM=
github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/richardlehane/msoleps v1.0.3 h1:aznSZzrwYRl3rLKRT3gUk9am7T/mLNSnJINvN0AQoVM=
github.com/richardlehane/msoleps v1.0.3/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 h1:Chd9DkqERQQuHpXjR/HSV1jLZA6uaoiwwH3vSuF3IW0=
github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
github.com/xuri/excelize/v2 v2.8.1 h1:pZLMEwK8ep+CLIUWpWmvW8IWE/yxqG0I1xcN6cVMGuQ=
github.com/xuri/excelize/v2 v2.8.1/go.mod h1:oli1E4C3Pa5RXg1TBXn4ENCXDV5JUMlBluUhG7c+CEE=
github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 h1:qhbILQo1K3mphbwKh1vNm4oGezE1eF9fQWmNiIpSfI4=
github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
backend/analytics/internal/analytics/aw.go (new file, 724 lines)
@@ -0,0 +1,724 @@
package analytics

import (
    "context"
    "database/sql"
    "fmt"
    "math"
    "sort"
    "time"

    mssqldb "otel-bi-analytics/internal/db"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

var awTracer = otel.Tracer("otel-bi.analytics.aw")

// ---------------------------------------------------------------------------
// SQL queries (with fallback variants)
// ---------------------------------------------------------------------------

var awDailySalesQueries = []string{
    `SELECT
        CAST(d.FullDateAlternateKey AS date) AS sale_date,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimDate AS d ON d.DateKey = f.OrderDateKey
    GROUP BY CAST(d.FullDateAlternateKey AS date)
    UNION ALL
    SELECT
        CAST(d.FullDateAlternateKey AS date) AS sale_date,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        SUM(r.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimDate AS d ON d.DateKey = r.OrderDateKey
    GROUP BY CAST(d.FullDateAlternateKey AS date)
    ORDER BY sale_date`,
    `SELECT
        CAST(OrderDate AS date) AS sale_date,
        SUM(SalesAmount) AS revenue,
        SUM(TotalProductCost) AS cost,
        SUM(OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales
    GROUP BY CAST(OrderDate AS date)
    ORDER BY sale_date`,
}

var awRepPerfQueries = []string{
    `SELECT
        e.EmployeeKey AS employee_key,
        e.FirstName + ' ' + e.LastName AS rep_name,
        COALESCE(e.Title, 'Sales Rep') AS rep_title,
        COALESCE(st.SalesTerritoryRegion, 'Unknown') AS territory,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        COUNT_BIG(*) AS orders,
        AVG(r.SalesAmount) AS avg_deal_size
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimEmployee AS e ON e.EmployeeKey = r.EmployeeKey
    INNER JOIN dbo.DimSalesTerritory AS st ON st.SalesTerritoryKey = r.SalesTerritoryKey
    WHERE e.SalesPersonFlag = 1
    GROUP BY e.EmployeeKey, e.FirstName, e.LastName, e.Title, st.SalesTerritoryRegion
    ORDER BY revenue DESC`,
    `SELECT
        e.EmployeeKey AS employee_key,
        e.FirstName + ' ' + e.LastName AS rep_name,
        COALESCE(e.Title, 'Employee') AS rep_title,
        'Unknown' AS territory,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        COUNT_BIG(*) AS orders,
        AVG(r.SalesAmount) AS avg_deal_size
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimEmployee AS e ON e.EmployeeKey = r.EmployeeKey
    GROUP BY e.EmployeeKey, e.FirstName, e.LastName, e.Title
    ORDER BY revenue DESC`,
}

var awProductDemandQueries = []string{
    `SELECT
        p.ProductAlternateKey AS product_id,
        p.EnglishProductName AS product_name,
        COALESCE(pc.EnglishProductCategoryName, 'Unknown') AS category,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
    LEFT JOIN dbo.DimProductSubcategory AS sc ON sc.ProductSubcategoryKey = p.ProductSubcategoryKey
    LEFT JOIN dbo.DimProductCategory AS pc ON pc.ProductCategoryKey = sc.ProductCategoryKey
    GROUP BY p.ProductAlternateKey, p.EnglishProductName, pc.EnglishProductCategoryName
    ORDER BY revenue DESC`,
    `SELECT
        CAST(f.ProductKey AS nvarchar(50)) AS product_id,
        COALESCE(p.EnglishProductName, CAST(f.ProductKey AS nvarchar(50))) AS product_name,
        'Unknown' AS category,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    LEFT JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
    GROUP BY f.ProductKey, p.EnglishProductName
    ORDER BY revenue DESC`,
}

// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------

type dailySalesRow struct {
    Date     time.Time
    Revenue  float64
    Cost     float64
    Quantity float64
    Orders   float64
}

type SalesKPIs struct {
    TotalRevenue    float64 `json:"total_revenue"`
    GrossMarginPct  float64 `json:"gross_margin_pct"`
    TotalQuantity   float64 `json:"total_quantity"`
    AvgOrderValue   float64 `json:"avg_order_value"`
    RecordsInWindow int     `json:"records_in_window"`
}

type DailySalesPoint struct {
    Date     string  `json:"date"`
    Revenue  float64 `json:"revenue"`
    Cost     float64 `json:"cost"`
    Quantity float64 `json:"quantity"`
}

type ForecastPoint struct {
    Date             string  `json:"date"`
    PredictedRevenue float64 `json:"predicted_revenue"`
    LowerBound       float64 `json:"lower_bound"`
    UpperBound       float64 `json:"upper_bound"`
}

type RepScore struct {
    Rank        int     `json:"rank"`
    EmployeeKey int     `json:"employee_key"`
    RepName     string  `json:"rep_name"`
    RepTitle    string  `json:"rep_title"`
    Territory   string  `json:"territory"`
    Revenue     float64 `json:"revenue"`
    Orders      int     `json:"orders"`
    AvgDealSize float64 `json:"avg_deal_size"`
    MarginPct   float64 `json:"margin_pct"`
    Score       float64 `json:"score"`
}

type ProductDemand struct {
    Rank        int     `json:"rank"`
    ProductID   string  `json:"product_id"`
    ProductName string  `json:"product_name"`
    Category    string  `json:"category"`
    Revenue     float64 `json:"revenue"`
    Quantity    float64 `json:"quantity"`
    Orders      int     `json:"orders"`
    MarginPct   float64 `json:"margin_pct"`
    DemandScore float64 `json:"demand_score"`
}

type AnomalyPoint struct {
    Date        string   `json:"date"`
    Revenue     float64  `json:"revenue"`
    RollingMean *float64 `json:"rolling_mean"`
    LowerBand   *float64 `json:"lower_band"`
    UpperBand   *float64 `json:"upper_band"`
    IsAnomaly   bool     `json:"is_anomaly"`
    ZScore      *float64 `json:"z_score"`
    Direction   *string  `json:"direction"`
}

type DataQualityResult struct {
    Status       string            `json:"status"`
    Checks       map[string]string `json:"checks"`
    FailedChecks []string          `json:"failed_checks"`
}

// ---------------------------------------------------------------------------
// Data fetching
// ---------------------------------------------------------------------------

func fetchAWDailySales(ctx context.Context, db *sql.DB) ([]dailySalesRow, error) {
    ctx, span := awTracer.Start(ctx, "aw.query.daily_sales")
    defer span.End()

    rows, err := mssqldb.QueryFirst(ctx, db, awDailySalesQueries)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    byDate := make(map[string]*dailySalesRow)
    var keys []string

    for rows.Next() {
        var r dailySalesRow
        var revenue, cost, quantity, orders sql.NullFloat64
        if err := rows.Scan(&r.Date, &revenue, &cost, &quantity, &orders); err != nil {
            return nil, fmt.Errorf("scan daily_sales: %w", err)
        }
        r.Revenue = revenue.Float64
        r.Cost = cost.Float64
        r.Quantity = quantity.Float64
        r.Orders = orders.Float64

        key := r.Date.Format("2006-01-02")
        if existing, ok := byDate[key]; ok {
            existing.Revenue += r.Revenue
            existing.Cost += r.Cost
            existing.Quantity += r.Quantity
            existing.Orders += r.Orders
        } else {
            cp := r
            byDate[key] = &cp
            keys = append(keys, key)
        }
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    sort.Strings(keys)
    result := make([]dailySalesRow, len(keys))
    for i, k := range keys {
        result[i] = *byDate[k]
    }
    return result, nil
}

// ---------------------------------------------------------------------------
// KPIs
// ---------------------------------------------------------------------------

func AWGetSalesKPIs(ctx context.Context, db *sql.DB) (*SalesKPIs, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.kpis")
    defer span.End()

    series, err := fetchAWDailySales(ctx, db)
    if err != nil {
        return nil, err
    }

    cutoff := time.Now().UTC().AddDate(0, 0, -180)
    var totalRevenue, totalCost, totalQuantity, totalOrders float64
    var count int
    for _, r := range series {
        if r.Date.Before(cutoff) {
            continue
        }
        totalRevenue += r.Revenue
        totalCost += r.Cost
        totalQuantity += r.Quantity
        totalOrders += r.Orders
        count++
    }
    if totalOrders < 1 {
        totalOrders = 1
    }
    var marginPct float64
    if totalRevenue > 0 {
        marginPct = (totalRevenue - totalCost) / totalRevenue * 100
    }
    return &SalesKPIs{
        TotalRevenue:    round2(totalRevenue),
        GrossMarginPct:  round2(marginPct),
        TotalQuantity:   round2(totalQuantity),
        AvgOrderValue:   round2(totalRevenue / totalOrders),
        RecordsInWindow: count,
    }, nil
}

// ---------------------------------------------------------------------------
// Sales history
// ---------------------------------------------------------------------------

func AWGetSalesHistory(ctx context.Context, db *sql.DB, daysBack int) ([]DailySalesPoint, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.sales_history",
        trace.WithAttributes(attribute.Int("days_back", daysBack)))
    defer span.End()

    series, err := fetchAWDailySales(ctx, db)
    if err != nil {
        return nil, err
    }

    cutoff := time.Now().UTC().AddDate(0, 0, -daysBack)
    var result []DailySalesPoint
    for _, r := range series {
        if r.Date.Before(cutoff) {
            continue
        }
        result = append(result, DailySalesPoint{
            Date:     r.Date.Format("2006-01-02"),
            Revenue:  round2(r.Revenue),
            Cost:     round2(r.Cost),
            Quantity: round2(r.Quantity),
        })
    }
    return result, nil
}

// ---------------------------------------------------------------------------
// Sales forecast (OLS + weekday seasonality)
// ---------------------------------------------------------------------------

func AWGetSalesForecast(ctx context.Context, db *sql.DB, horizonDays int) ([]ForecastPoint, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.forecast",
        trace.WithAttributes(attribute.Int("horizon_days", horizonDays)))
    defer span.End()

    series, err := fetchAWDailySales(ctx, db)
    if err != nil {
        return nil, err
    }

    cutoff := time.Now().UTC().AddDate(0, 0, -720)
    var window []dailySalesRow
    for _, r := range series {
        if !r.Date.Before(cutoff) {
            window = append(window, r)
        }
    }
    if len(window) == 0 {
        return nil, nil
    }

    // Fill daily gaps with 0 (resample to daily)
    start := window[0].Date
    end := window[len(window)-1].Date
    byDate := make(map[string]float64, len(window))
    for _, r := range window {
        byDate[r.Date.Format("2006-01-02")] = r.Revenue
    }
    var revenues []float64
    var dates []time.Time
    for d := start; !d.After(end); d = d.AddDate(0, 0, 1) {
        key := d.Format("2006-01-02")
        revenues = append(revenues, byDate[key])
        dates = append(dates, d)
    }

    // OLS fit
    n := len(revenues)
    xs := make([]float64, n)
    for i := range xs {
        xs[i] = float64(i)
    }
    slope, intercept := ols(xs, revenues)

    // Residual sigma for confidence interval
    var ssRes float64
    for i, y := range revenues {
        pred := intercept + slope*xs[i]
        d := y - pred
        ssRes += d * d
    }
    sigma := math.Sqrt(ssRes / float64(maxInt(n-2, 1)))

    // Weekday seasonality factors (Python weekday: 0=Mon)
    weekdayRevenues := make([][]float64, 7)
    for i, r := range revenues {
        wd := (int(dates[i].Weekday()) + 6) % 7
        weekdayRevenues[wd] = append(weekdayRevenues[wd], r)
    }
    overallMean := meanOf(revenues)
    weekdayFactors := make([]float64, 7)
    for wd := range weekdayFactors {
        if len(weekdayRevenues[wd]) > 0 && overallMean > 0 {
            f := meanOf(weekdayRevenues[wd]) / overallMean
            if math.IsNaN(f) || math.IsInf(f, 0) {
                f = 1.0
            }
            weekdayFactors[wd] = f
        } else {
            weekdayFactors[wd] = 1.0
        }
    }

    // Forecast
    result := make([]ForecastPoint, horizonDays)
    lastDate := dates[len(dates)-1]
    for i := range result {
        step := i + 1
        day := lastDate.AddDate(0, 0, step)
        rawPred := intercept + slope*float64(n+i)
        wd := (int(day.Weekday()) + 6) % 7
        yhat := math.Max(rawPred*weekdayFactors[wd], 0)
        ci := 1.96 * sigma * math.Sqrt(1+float64(step)/float64(maxInt(n, 1)))
        result[i] = ForecastPoint{
            Date:             day.Format("2006-01-02"),
            PredictedRevenue: round2(yhat),
            LowerBound:       round2(math.Max(yhat-ci, 0)),
            UpperBound:       round2(yhat + ci),
        }
    }
    return result, nil
}

func maxInt(a, b int) int {
    if a > b {
        return a
    }
    return b
}

// ---------------------------------------------------------------------------
// Rep scores
// ---------------------------------------------------------------------------

func AWGetRepScores(ctx context.Context, db *sql.DB, topN int) ([]RepScore, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.rep_scores",
        trace.WithAttributes(attribute.Int("top_n", topN)))
    defer span.End()

    rows, err := mssqldb.QueryFirst(ctx, db, awRepPerfQueries)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    type rawRep struct {
        EmployeeKey int
        RepName     string
        RepTitle    string
        Territory   string
        Revenue     float64
        Cost        float64
        Orders      float64
        AvgDealSize float64
    }

    var raws []rawRep
    for rows.Next() {
        var r rawRep
        var rev, cost, orders, deal sql.NullFloat64
        if err := rows.Scan(&r.EmployeeKey, &r.RepName, &r.RepTitle, &r.Territory,
            &rev, &cost, &orders, &deal); err != nil {
            return nil, fmt.Errorf("scan rep_performance: %w", err)
        }
        r.Revenue = rev.Float64
        r.Cost = cost.Float64
        r.Orders = orders.Float64
        r.AvgDealSize = deal.Float64
        raws = append(raws, r)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    var maxRevenue, maxOrders, maxDeal float64
    for _, r := range raws {
        maxRevenue = math.Max(maxRevenue, r.Revenue)
        maxOrders = math.Max(maxOrders, r.Orders)
        maxDeal = math.Max(maxDeal, r.AvgDealSize)
    }
    maxRevenue = maxF(maxRevenue, 1)
    maxOrders = maxF(maxOrders, 1)
    maxDeal = maxF(maxDeal, 1)

    type scored struct {
        raw   rawRep
        score float64
    }
    scoreds := make([]scored, len(raws))
    for i, r := range raws {
        s := 0.50*(r.Revenue/maxRevenue) +
            0.30*(r.Orders/maxOrders) +
            0.20*(r.AvgDealSize/maxDeal)
        scoreds[i] = scored{r, s}
    }
    sort.Slice(scoreds, func(i, j int) bool { return scoreds[i].score > scoreds[j].score })
    if topN < len(scoreds) {
        scoreds = scoreds[:topN]
    }

    result := make([]RepScore, len(scoreds))
    for i, s := range scoreds {
        var marginPct float64
        if s.raw.Revenue > 0 {
            marginPct = (s.raw.Revenue - s.raw.Cost) / s.raw.Revenue * 100
        }
        result[i] = RepScore{
            Rank:        i + 1,
            EmployeeKey: s.raw.EmployeeKey,
            RepName:     s.raw.RepName,
            RepTitle:    s.raw.RepTitle,
            Territory:   s.raw.Territory,
            Revenue:     round2(s.raw.Revenue),
            Orders:      int(s.raw.Orders),
            AvgDealSize: round2(s.raw.AvgDealSize),
            MarginPct:   round2(marginPct),
            Score:       round2(s.score * 100),
        }
    }
    return result, nil
}

// ---------------------------------------------------------------------------
// Product demand
// ---------------------------------------------------------------------------

func AWGetProductDemand(ctx context.Context, db *sql.DB, topN int) ([]ProductDemand, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.product_demand",
        trace.WithAttributes(attribute.Int("top_n", topN)))
    defer span.End()

    rows, err := mssqldb.QueryFirst(ctx, db, awProductDemandQueries)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    type rawProd struct {
        ProductID   string
        ProductName string
        Category    string
        Revenue     float64
        Cost        float64
        Quantity    float64
        Orders      float64
    }

    var raws []rawProd
    for rows.Next() {
        var r rawProd
        var rev, cost, qty, orders sql.NullFloat64
        if err := rows.Scan(&r.ProductID, &r.ProductName, &r.Category, &rev, &cost, &qty, &orders); err != nil {
            return nil, fmt.Errorf("scan product_demand: %w", err)
        }
        r.Revenue = rev.Float64
        r.Cost = cost.Float64
        r.Quantity = qty.Float64
        r.Orders = orders.Float64
        raws = append(raws, r)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    var maxRevenue, maxOrders float64
    for _, r := range raws {
        maxRevenue = math.Max(maxRevenue, r.Revenue)
        maxOrders = math.Max(maxOrders, r.Orders)
    }
    maxRevenue = maxF(maxRevenue, 1)
    maxOrders = maxF(maxOrders, 1)

    type scored struct {
        raw   rawProd
        score float64
    }
    scoreds := make([]scored, len(raws))
    for i, r := range raws {
        var marginPct float64
        if r.Revenue > 0 {
            marginPct = (r.Revenue - r.Cost) / r.Revenue * 100
        }
        marginNorm := clamp01((marginPct + 100) / 200)
        s := 0.40*(r.Revenue/maxRevenue) + 0.35*(r.Orders/maxOrders) + 0.25*marginNorm
        scoreds[i] = scored{r, s}
    }
    sort.Slice(scoreds, func(i, j int) bool { return scoreds[i].score > scoreds[j].score })
    if topN < len(scoreds) {
        scoreds = scoreds[:topN]
    }

    result := make([]ProductDemand, len(scoreds))
    for i, s := range scoreds {
        var marginPct float64
        if s.raw.Revenue > 0 {
            marginPct = (s.raw.Revenue - s.raw.Cost) / s.raw.Revenue * 100
        }
        result[i] = ProductDemand{
            Rank:        i + 1,
            ProductID:   s.raw.ProductID,
            ProductName: s.raw.ProductName,
            Category:    s.raw.Category,
            Revenue:     round2(s.raw.Revenue),
            Quantity:    round2(s.raw.Quantity),
            Orders:      int(s.raw.Orders),
            MarginPct:   round2(marginPct),
            DemandScore: round2(s.score * 100),
        }
    }
    return result, nil
}

// ---------------------------------------------------------------------------
// Anomaly detection (rolling z-scores)
// ---------------------------------------------------------------------------

const (
    anomalyWindow     = 30
    anomalyThreshold  = 2.0
    anomalySeriesDays = 365
)

func AWRunAnomalyDetection(ctx context.Context, db *sql.DB) ([]AnomalyPoint, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.anomaly_detection")
    defer span.End()

    series, err := fetchAWDailySales(ctx, db)
    if err != nil {
        return nil, err
    }

    cutoff := time.Now().UTC().AddDate(0, 0, -anomalySeriesDays)
    var window []dailySalesRow
    for _, r := range series {
        if !r.Date.Before(cutoff) {
            window = append(window, r)
        }
    }
    if len(window) < anomalyWindow {
        return nil, nil
    }

    revenues := make([]float64, len(window))
    for i, r := range window {
        revenues[i] = r.Revenue
    }

    minPeriods := maxInt(7, anomalyWindow/4)
    means, stds := rollingMeanStd(revenues, anomalyWindow, minPeriods)

    result := make([]AnomalyPoint, len(window))
    for i, r := range window {
        pt := AnomalyPoint{
            Date:    r.Date.Format("2006-01-02"),
            Revenue: round2(r.Revenue),
        }
        if !math.IsNaN(means[i]) {
            m := round2(means[i])
            std := stds[i]
            lb := round2(means[i] - anomalyThreshold*std)
            ub := round2(means[i] + anomalyThreshold*std)
            pt.RollingMean = &m
            pt.LowerBand = &lb
            pt.UpperBand = &ub

            if std > 0 {
                z := round3((r.Revenue - means[i]) / std)
                pt.ZScore = &z
                pt.IsAnomaly = math.Abs(z) > anomalyThreshold
                if r.Revenue > means[i] {
                    d := "high"
                    pt.Direction = &d
                } else {
                    d := "low"
                    pt.Direction = &d
                }
            }
        }
        result[i] = pt
    }
    span.SetAttributes(attribute.Int("series_points", len(result)))
    return result, nil
}

// ---------------------------------------------------------------------------
// Data quality
// ---------------------------------------------------------------------------

var awDQChecks = []struct {
    name string
    sql  string
}{
    {"fact_internet_sales_rows", "SELECT COUNT_BIG(*) AS cnt FROM dbo.FactInternetSales"},
    {"fact_reseller_sales_rows", "SELECT COUNT_BIG(*) AS cnt FROM dbo.FactResellerSales"},
    {"active_sales_reps", "SELECT COUNT_BIG(*) AS cnt FROM dbo.DimEmployee WHERE SalesPersonFlag = 1"},
    {"product_count", "SELECT COUNT_BIG(*) AS cnt FROM dbo.DimProduct"},
    {"latest_internet_sale", "SELECT MAX(CAST(OrderDate AS date)) AS val FROM dbo.FactInternetSales"},
}

func AWRunDataQualityCheck(ctx context.Context, db *sql.DB) (*DataQualityResult, error) {
    ctx, span := awTracer.Start(ctx, "aw.analytics.data_quality")
    defer span.End()

    result := &DataQualityResult{
        Checks:       make(map[string]string),
        FailedChecks: []string{},
    }

    for _, check := range awDQChecks {
        row := db.QueryRowContext(ctx, check.sql)
        var val sql.NullString
        if err := row.Scan(&val); err != nil {
            result.Checks[check.name] = fmt.Sprintf("ERROR: %v", err)
            result.FailedChecks = append(result.FailedChecks, check.name)
            continue
        }
        v := "NULL"
        if val.Valid {
            v = val.String
        }
        result.Checks[check.name] = v
        if v == "NULL" || v == "0" {
            if check.name != "latest_internet_sale" && check.name != "active_sales_reps" {
                result.FailedChecks = append(result.FailedChecks, check.name)
            }
        }
    }

    if len(result.FailedChecks) > 0 {
        result.Status = "fail"
    } else {
        result.Status = "pass"
    }
    return result, nil
}
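For reference, the model behind AWGetSalesForecast, written out: an OLS trend fit on the gap-filled daily series, scaled by a weekday factor, with a band that widens over the horizon under a normal approximation. With n observed days and horizon step h:

\hat{y}_{n+h} = \max\bigl\{\,(\hat\beta_0 + \hat\beta_1 (n+h-1)) \cdot f_{w(n+h)},\ 0\,\bigr\}, \qquad f_w = \frac{\bar{y}_{\text{weekday}=w}}{\bar{y}}

\mathrm{CI}_h = 1.96\,\hat\sigma\,\sqrt{1 + h/n}, \qquad \hat\sigma^2 = \frac{1}{n-2}\sum_{t=0}^{n-1}\bigl(y_t - \hat\beta_0 - \hat\beta_1 t\bigr)^2

Likewise, AWRunAnomalyDetection flags day t when |z_t| > 2, where z_t = (y_t - \mu_t)/\sigma_t and \mu_t, \sigma_t are the 30-day rolling mean and sample standard deviation (at least 7 observations per window).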
backend/analytics/internal/analytics/math.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package analytics

import "math"

// ols returns slope and intercept for simple linear regression y = intercept + slope*x.
func ols(x, y []float64) (slope, intercept float64) {
    n := float64(len(x))
    if n == 0 {
        return 0, 0
    }
    var sumX, sumY, sumXX, sumXY float64
    for i := range x {
        sumX += x[i]
        sumY += y[i]
        sumXX += x[i] * x[i]
        sumXY += x[i] * y[i]
    }
    denom := n*sumXX - sumX*sumX
    if math.Abs(denom) < 1e-10 {
        return 0, sumY / n
    }
    slope = (n*sumXY - sumX*sumY) / denom
    intercept = (sumY - slope*sumX) / n
    return
}

func meanOf(values []float64) float64 {
    if len(values) == 0 {
        return 0
    }
    var s float64
    for _, v := range values {
        s += v
    }
    return s / float64(len(values))
}

// sampleStdDev computes sample standard deviation (Bessel's correction).
func sampleStdDev(values []float64, mean float64) float64 {
    if len(values) < 2 {
        return 0
    }
    var s float64
    for _, v := range values {
        d := v - mean
        s += d * d
    }
    return math.Sqrt(s / float64(len(values)-1))
}

// rollingMeanStd computes per-point rolling mean and std over the given window.
// Positions with fewer than minPeriods observations get NaN.
func rollingMeanStd(values []float64, window, minPeriods int) (means, stds []float64) {
    n := len(values)
    means = make([]float64, n)
    stds = make([]float64, n)
    for i := range values {
        start := i - window + 1
        if start < 0 {
            start = 0
        }
        sl := values[start : i+1]
        if len(sl) < minPeriods {
            means[i] = math.NaN()
            stds[i] = math.NaN()
            continue
        }
        m := meanOf(sl)
        means[i] = m
        stds[i] = sampleStdDev(sl, m)
    }
    return
}

func round2(v float64) float64 { return math.Round(v*100) / 100 }
func round3(v float64) float64 { return math.Round(v*1000) / 1000 }

func maxF(a, b float64) float64 {
    if a > b {
        return a
    }
    return b
}

func clamp01(v float64) float64 {
    if v < 0 {
        return 0
    }
    if v > 1 {
        return 1
    }
    return v
}

func ceilInt(v float64) int {
    return int(math.Ceil(v))
}
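A quick way to pin down the helpers above is a test on exactly solvable inputs. The following is a hypothetical math_test.go (not part of this commit) exercising ols and rollingMeanStd:

package analytics

import (
    "math"
    "testing"
)

func TestOLSRecoversLine(t *testing.T) {
    // Points lie exactly on y = 1 + 2x, so the fit should recover it.
    x := []float64{0, 1, 2, 3}
    y := []float64{1, 3, 5, 7}
    slope, intercept := ols(x, y)
    if math.Abs(slope-2) > 1e-9 || math.Abs(intercept-1) > 1e-9 {
        t.Fatalf("got slope=%v intercept=%v, want 2 and 1", slope, intercept)
    }
}

func TestRollingMeanStdMinPeriods(t *testing.T) {
    means, stds := rollingMeanStd([]float64{1, 2, 3, 4}, 3, 2)
    // Position 0 has only one observation, below minPeriods=2, so NaN.
    if !math.IsNaN(means[0]) {
        t.Fatalf("expected NaN at position 0, got %v", means[0])
    }
    // Position 2 covers the full window {1, 2, 3}: mean 2, sample std 1.
    if math.Abs(means[2]-2) > 1e-9 || math.Abs(stds[2]-1) > 1e-9 {
        t.Fatalf("got mean=%v std=%v, want 2 and 1", means[2], stds[2])
    }
}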
backend/analytics/internal/analytics/wwi.go (new file, 529 lines)
@@ -0,0 +1,529 @@
|
||||
package analytics

import (
	"context"
	"database/sql"
	"fmt"
	"math"
	"sort"
	"time"

	mssqldb "otel-bi-analytics/internal/db"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

var wwiTracer = otel.Tracer("otel-bi.analytics.wwi")

// ---------------------------------------------------------------------------
// SQL queries
// ---------------------------------------------------------------------------

var wwiDailySalesQueries = []string{
	`SELECT
		d.[Date] AS sale_date,
		SUM(s.[Total Excluding Tax]) AS revenue,
		SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
		SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
		COUNT_BIG(*) AS orders
	FROM [Fact].[Sale] AS s
	INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
	GROUP BY d.[Date]
	ORDER BY d.[Date]`,
	`SELECT
		d.[Date] AS sale_date,
		SUM(s.[Total Excluding Tax]) AS revenue,
		SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
		SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
		COUNT_BIG(*) AS orders
	FROM [Fact].[Sale] AS s
	INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]
	GROUP BY d.[Date]
	ORDER BY d.[Date]`,
}

var wwiStockLevelsQueries = []string{
	`SELECT
		si.[Stock Item Key] AS stock_item_key,
		si.[Stock Item] AS stock_item_name,
		si.[Unit Price] AS unit_price,
		si.[Lead Time Days] AS lead_time_days,
		SUM(CAST(m.[Quantity] AS FLOAT)) AS current_stock
	FROM [Dimension].[Stock Item] AS si
	LEFT JOIN [Fact].[Movement] AS m ON m.[Stock Item Key] = si.[Stock Item Key]
	WHERE si.[Stock Item Key] <> 0
	GROUP BY si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days]`,
	`SELECT
		si.[Stock Item Key] AS stock_item_key,
		si.[Stock Item] AS stock_item_name,
		si.[Unit Price] AS unit_price,
		si.[Lead Time Days] AS lead_time_days,
		CAST(0 AS FLOAT) AS current_stock
	FROM [Dimension].[Stock Item] AS si
	WHERE si.[Stock Item Key] <> 0`,
}

var wwiDemandVelocityQueries = []string{
	`SELECT
		s.[Stock Item Key] AS stock_item_key,
		SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d
	FROM [Fact].[Sale] AS s
	INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
	WHERE d.[Date] >= DATEADD(day, -90, GETDATE()) AND s.[Stock Item Key] <> 0
	GROUP BY s.[Stock Item Key]`,
	`SELECT
		s.[Stock Item Key] AS stock_item_key,
		SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d
	FROM [Fact].[Sale] AS s
	INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]
	WHERE d.[Date] >= DATEADD(day, -90, GETDATE()) AND s.[Stock Item Key] <> 0
	GROUP BY s.[Stock Item Key]`,
}

var wwiSupplierPerfQueries = []string{
	`SELECT
		sup.[Supplier Key] AS supplier_key,
		sup.[Supplier] AS supplier_name,
		sup.[Category] AS category,
		COUNT_BIG(*) AS total_orders,
		SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
		SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
		SUM(CASE WHEN p.[Is Order Finalized] = 1 THEN 1 ELSE 0 END) AS finalized_orders
	FROM [Dimension].[Supplier] AS sup
	INNER JOIN [Fact].[Purchase] AS p ON p.[Supplier Key] = sup.[Supplier Key]
	WHERE sup.[Supplier Key] <> 0
	GROUP BY sup.[Supplier Key], sup.[Supplier], sup.[Category]
	ORDER BY total_orders DESC`,
	`SELECT
		sup.[Supplier Key] AS supplier_key,
		sup.[Supplier] AS supplier_name,
		sup.[Category] AS category,
		COUNT_BIG(*) AS total_orders,
		SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
		SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
		COUNT_BIG(*) AS finalized_orders
	FROM [Dimension].[Supplier] AS sup
	INNER JOIN [Fact].[Purchase] AS p ON p.[Supplier Key] = sup.[Supplier Key]
	WHERE sup.[Supplier Key] <> 0
	GROUP BY sup.[Supplier Key], sup.[Supplier], sup.[Category]
	ORDER BY total_orders DESC`,
}

// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------

type ReorderRecommendation struct {
	StockItemKey          int      `json:"stock_item_key"`
	StockItemName         string   `json:"stock_item_name"`
	UnitPrice             float64  `json:"unit_price"`
	CurrentStock          float64  `json:"current_stock"`
	AvgDailyDemand        float64  `json:"avg_daily_demand"`
	DaysUntilStockout     *float64 `json:"days_until_stockout"`
	RecommendedReorderQty int      `json:"recommended_reorder_qty"`
	Urgency               string   `json:"urgency"`
}

type SupplierScore struct {
	Rank                int     `json:"rank"`
	SupplierKey         int     `json:"supplier_key"`
	SupplierName        string  `json:"supplier_name"`
	Category            string  `json:"category"`
	TotalOrders         int     `json:"total_orders"`
	FillRatePct         float64 `json:"fill_rate_pct"`
	FinalizationRatePct float64 `json:"finalization_rate_pct"`
	Score               float64 `json:"score"`
}

type WhatIfResult struct {
	StockItemKey               int      `json:"stock_item_key"`
	StockItemName              string   `json:"stock_item_name"`
	DemandMultiplier           float64  `json:"demand_multiplier"`
	CurrentStock               float64  `json:"current_stock"`
	BaseAvgDailyDemand         float64  `json:"base_avg_daily_demand"`
	AdjustedDailyDemand        float64  `json:"adjusted_daily_demand"`
	ProjectedDaysUntilStockout *float64 `json:"projected_days_until_stockout"`
	ProjectedStockoutDate      *string  `json:"projected_stockout_date"`
	RecommendedOrderQty        int      `json:"recommended_order_qty"`
	EstimatedReorderCost       float64  `json:"estimated_reorder_cost"`
}

// ---------------------------------------------------------------------------
// KPIs (same logic as AW)
// ---------------------------------------------------------------------------

func WWIGetSalesKPIs(ctx context.Context, db *sql.DB) (*SalesKPIs, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.kpis")
	defer span.End()

	rows, err := mssqldb.QueryFirst(ctx, db, wwiDailySalesQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	cutoff := time.Now().UTC().AddDate(0, 0, -180)
	var totalRevenue, totalCost, totalQuantity, totalOrders float64
	var count int

	for rows.Next() {
		var date time.Time
		var revenue, cost, quantity, orders sql.NullFloat64
		if err := rows.Scan(&date, &revenue, &cost, &quantity, &orders); err != nil {
			return nil, fmt.Errorf("scan wwi_daily_sales: %w", err)
		}
		if date.Before(cutoff) {
			continue
		}
		totalRevenue += revenue.Float64
		totalCost += cost.Float64
		totalQuantity += quantity.Float64
		totalOrders += orders.Float64
		count++
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if totalOrders < 1 {
		totalOrders = 1
	}
	var marginPct float64
	if totalRevenue > 0 {
		marginPct = (totalRevenue - totalCost) / totalRevenue * 100
	}
	return &SalesKPIs{
		TotalRevenue:    round2(totalRevenue),
		GrossMarginPct:  round2(marginPct),
		TotalQuantity:   round2(totalQuantity),
		AvgOrderValue:   round2(totalRevenue / totalOrders),
		RecordsInWindow: count,
	}, nil
}

// ---------------------------------------------------------------------------
// Reorder recommendations
// ---------------------------------------------------------------------------

func urgency(days float64) string {
	if days <= 7 {
		return "HIGH"
	}
	if days <= 14 {
		return "MEDIUM"
	}
	return "LOW"
}

func WWIGetReorderRecommendations(ctx context.Context, db *sql.DB) ([]ReorderRecommendation, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.reorder_recommendations")
	defer span.End()

	// Fetch stock levels
	stockRows, err := mssqldb.QueryFirst(ctx, db, wwiStockLevelsQueries)
	if err != nil {
		return nil, err
	}
	defer stockRows.Close()

	type stockItem struct {
		Key          int
		Name         string
		UnitPrice    float64
		LeadTimeDays float64
		CurrentStock float64
	}
	byKey := make(map[int]*stockItem)
	for stockRows.Next() {
		var s stockItem
		var price, lead, stock sql.NullFloat64
		if err := stockRows.Scan(&s.Key, &s.Name, &price, &lead, &stock); err != nil {
			return nil, fmt.Errorf("scan stock_levels: %w", err)
		}
		s.UnitPrice = price.Float64
		s.LeadTimeDays = lead.Float64
		if s.LeadTimeDays == 0 {
			s.LeadTimeDays = 7
		}
		s.CurrentStock = stock.Float64
		byKey[s.Key] = &s
	}
	if err := stockRows.Err(); err != nil {
		return nil, err
	}

	// Fetch 90-day demand velocity
	demandRows, err := mssqldb.QueryFirst(ctx, db, wwiDemandVelocityQueries)
	if err != nil {
		return nil, err
	}
	defer demandRows.Close()

	demand := make(map[int]float64)
	for demandRows.Next() {
		var key int
		var qty sql.NullFloat64
		if err := demandRows.Scan(&key, &qty); err != nil {
			return nil, fmt.Errorf("scan demand_velocity: %w", err)
		}
		demand[key] = qty.Float64
	}
	if err := demandRows.Err(); err != nil {
		return nil, err
	}

	// Compute recommendations
	var result []ReorderRecommendation
	for _, s := range byKey {
		avgDailyDemand := demand[s.Key] / 90.0
		var daysUntilStockout float64
		if avgDailyDemand > 0 {
			daysUntilStockout = s.CurrentStock / avgDailyDemand
		} else {
			daysUntilStockout = math.Inf(1)
		}

		if daysUntilStockout > 30 && s.CurrentStock >= 0 {
			continue
		}

		reorderQty := math.Max(math.Ceil(avgDailyDemand*s.LeadTimeDays*1.5), 1)

		rec := ReorderRecommendation{
			StockItemKey:          s.Key,
			StockItemName:         s.Name,
			UnitPrice:             round2(s.UnitPrice),
			CurrentStock:          round2(s.CurrentStock),
			AvgDailyDemand:        round3(avgDailyDemand),
			RecommendedReorderQty: int(reorderQty),
			Urgency:               urgency(daysUntilStockout),
		}
		if !math.IsInf(daysUntilStockout, 0) {
			d := round2(daysUntilStockout)
			rec.DaysUntilStockout = &d
		}
		result = append(result, rec)
	}

	sort.Slice(result, func(i, j int) bool {
		di := math.Inf(1)
		if result[i].DaysUntilStockout != nil {
			di = *result[i].DaysUntilStockout
		}
		dj := math.Inf(1)
		if result[j].DaysUntilStockout != nil {
			dj = *result[j].DaysUntilStockout
		}
		return di < dj
	})

	span.SetAttributes(attribute.Int("item_count", len(result)))
	return result, nil
}

// ---------------------------------------------------------------------------
// Supplier scores
// ---------------------------------------------------------------------------

func WWIGetSupplierScores(ctx context.Context, db *sql.DB, topN int) ([]SupplierScore, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.supplier_scores",
		trace.WithAttributes(attribute.Int("top_n", topN)))
	defer span.End()

	rows, err := mssqldb.QueryFirst(ctx, db, wwiSupplierPerfQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	type rawSupplier struct {
		Key             int
		Name            string
		Category        string
		TotalOrders     float64
		OrderedOuters   float64
		ReceivedOuters  float64
		FinalizedOrders float64
	}

	var raws []rawSupplier
	for rows.Next() {
		var r rawSupplier
		var orders, ordered, received, finalized sql.NullFloat64
		if err := rows.Scan(&r.Key, &r.Name, &r.Category, &orders, &ordered, &received, &finalized); err != nil {
			return nil, fmt.Errorf("scan supplier_performance: %w", err)
		}
		r.TotalOrders = orders.Float64
		r.OrderedOuters = ordered.Float64
		r.ReceivedOuters = received.Float64
		r.FinalizedOrders = finalized.Float64
		raws = append(raws, r)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}

	type scored struct {
		raw   rawSupplier
		score float64
		fill  float64
		final float64
	}
	scoreds := make([]scored, len(raws))
	for i, r := range raws {
		var fillRate, finalRate float64
		if r.OrderedOuters > 0 {
			fillRate = math.Min(r.ReceivedOuters/r.OrderedOuters*100, 100)
		}
		if r.TotalOrders > 0 {
			finalRate = r.FinalizedOrders / r.TotalOrders * 100
		}
		s := 0.60*(fillRate/100) + 0.40*(finalRate/100)
		scoreds[i] = scored{r, s, fillRate, finalRate}
	}
	sort.Slice(scoreds, func(i, j int) bool { return scoreds[i].score > scoreds[j].score })
	if topN < len(scoreds) {
		scoreds = scoreds[:topN]
	}

	result := make([]SupplierScore, len(scoreds))
	for i, s := range scoreds {
		result[i] = SupplierScore{
			Rank:                i + 1,
			SupplierKey:         s.raw.Key,
			SupplierName:        s.raw.Name,
			Category:            s.raw.Category,
			TotalOrders:         int(s.raw.TotalOrders),
			FillRatePct:         round2(s.fill),
			FinalizationRatePct: round2(s.final),
			Score:               round2(s.score * 100),
		}
	}
	return result, nil
}

// ---------------------------------------------------------------------------
// What-if scenario
// ---------------------------------------------------------------------------

func WWICreateWhatIfScenario(ctx context.Context, db *sql.DB, stockItemKey int, demandMultiplier float64) (*WhatIfResult, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.whatif_scenario",
		trace.WithAttributes(
			attribute.Int("stock_item_key", stockItemKey),
			attribute.Float64("demand_multiplier", demandMultiplier),
		))
	defer span.End()

	const detailQ = `SELECT
		si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days],
		COALESCE(SUM(CAST(m.[Quantity] AS FLOAT)), 0) AS current_stock
	FROM [Dimension].[Stock Item] AS si
	LEFT JOIN [Fact].[Movement] AS m ON m.[Stock Item Key] = si.[Stock Item Key]
	WHERE si.[Stock Item Key] = @stock_item_key
	GROUP BY si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days]`

	const demandQ = `SELECT
		SUM(CAST(s.[Quantity] AS FLOAT)) / NULLIF(90.0, 0) AS avg_daily_demand
	FROM [Fact].[Sale] AS s
	INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
	WHERE s.[Stock Item Key] = @stock_item_key
	  AND d.[Date] >= DATEADD(day, -90, GETDATE())`

	var itemKey int
	var itemName string
	var unitPrice, leadTime, currentStock sql.NullFloat64

	row := db.QueryRowContext(ctx, detailQ, sql.Named("stock_item_key", stockItemKey))
	if err := row.Scan(&itemKey, &itemName, &unitPrice, &leadTime, &currentStock); err != nil {
		if err == sql.ErrNoRows {
			return nil, fmt.Errorf("stock item %d not found", stockItemKey)
		}
		return nil, fmt.Errorf("query stock item detail: %w", err)
	}

	lead := leadTime.Float64
	if lead == 0 {
		lead = 7
	}
	stock := currentStock.Float64
	price := unitPrice.Float64

	var baseDemand sql.NullFloat64
	demRow := db.QueryRowContext(ctx, demandQ, sql.Named("stock_item_key", stockItemKey))
	_ = demRow.Scan(&baseDemand)

	adjustedDemand := baseDemand.Float64 * demandMultiplier
	reorderQty := 0
	var daysPtr *float64
	var stockoutDatePtr *string
	if adjustedDemand > 0 {
		days := stock / adjustedDemand
		d := round2(days)
		daysPtr = &d
		sd := time.Now().UTC().AddDate(0, 0, int(days)).Format("2006-01-02")
		stockoutDatePtr = &sd
		reorderQty = ceilInt(adjustedDemand * lead * 1.5)
	}

	return &WhatIfResult{
		StockItemKey:               stockItemKey,
		StockItemName:              itemName,
		DemandMultiplier:           demandMultiplier,
		CurrentStock:               round2(stock),
		BaseAvgDailyDemand:         round3(baseDemand.Float64),
		AdjustedDailyDemand:        round3(adjustedDemand),
		ProjectedDaysUntilStockout: daysPtr,
		ProjectedStockoutDate:      stockoutDatePtr,
		RecommendedOrderQty:        reorderQty,
		EstimatedReorderCost:       round2(float64(reorderQty) * price),
	}, nil
}

// ---------------------------------------------------------------------------
// Data quality
// ---------------------------------------------------------------------------

var wwiDQChecks = []struct {
	name string
	sql  string
}{
	{"fact_sale_rows", "SELECT COUNT_BIG(*) AS cnt FROM [Fact].[Sale]"},
	{"active_suppliers", "SELECT COUNT_BIG(*) AS cnt FROM [Dimension].[Supplier] WHERE [Supplier Key] <> 0"},
	{"stock_item_count", "SELECT COUNT_BIG(*) AS cnt FROM [Dimension].[Stock Item] WHERE [Stock Item Key] <> 0"},
	{"stock_holdings", "SELECT COUNT(*) AS cnt FROM [Warehouse].[StockItemHoldings]"},
	{"latest_sale_date", "SELECT MAX(d.[Date]) AS val FROM [Fact].[Sale] AS s INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]"},
}

func WWIRunDataQualityCheck(ctx context.Context, db *sql.DB) (*DataQualityResult, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.data_quality")
	defer span.End()

	result := &DataQualityResult{
		Checks:       make(map[string]string),
		FailedChecks: []string{},
	}
	for _, check := range wwiDQChecks {
		row := db.QueryRowContext(ctx, check.sql)
		var val sql.NullString
		if err := row.Scan(&val); err != nil {
			result.Checks[check.name] = fmt.Sprintf("ERROR: %v", err)
			result.FailedChecks = append(result.FailedChecks, check.name)
			continue
		}
		v := "NULL"
		if val.Valid {
			v = val.String
		}
		result.Checks[check.name] = v
		if (v == "NULL" || v == "0") && check.name == "fact_sale_rows" {
			result.FailedChecks = append(result.FailedChecks, check.name)
		}
	}
	if len(result.FailedChecks) > 0 {
		result.Status = "fail"
	} else {
		result.Status = "pass"
	}
	return result, nil
}
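To make the reorder heuristic concrete: 180 units sold over 90 days is 2/day; with a 10-day lead time and the 1.5 safety factor, the recommendation is ceil(2 × 10 × 1.5) = 30 units. A hedged wiring sketch (the DSN is a placeholder; the real value is the WWI_MSSQL_DSN the config package loads):

package main

import (
	"context"
	"fmt"
	"log"

	"otel-bi-analytics/internal/analytics"
	mssqldb "otel-bi-analytics/internal/db"
)

func main() {
	ctx := context.Background()
	// Placeholder DSN; not part of the commit.
	db, err := mssqldb.Open(ctx, "sqlserver://user:pass@localhost:1433?database=WideWorldImportersDW", "wwi")
	if err != nil {
		log.Fatal(err)
	}
	recs, err := analytics.WWIGetReorderRecommendations(ctx, db)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range recs {
		fmt.Println(r.Urgency, r.StockItemName, r.RecommendedReorderQty)
	}
}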
54 backend/analytics/internal/config/config.go Normal file
@@ -0,0 +1,54 @@
package config

import (
	"fmt"
	"os"
	"strconv"
)

type Config struct {
	Port                  int
	AWConnStr             string
	WWIConnStr            string
	PostgresDSN           string
	OtelCollectorEndpoint string
	OtelServiceName       string
	OtelServiceNamespace  string
	DefaultTopN           int
	ForecastHorizonDays   int
	DefaultHistoryDays    int
}

func Load() Config {
	port, _ := strconv.Atoi(getEnv("PORT", "8080"))
	topN, _ := strconv.Atoi(getEnv("DEFAULT_TOP_N", "10"))
	forecastDays, _ := strconv.Atoi(getEnv("FORECAST_HORIZON_DAYS", "30"))
	historyDays, _ := strconv.Atoi(getEnv("DEFAULT_HISTORY_DAYS", "365"))
	return Config{
		Port:                  port,
		AWConnStr:             mustEnv("AW_MSSQL_DSN"),
		WWIConnStr:            mustEnv("WWI_MSSQL_DSN"),
		PostgresDSN:           mustEnv("POSTGRES_DSN"),
		OtelCollectorEndpoint: getEnv("OTEL_COLLECTOR_ENDPOINT", "http://localhost:4318"),
		OtelServiceName:       getEnv("OTEL_SERVICE_NAME", "otel-bi-analytics"),
		OtelServiceNamespace:  getEnv("OTEL_SERVICE_NAMESPACE", "final-thesis"),
		DefaultTopN:           topN,
		ForecastHorizonDays:   forecastDays,
		DefaultHistoryDays:    historyDays,
	}
}

func getEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func mustEnv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		panic(fmt.Sprintf("required environment variable %s is not set", key))
	}
	return v
}
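Load panics when any of the three required DSNs is missing, so they must be set before the service starts. An illustrative sketch with placeholder values (not part of the commit):

package main

import (
	"fmt"
	"os"

	"otel-bi-analytics/internal/config"
)

func main() {
	// Placeholders for the three required DSNs; real values come from the
	// deployment environment.
	os.Setenv("AW_MSSQL_DSN", "sqlserver://...")
	os.Setenv("WWI_MSSQL_DSN", "sqlserver://...")
	os.Setenv("POSTGRES_DSN", "postgres://...")

	cfg := config.Load()
	fmt.Println(cfg.Port, cfg.DefaultTopN, cfg.ForecastHorizonDays)
}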
41 backend/analytics/internal/db/mssql.go Normal file
@@ -0,0 +1,41 @@
package db

import (
	"context"
	"database/sql"
	"fmt"
	"log/slog"

	_ "github.com/microsoft/go-mssqldb"
)

// Open creates an MSSQL connection pool and validates connectivity.
func Open(ctx context.Context, dsn, name string) (*sql.DB, error) {
	pool, err := sql.Open("sqlserver", dsn)
	if err != nil {
		return nil, fmt.Errorf("open %s: %w", name, err)
	}
	pool.SetMaxOpenConns(15)
	pool.SetMaxIdleConns(5)
	if err := pool.PingContext(ctx); err != nil {
		return nil, fmt.Errorf("ping %s: %w", name, err)
	}
	slog.Info("mssql connected", "db", name)
	return pool, nil
}

// QueryFirst runs each SQL query in order, returning rows from the first one
// that succeeds. Used for schema-fallback queries.
func QueryFirst(ctx context.Context, pool *sql.DB, queries []string) (*sql.Rows, error) {
	var lastErr error
	for _, q := range queries {
		rows, err := pool.QueryContext(ctx, q)
		if err != nil {
			slog.Warn("query variant failed, trying next", "err", err)
			lastErr = err
			continue
		}
		return rows, nil
	}
	return nil, fmt.Errorf("all query variants failed: %w", lastErr)
}
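QueryFirst is the fallback mechanism the wwi*Queries variants above rely on: variants run in order and the first that executes wins. A hedged usage sketch (DSN and queries are placeholders, not part of the commit):

package main

import (
	"context"
	"fmt"
	"log"

	mssqldb "otel-bi-analytics/internal/db"
)

func main() {
	ctx := context.Background()
	pool, err := mssqldb.Open(ctx, "sqlserver://...", "wwi") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	// Variants are tried in order; the second form covers schemas where
	// the first query's syntax is unavailable.
	rows, err := mssqldb.QueryFirst(ctx, pool, []string{
		`SELECT COUNT_BIG(*) FROM [Fact].[Sale]`,
		`SELECT COUNT(*) FROM [Fact].[Sale]`,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var n int64
		if err := rows.Scan(&n); err != nil {
			log.Fatal(err)
		}
		fmt.Println("rows:", n)
	}
}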
28 backend/analytics/internal/db/postgres.go Normal file
@@ -0,0 +1,28 @@
package db

import (
	"context"
	"fmt"
	"log/slog"

	"github.com/jackc/pgx/v5/pgxpool"
)

// OpenPostgres creates a pgx connection pool and validates connectivity.
func OpenPostgres(ctx context.Context, dsn string) (*pgxpool.Pool, error) {
	cfg, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		return nil, fmt.Errorf("parse postgres DSN: %w", err)
	}

	pool, err := pgxpool.NewWithConfig(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("create postgres pool: %w", err)
	}
	if err := pool.Ping(ctx); err != nil {
		pool.Close()
		return nil, fmt.Errorf("ping postgres: %w", err)
	}
	slog.Info("postgres connected", "max_conns", cfg.MaxConns)
	return pool, nil
}
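A minimal caller sketch for OpenPostgres (the DSN is a placeholder; the real value comes from POSTGRES_DSN):

package main

import (
	"context"
	"log"

	"otel-bi-analytics/internal/db"
)

func main() {
	ctx := context.Background()
	// Placeholder DSN, not part of the commit.
	pool, err := db.OpenPostgres(ctx, "postgres://otel_bi:secret@localhost:5432/otel_bi")
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
}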
106 backend/analytics/internal/export/xlsx.go Normal file
@@ -0,0 +1,106 @@
package export

import (
	"context"
	"fmt"
	"sort"

	"github.com/xuri/excelize/v2"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

var (
	exportTracer = otel.Tracer("otel-bi.export")
	exportMeter  = otel.Meter("otel-bi.export")

	exportRowsTotal, _ = exportMeter.Int64Counter(
		"export.rows_total",
		metric.WithDescription("Total rows exported to XLSX"),
	)
	exportSizeBytes, _ = exportMeter.Int64Histogram(
		"export.file_size_bytes",
		metric.WithDescription("XLSX file size in bytes"),
		metric.WithUnit("By"),
	)
)

type Column struct {
	Key   string
	Label string
}

// ToXLSXBytes writes rows to a single-sheet Excel workbook using the given
// column spec (controls header labels and order) and returns the raw bytes.
func ToXLSXBytes(ctx context.Context, sheetName string, cols []Column, rows []map[string]any) ([]byte, error) {
	ctx, span := exportTracer.Start(ctx, "export.xlsx",
		trace.WithAttributes(attribute.String("sheet_name", sheetName)),
	)
	defer span.End()

	f := excelize.NewFile()
	defer f.Close()

	sheet := f.GetSheetName(0)
	if err := f.SetSheetName(sheet, sheetName); err != nil {
		return nil, err
	}

	// Header row
	for col, c := range cols {
		cell, _ := excelize.CoordinatesToCellName(col+1, 1)
		if err := f.SetCellValue(sheetName, cell, c.Label); err != nil {
			return nil, err
		}
	}

	// Data rows
	for rowIdx, row := range rows {
		for colIdx, c := range cols {
			cell, _ := excelize.CoordinatesToCellName(colIdx+1, rowIdx+2)
			_ = f.SetCellValue(sheetName, cell, fmtCell(row[c.Key]))
		}
	}

	buf, err := f.WriteToBuffer()
	if err != nil {
		return nil, err
	}
	b := buf.Bytes()

	span.SetAttributes(
		attribute.Int("row_count", len(rows)),
		attribute.Int("file_size_bytes", len(b)),
	)
	exportRowsTotal.Add(ctx, int64(len(rows)), metric.WithAttributes(attribute.String("sheet", sheetName)))
	exportSizeBytes.Record(ctx, int64(len(b)), metric.WithAttributes(attribute.String("sheet", sheetName)))

	return b, nil
}

// GenericXLSX converts a slice of maps to XLSX with alphabetically-sorted headers.
// Use ToXLSXBytes when column order matters.
func GenericXLSX(ctx context.Context, sheetName string, rows []map[string]any) ([]byte, error) {
	if len(rows) == 0 {
		return ToXLSXBytes(ctx, sheetName, nil, nil)
	}
	keys := make([]string, 0, len(rows[0]))
	for k := range rows[0] {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	cols := make([]Column, len(keys))
	for i, k := range keys {
		cols[i] = Column{Key: k, Label: k}
	}
	return ToXLSXBytes(ctx, sheetName, cols, rows)
}

func fmtCell(v any) string {
	if v == nil {
		return ""
	}
	return fmt.Sprintf("%v", v)
}
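A small end-to-end sketch of ToXLSXBytes; sheet name, rows, and output path are arbitrary illustrations, not part of the commit:

package main

import (
	"context"
	"log"
	"os"

	"otel-bi-analytics/internal/export"
)

func main() {
	cols := []export.Column{
		{Key: "name", Label: "Name"},
		{Key: "qty", Label: "Quantity"},
	}
	rows := []map[string]any{
		{"name": "Widget", "qty": 3},
		{"name": "Gadget", "qty": 7},
	}
	b, err := export.ToXLSXBytes(context.Background(), "Demo", cols, rows)
	if err != nil {
		log.Fatal(err)
	}
	// Output path is arbitrary for the sketch.
	if err := os.WriteFile("demo.xlsx", b, 0o644); err != nil {
		log.Fatal(err)
	}
}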
447 backend/analytics/internal/handler/handler.go Normal file
@@ -0,0 +1,447 @@
package handler

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"strconv"

	"github.com/jackc/pgx/v5/pgxpool"

	"otel-bi-analytics/internal/analytics"
	"otel-bi-analytics/internal/export"
	"otel-bi-analytics/internal/scheduler"
)

type Handler struct {
	awDB   *sql.DB
	wwiDB  *sql.DB
	pgPool *pgxpool.Pool
	sched  *scheduler.Scheduler

	defaultTopN         int
	defaultForecastDays int
	defaultHistoryDays  int
}

func New(awDB, wwiDB *sql.DB, pgPool *pgxpool.Pool, sched *scheduler.Scheduler, topN, forecastDays, historyDays int) *Handler {
	return &Handler{
		awDB:                awDB,
		wwiDB:               wwiDB,
		pgPool:              pgPool,
		sched:               sched,
		defaultTopN:         topN,
		defaultForecastDays: forecastDays,
		defaultHistoryDays:  historyDays,
	}
}

// RegisterRoutes wires all routes into the given mux (Go 1.22 method+path syntax).
func (h *Handler) RegisterRoutes(mux *http.ServeMux) {
	mux.HandleFunc("GET /health", h.Health)

	mux.HandleFunc("GET /aw/sales/kpis", h.AWKPIs)
	mux.HandleFunc("GET /aw/sales/history", h.AWHistory)
	mux.HandleFunc("GET /aw/sales/forecast", h.AWForecast)
	mux.HandleFunc("GET /aw/reps/scores", h.AWRepScores)
	mux.HandleFunc("GET /aw/products/demand", h.AWProductDemand)
	mux.HandleFunc("GET /aw/anomalies", h.AWAnomalies)
	mux.HandleFunc("GET /aw/data-quality", h.AWDataQuality)

	mux.HandleFunc("GET /aw/export/sales-history", h.ExportAWSalesHistory)
	mux.HandleFunc("GET /aw/export/sales-forecast", h.ExportAWSalesForecast)
	mux.HandleFunc("GET /aw/export/rep-scores", h.ExportAWRepScores)
	mux.HandleFunc("GET /aw/export/product-demand", h.ExportAWProductDemand)

	mux.HandleFunc("GET /wwi/sales/kpis", h.WWIKPIs)
	mux.HandleFunc("GET /wwi/stock/recommendations", h.WWIReorderRecommendations)
	mux.HandleFunc("GET /wwi/suppliers/scores", h.WWISupplierScores)
	mux.HandleFunc("POST /wwi/scenarios", h.WWIWhatIfScenario)
	mux.HandleFunc("GET /wwi/data-quality", h.WWIDataQuality)

	mux.HandleFunc("GET /wwi/export/stock-recommendations", h.ExportWWIStockRecommendations)
	mux.HandleFunc("GET /wwi/export/supplier-scores", h.ExportWWISupplierScores)

	mux.HandleFunc("POST /scheduler/aw/{job_name}/trigger", h.TriggerAWJob)
	mux.HandleFunc("POST /scheduler/wwi/{job_name}/trigger", h.TriggerWWIJob)
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

func writeJSON(w http.ResponseWriter, status int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if err := json.NewEncoder(w).Encode(v); err != nil {
		slog.Error("json encode failed", "err", err)
	}
}

func writeError(w http.ResponseWriter, status int, msg string) {
	writeJSON(w, status, map[string]string{"error": msg})
}

func queryInt(r *http.Request, key string, defaultVal int) int {
	s := r.URL.Query().Get(key)
	if s == "" {
		return defaultVal
	}
	v, err := strconv.Atoi(s)
	if err != nil || v <= 0 {
		return defaultVal
	}
	return v
}

func writeXLSX(w http.ResponseWriter, filename string, rowCount int, data []byte) {
	w.Header().Set("Content-Type", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
	w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
	w.Header().Set("X-Row-Count", strconv.Itoa(rowCount))
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(data)
}

func toMaps(v any) []map[string]any {
	b, _ := json.Marshal(v)
	var out []map[string]any
	_ = json.Unmarshal(b, &out)
	if out == nil {
		out = []map[string]any{}
	}
	return out
}

// ---------------------------------------------------------------------------
// Analytics handlers
// ---------------------------------------------------------------------------

func (h *Handler) Health(w http.ResponseWriter, r *http.Request) {
	writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
}

func (h *Handler) AWKPIs(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.AWGetSalesKPIs(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWGetSalesKPIs", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWHistory(w http.ResponseWriter, r *http.Request) {
	daysBack := queryInt(r, "days_back", h.defaultHistoryDays)
	result, err := analytics.AWGetSalesHistory(r.Context(), h.awDB, daysBack)
	if err != nil {
		slog.Error("AWGetSalesHistory", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.DailySalesPoint{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWForecast(w http.ResponseWriter, r *http.Request) {
	horizonDays := queryInt(r, "horizon_days", h.defaultForecastDays)
	result, err := analytics.AWGetSalesForecast(r.Context(), h.awDB, horizonDays)
	if err != nil {
		slog.Error("AWGetSalesForecast", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.ForecastPoint{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWRepScores(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	result, err := analytics.AWGetRepScores(r.Context(), h.awDB, topN)
	if err != nil {
		slog.Error("AWGetRepScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.RepScore{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWProductDemand(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	result, err := analytics.AWGetProductDemand(r.Context(), h.awDB, topN)
	if err != nil {
		slog.Error("AWGetProductDemand", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.ProductDemand{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWAnomalies(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.AWRunAnomalyDetection(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWRunAnomalyDetection", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.AnomalyPoint{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) AWDataQuality(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.AWRunDataQualityCheck(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWRunDataQualityCheck", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) WWIKPIs(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.WWIGetSalesKPIs(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIGetSalesKPIs", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) WWIReorderRecommendations(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.WWIGetReorderRecommendations(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIGetReorderRecommendations", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.ReorderRecommendation{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) WWISupplierScores(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	result, err := analytics.WWIGetSupplierScores(r.Context(), h.wwiDB, topN)
	if err != nil {
		slog.Error("WWIGetSupplierScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	if result == nil {
		result = []analytics.SupplierScore{}
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) WWIWhatIfScenario(w http.ResponseWriter, r *http.Request) {
	var body struct {
		StockItemKey     int     `json:"stock_item_key"`
		DemandMultiplier float64 `json:"demand_multiplier"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "invalid request body")
		return
	}
	if body.StockItemKey <= 0 {
		writeError(w, http.StatusBadRequest, "stock_item_key must be > 0")
		return
	}
	if body.DemandMultiplier <= 0 {
		body.DemandMultiplier = 1.0
	}

	result, err := analytics.WWICreateWhatIfScenario(r.Context(), h.wwiDB, body.StockItemKey, body.DemandMultiplier)
	if err != nil {
		slog.Error("WWICreateWhatIfScenario", "err", err)
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, result)
}

func (h *Handler) WWIDataQuality(w http.ResponseWriter, r *http.Request) {
	result, err := analytics.WWIRunDataQualityCheck(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIRunDataQualityCheck", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, result)
}

// ---------------------------------------------------------------------------
// Export handlers
// ---------------------------------------------------------------------------

func (h *Handler) ExportAWSalesHistory(w http.ResponseWriter, r *http.Request) {
	daysBack := queryInt(r, "days_back", h.defaultHistoryDays)
	data, err := analytics.AWGetSalesHistory(r.Context(), h.awDB, daysBack)
	if err != nil {
		slog.Error("ExportAWSalesHistory", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	cols := []export.Column{
		{Key: "date", Label: "Date"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "avg_order_value", Label: "Avg Order Value"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Sales History", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_sales_history.xlsx", len(data), b)
}

func (h *Handler) ExportAWSalesForecast(w http.ResponseWriter, r *http.Request) {
	horizonDays := queryInt(r, "horizon_days", h.defaultForecastDays)
	data, err := analytics.AWGetSalesForecast(r.Context(), h.awDB, horizonDays)
	if err != nil {
		slog.Error("ExportAWSalesForecast", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	cols := []export.Column{
		{Key: "date", Label: "Date"},
		{Key: "predicted_revenue", Label: "Predicted Revenue"},
		{Key: "lower_bound", Label: "Lower Bound"},
		{Key: "upper_bound", Label: "Upper Bound"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Sales Forecast", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_sales_forecast.xlsx", len(data), b)
}

func (h *Handler) ExportAWRepScores(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	data, err := analytics.AWGetRepScores(r.Context(), h.awDB, topN)
	if err != nil {
		slog.Error("ExportAWRepScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	cols := []export.Column{
		{Key: "rep_name", Label: "Sales Rep"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "avg_order_value", Label: "Avg Order Value"},
		{Key: "performance_score", Label: "Performance Score"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Rep Scores", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_rep_scores.xlsx", len(data), b)
}

func (h *Handler) ExportAWProductDemand(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	data, err := analytics.AWGetProductDemand(r.Context(), h.awDB, topN)
	if err != nil {
		slog.Error("ExportAWProductDemand", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	cols := []export.Column{
		{Key: "product_name", Label: "Product"},
		{Key: "category", Label: "Category"},
		{Key: "total_quantity", Label: "Total Quantity"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "demand_score", Label: "Demand Score"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Product Demand", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_product_demand.xlsx", len(data), b)
}

func (h *Handler) ExportWWIStockRecommendations(w http.ResponseWriter, r *http.Request) {
	data, err := analytics.WWIGetReorderRecommendations(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("ExportWWIStockRecommendations", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	cols := []export.Column{
		{Key: "stock_item_name", Label: "Stock Item"},
		{Key: "current_stock", Label: "Current Stock"},
		{Key: "avg_daily_demand", Label: "Avg Daily Demand"},
		{Key: "days_until_stockout", Label: "Days Until Stockout"},
		{Key: "recommended_reorder_qty", Label: "Recommended Reorder Qty"},
		{Key: "urgency", Label: "Urgency"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Stock Recommendations", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "wwi_stock_recommendations.xlsx", len(data), b)
}

func (h *Handler) ExportWWISupplierScores(w http.ResponseWriter, r *http.Request) {
	topN := queryInt(r, "top_n", h.defaultTopN)
	data, err := analytics.WWIGetSupplierScores(r.Context(), h.wwiDB, topN)
	if err != nil {
		slog.Error("ExportWWISupplierScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Column keys must match SupplierScore's JSON tags, or the exported
	// cells come out empty.
	cols := []export.Column{
		{Key: "rank", Label: "Rank"},
		{Key: "supplier_name", Label: "Supplier"},
		{Key: "category", Label: "Category"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "fill_rate_pct", Label: "Fill Rate (%)"},
		{Key: "finalization_rate_pct", Label: "Finalization Rate (%)"},
		{Key: "score", Label: "Score"},
	}
	b, err := export.ToXLSXBytes(r.Context(), "Supplier Scores", cols, toMaps(data))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "wwi_supplier_scores.xlsx", len(data), b)
}

// ---------------------------------------------------------------------------
// Scheduler trigger handlers
// ---------------------------------------------------------------------------

func (h *Handler) TriggerAWJob(w http.ResponseWriter, r *http.Request) {
	jobName := r.PathValue("job_name")
	if err := h.sched.TriggerAWJob(jobName); err != nil {
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusAccepted, map[string]string{"status": "triggered", "job": jobName})
}

func (h *Handler) TriggerWWIJob(w http.ResponseWriter, r *http.Request) {
	jobName := r.PathValue("job_name")
	if err := h.sched.TriggerWWIJob(jobName); err != nil {
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusAccepted, map[string]string{"status": "triggered", "job": jobName})
}
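A sketch of how this handler is typically mounted; the service's actual main.go is not shown in this excerpt, so the port and any middleware are assumptions:

package main

import (
	"log"
	"net/http"

	"otel-bi-analytics/internal/handler"
)

// serve mounts the handler on a fresh mux; the real entry point likely
// wires the configured port and OTel middleware as well.
func serve(h *handler.Handler) {
	mux := http.NewServeMux()
	h.RegisterRoutes(mux)
	log.Fatal(http.ListenAndServe(":8080", mux))
}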
152 backend/analytics/internal/persistence/audit.go Normal file
@@ -0,0 +1,152 @@
package persistence

import (
	"context"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log/slog"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

var (
	persistTracer = otel.Tracer("otel-bi.persistence")
	persistMeter  = otel.Meter("otel-bi.persistence")

	persistWritesTotal, _ = persistMeter.Int64Counter(
		"persistence.writes_total",
		metric.WithDescription("Total persistence write operations"),
	)
)

// newUUID generates a random UUID v4.
func newUUID() string {
	var b [16]byte
	rand.Read(b[:]) //nolint:errcheck
	b[6] = (b[6] & 0x0f) | 0x40
	b[8] = (b[8] & 0x3f) | 0x80
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
		b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
}

// spanContext extracts trace_id and span_id from the current span as nullable strings.
func spanContext(span trace.Span) (traceID, spanID *string) {
	sctx := span.SpanContext()
	if !sctx.IsValid() {
		return nil, nil
	}
	tid := sctx.TraceID().String()
	sid := sctx.SpanID().String()
	return &tid, &sid
}

// mustJSON marshals v to JSON bytes, returning nil on error.
func mustJSON(v any) []byte {
	b, _ := json.Marshal(v)
	return b
}

// ---------------------------------------------------------------------------
// Job execution tracking
// ---------------------------------------------------------------------------

func RecordJobStart(ctx context.Context, pool *pgxpool.Pool, jobName, domain string, traceID, spanID *string) string {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_start")
	defer span.End()

	id := newUUID()
	_, err := pool.Exec(ctx,
		`INSERT INTO job_executions (id, started_at, job_name, domain, status, trace_id, span_id)
		 VALUES ($1, NOW(), $2, $3, 'running', $4, $5)`,
		id, jobName, domain, traceID, spanID,
	)
	if err != nil {
		slog.Warn("failed to record job start", "job", jobName, "err", err)
	}
	return id
}

func RecordJobComplete(ctx context.Context, pool *pgxpool.Pool, jobID string, startedAt time.Time, records int) {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_complete")
	defer span.End()

	durationMs := time.Since(startedAt).Milliseconds()
	_, err := pool.Exec(ctx,
		`UPDATE job_executions
		 SET status = 'success', completed_at = NOW(), duration_ms = $2, records_processed = $3
		 WHERE id = $1`,
		jobID, durationMs, records,
	)
	if err != nil {
		slog.Warn("failed to record job complete", "id", jobID, "err", err)
	}
}

func RecordJobFailure(ctx context.Context, pool *pgxpool.Pool, jobID string, startedAt time.Time, errMsg string) {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_failure")
	defer span.End()

	durationMs := time.Since(startedAt).Milliseconds()
	if len(errMsg) > 2000 {
		errMsg = errMsg[:2000]
	}
	_, err := pool.Exec(ctx,
		`UPDATE job_executions
		 SET status = 'failure', completed_at = NOW(), duration_ms = $2, error_message = $3
		 WHERE id = $1`,
		jobID, durationMs, errMsg,
	)
	if err != nil {
		slog.Warn("failed to record job failure", "id", jobID, "err", err)
	}
}

// ---------------------------------------------------------------------------
// Audit log
// ---------------------------------------------------------------------------

type AuditEntry struct {
	Action     string
	ActorType  string
	ActorID    string
	Domain     string
	Service    string
	EntityType string
	Status     string
	Payload    any
}

func AppendAudit(ctx context.Context, pool *pgxpool.Pool, e AuditEntry) {
	ctx, span := persistTracer.Start(ctx, "persistence.append_audit",
		trace.WithAttributes(
			attribute.String("audit.action", e.Action),
			attribute.String("audit.domain", e.Domain),
		),
	)
	defer span.End()

	traceID, spanID := spanContext(span)
	status := e.Status
	if status == "" {
		status = "success"
	}
	payloadJSON := mustJSON(e.Payload)

	_, err := pool.Exec(ctx,
		`INSERT INTO audit_log
		 (id, occurred_at, action, status, actor_type, actor_id, domain, service, entity_type, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7, $8, $9, $10, $11::jsonb)`,
		newUUID(), e.Action, status, e.ActorType, e.ActorID,
		e.Domain, e.Service, e.EntityType,
		traceID, spanID, payloadJSON,
	)
	if err != nil {
		slog.Warn("failed to append audit", "action", e.Action, "err", err)
	}
}
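The call shape for AppendAudit, with illustrative action and payload values (not part of the commit):

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"

	"otel-bi-analytics/internal/persistence"
)

// auditExample shows the call shape; the action and payload are illustrative.
func auditExample(ctx context.Context, pool *pgxpool.Pool) {
	persistence.AppendAudit(ctx, pool, persistence.AuditEntry{
		Action:     "export.downloaded",
		ActorType:  "api",
		ActorID:    "api",
		Domain:     "wwi",
		Service:    "otel-bi-analytics",
		EntityType: "supplier_scores",
		Payload:    map[string]any{"row_count": 42},
	})
}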
140 backend/analytics/internal/persistence/aw.go Normal file
@@ -0,0 +1,140 @@
package persistence

import (
	"context"
	"log/slog"

	"github.com/jackc/pgx/v5/pgxpool"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"

	"otel-bi-analytics/internal/analytics"
)

func PersistForecast(ctx context.Context, pool *pgxpool.Pool, data []analytics.ForecastPoint, horizonDays int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_forecast",
		trace.WithAttributes(
			attribute.Int("horizon_days", horizonDays),
			attribute.Int("point_count", len(data)),
		),
	)
	defer span.End()

	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_sales_forecasts
		 (id, created_at, horizon_days, point_count, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
		newUUID(), horizonDays, len(data), source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW forecast", "err", err)
		span.RecordError(err)
		return
	}
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_sales_forecast")))

	AppendAudit(ctx, pool, AuditEntry{
		Action: "forecast.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "sales_forecast",
		Payload: map[string]any{"horizon_days": horizonDays, "point_count": len(data)},
	})
}

func PersistRepScores(ctx context.Context, pool *pgxpool.Pool, data []analytics.RepScore, topN int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_rep_scores",
		trace.WithAttributes(attribute.Int("rep_count", len(data))),
	)
	defer span.End()

	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_rep_scores
		 (id, computed_at, rep_count, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6::jsonb)`,
		newUUID(), len(data), source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW rep scores", "err", err)
		span.RecordError(err)
		return
	}
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_rep_scores")))

	AppendAudit(ctx, pool, AuditEntry{
		Action: "scores.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "rep_scores",
		Payload: map[string]any{"rep_count": len(data), "top_n": topN},
	})
}

func PersistProductDemand(ctx context.Context, pool *pgxpool.Pool, data []analytics.ProductDemand, topN int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_product_demand",
		trace.WithAttributes(attribute.Int("product_count", len(data))),
	)
	defer span.End()

	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_product_demand
		 (id, computed_at, product_count, top_n, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
		newUUID(), len(data), topN, source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW product demand", "err", err)
		span.RecordError(err)
		return
	}
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_product_demand")))

	AppendAudit(ctx, pool, AuditEntry{
		Action: "scores.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "product_demand",
		Payload: map[string]any{"product_count": len(data), "top_n": topN},
	})
}

func PersistAnomalyRun(ctx context.Context, pool *pgxpool.Pool, data []analytics.AnomalyPoint, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_anomaly_run")
	defer span.End()

	anomalyCount := 0
	for _, p := range data {
		if p.IsAnomaly {
			anomalyCount++
		}
	}
	span.SetAttributes(
		attribute.Int("series_points", len(data)),
		attribute.Int("anomaly_count", anomalyCount),
	)

	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_anomaly_runs
		 (id, detected_at, anomaly_count, series_days, window_days, threshold_sigma, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, 365, 30, 2.0, $3, $4, $5, $6::jsonb)`,
		newUUID(), anomalyCount, source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW anomaly run", "err", err)
		span.RecordError(err)
		return
	}
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_anomaly_run")))

	AppendAudit(ctx, pool, AuditEntry{
		Action: "anomaly_detection.ran", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "anomaly_detection",
		Payload: map[string]any{"series_days": 365, "window_days": 30, "anomaly_count": anomalyCount},
	})
}

func actorType(source string) string {
	if len(source) >= 9 && source[:9] == "scheduler" {
		return "scheduler"
	}
	return "api"
}
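A sketch of how a scheduler tick might combine these pieces: record a job start, persist the artifact, then record completion. The real scheduler package is not shown in this excerpt, so the job name and wiring are assumptions:

package main

import (
	"context"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"

	"otel-bi-analytics/internal/analytics"
	"otel-bi-analytics/internal/persistence"
)

// runForecastJob is a hypothetical job wrapper; "aw_forecast" and the
// "scheduler:" source prefix (which actorType keys on) are illustrative.
func runForecastJob(ctx context.Context, pool *pgxpool.Pool, data []analytics.ForecastPoint) {
	started := time.Now()
	jobID := persistence.RecordJobStart(ctx, pool, "aw_forecast", "aw", nil, nil)
	persistence.PersistForecast(ctx, pool, data, 30, "scheduler:aw_forecast")
	persistence.RecordJobComplete(ctx, pool, jobID, started, len(data))
}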
151 backend/analytics/internal/persistence/wwi.go Normal file
@@ -0,0 +1,151 @@
package persistence

import (
    "context"
    "errors"
    "fmt"
    "log/slog"
    "time"

    "github.com/jackc/pgx/v5"
    "github.com/jackc/pgx/v5/pgxpool"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/trace"

    "otel-bi-analytics/internal/analytics"
)

var (
    businessEventsTotal, _ = persistMeter.Int64Counter(
        "wwi.business_events_generated_total",
        metric.WithDescription("Business events generated from reorder data"),
    )
)

func PersistReorderRecommendations(ctx context.Context, pool *pgxpool.Pool, data []analytics.ReorderRecommendation, source string) {
    ctx, span := persistTracer.Start(ctx, "persistence.wwi.persist_reorder_recommendations",
        trace.WithAttributes(attribute.Int("item_count", len(data))),
    )
    defer span.End()

    traceID, spanID := spanContext(span)
    _, err := pool.Exec(ctx,
        `INSERT INTO wwi_reorder_recommendations
         (id, created_at, item_count, trigger_source, trace_id, span_id, payload)
         VALUES ($1, NOW(), $2, $3, $4, $5, $6::jsonb)`,
        newUUID(), len(data), source, traceID, spanID, mustJSON(data),
    )
    if err != nil {
        slog.Warn("failed to persist WWI reorder recommendations", "err", err)
        span.RecordError(err)
        return
    }
    persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "wwi_reorder_recommendations")))

    AppendAudit(ctx, pool, AuditEntry{
        Action: "recommendations.generated", ActorType: actorType(source), ActorID: source,
        Domain: "wwi", Service: "otel-bi-analytics", EntityType: "reorder_recommendations",
        Payload: map[string]any{"item_count": len(data)},
    })
}

func PersistSupplierScores(ctx context.Context, pool *pgxpool.Pool, data []analytics.SupplierScore, topN int, source string) {
    ctx, span := persistTracer.Start(ctx, "persistence.wwi.persist_supplier_scores",
        trace.WithAttributes(attribute.Int("supplier_count", len(data))),
    )
    defer span.End()

    traceID, spanID := spanContext(span)
    _, err := pool.Exec(ctx,
        `INSERT INTO wwi_supplier_scores
         (id, computed_at, supplier_count, top_n, trigger_source, trace_id, span_id, payload)
         VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
        newUUID(), len(data), topN, source, traceID, spanID, mustJSON(data),
    )
    if err != nil {
        slog.Warn("failed to persist WWI supplier scores", "err", err)
        span.RecordError(err)
        return
    }
    persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "wwi_supplier_scores")))

    AppendAudit(ctx, pool, AuditEntry{
        Action: "scores.generated", ActorType: actorType(source), ActorID: source,
        Domain: "wwi", Service: "otel-bi-analytics", EntityType: "supplier_scores",
        Payload: map[string]any{"supplier_count": len(data), "top_n": topN},
    })
}

// GenerateStockEvents writes LOW_STOCK business events for HIGH-urgency items,
// deduplicating within a 24-hour window.
func GenerateStockEvents(ctx context.Context, pool *pgxpool.Pool, items []analytics.ReorderRecommendation) error {
    ctx, span := persistTracer.Start(ctx, "persistence.wwi.generate_stock_events")
    defer span.End()

    cutoff := time.Now().UTC().Add(-24 * time.Hour)

    tx, err := pool.Begin(ctx)
    if err != nil {
        return fmt.Errorf("begin transaction: %w", err)
    }
    defer tx.Rollback(ctx) //nolint:errcheck

    inserted := 0
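    // All events for one run share a single transaction: duplicate or failed
    // items are skipped individually, and everything else commits together below.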
    for _, item := range items {
        if item.Urgency != "HIGH" {
            continue
        }
        entityKey := fmt.Sprintf("%d", item.StockItemKey)

        var existingID string
        err := tx.QueryRow(ctx,
            `SELECT id FROM wwi_business_events
             WHERE event_type = 'LOW_STOCK' AND entity_key = $1 AND occurred_at >= $2
             LIMIT 1`,
            entityKey, cutoff,
        ).Scan(&existingID)
        if err == nil {
            continue // already exists within 24h
        }
        if !errors.Is(err, pgx.ErrNoRows) {
            slog.Warn("error checking existing business event", "err", err)
            continue
        }

        daysStr := "immediately"
        if item.DaysUntilStockout != nil {
            daysStr = fmt.Sprintf("%.1f days", *item.DaysUntilStockout)
        }
        message := fmt.Sprintf(
            "Stock for '%s' will be exhausted in %s. Current stock: %.0f units, daily demand: %.1f units.",
            item.StockItemName, daysStr, item.CurrentStock, item.AvgDailyDemand,
        )

        traceID, spanID := spanContext(span)
        details := mustJSON(map[string]any{
            "current_stock":           item.CurrentStock,
            "avg_daily_demand":        item.AvgDailyDemand,
            "recommended_reorder_qty": item.RecommendedReorderQty,
        })

        _, err = tx.Exec(ctx,
            `INSERT INTO wwi_business_events
             (id, occurred_at, event_type, severity, entity_key, entity_name, message, trace_id, span_id, details)
             VALUES ($1, NOW(), 'LOW_STOCK', 'HIGH', $2, $3, $4, $5, $6, $7::jsonb)`,
            newUUID(), entityKey, item.StockItemName, message, traceID, spanID, details,
        )
        if err != nil {
            slog.Warn("failed to insert business event", "item", item.StockItemKey, "err", err)
            continue
        }
        inserted++
        businessEventsTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("event_type", "LOW_STOCK")))
    }

    if err := tx.Commit(ctx); err != nil {
        return fmt.Errorf("commit stock events: %w", err)
    }
    span.SetAttributes(attribute.Int("events_inserted", inserted))
    return nil
}
306 backend/analytics/internal/scheduler/scheduler.go Normal file
@@ -0,0 +1,306 @@
package scheduler

import (
    "context"
    "database/sql"
    "fmt"
    "log/slog"
    "time"

    "github.com/jackc/pgx/v5/pgxpool"
    "github.com/robfig/cron/v3"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/trace"

    "otel-bi-analytics/internal/analytics"
    "otel-bi-analytics/internal/persistence"
)

var (
    schedTracer = otel.Tracer("otel-bi.scheduler")
    schedMeter  = otel.Meter("otel-bi.scheduler")

    jobDurationSeconds, _ = schedMeter.Float64Histogram(
        "scheduler.job.duration_seconds",
        metric.WithDescription("Scheduler job execution duration"),
        metric.WithUnit("s"),
    )
    jobSuccessTotal, _ = schedMeter.Int64Counter(
        "scheduler.job.success_total",
        metric.WithDescription("Scheduler jobs completed successfully"),
    )
    jobFailureTotal, _ = schedMeter.Int64Counter(
        "scheduler.job.failure_total",
        metric.WithDescription("Scheduler jobs that failed"),
    )
    jobRecordsProcessed, _ = schedMeter.Int64Counter(
        "scheduler.job.records_processed_total",
        metric.WithDescription("Records processed by scheduler jobs"),
    )
)

// Scheduler wraps robfig/cron and owns all job implementations.
type Scheduler struct {
    awDB   *sql.DB
    wwiDB  *sql.DB
    pgPool *pgxpool.Pool
    topN   int
    cron   *cron.Cron
}

func New(awDB, wwiDB *sql.DB, pgPool *pgxpool.Pool, defaultTopN int) *Scheduler {
    return &Scheduler{
        awDB:   awDB,
        wwiDB:  wwiDB,
        pgPool: pgPool,
        topN:   defaultTopN,
        cron:   cron.New(cron.WithLocation(time.UTC), cron.WithSeconds()),
    }
}

// Start registers all jobs and starts the cron runner.
func (s *Scheduler) Start() {
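    // Six-field specs (seconds first) because the runner uses cron.WithSeconds();
    // all schedules run in UTC. AddFunc errors are ignored: the specs are constants.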
s.cron.AddFunc("0 0 2 * * *", s.jobAWForecast)
|
||||
s.cron.AddFunc("0 30 2 * * *", s.jobAWScores)
|
||||
s.cron.AddFunc("0 0 3 * * *", s.jobAWDataQuality)
|
||||
s.cron.AddFunc("0 30 3 * * *", s.jobAWAnomalyDetection)
|
||||
s.cron.AddFunc("0 0 * * * *", s.jobWWIReorder)
|
||||
s.cron.AddFunc("0 30 3 * * *", s.jobWWISupplierScores)
|
||||
s.cron.AddFunc("0 30 * * * *", s.jobWWIEvents)
|
||||
s.cron.AddFunc("0 0 4 * * *", s.jobWWIDataQuality)
|
||||
s.cron.Start()
|
||||
slog.Info("scheduler started", "jobs", len(s.cron.Entries()))
|
||||
}
|
||||
|
||||
// Stop gracefully stops the cron runner.
|
||||
func (s *Scheduler) Stop() {
|
||||
ctx := s.cron.Stop()
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
// TriggerAWJob runs an AW job immediately in a goroutine.
|
||||
func (s *Scheduler) TriggerAWJob(jobName string) error {
|
||||
fns := map[string]func(){
|
||||
"forecast": s.jobAWForecast,
|
||||
"scores": s.jobAWScores,
|
||||
"data_quality": s.jobAWDataQuality,
|
||||
"anomaly_detection": s.jobAWAnomalyDetection,
|
||||
}
|
||||
fn, ok := fns[jobName]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown AW job: %s", jobName)
|
||||
}
|
||||
go fn()
|
||||
return nil
|
||||
}
|
||||
|
||||
// TriggerWWIJob runs a WWI job immediately in a goroutine.
|
||||
func (s *Scheduler) TriggerWWIJob(jobName string) error {
|
||||
fns := map[string]func(){
|
||||
"reorder": s.jobWWIReorder,
|
||||
"supplier_scores": s.jobWWISupplierScores,
|
||||
"events": s.jobWWIEvents,
|
||||
"data_quality": s.jobWWIDataQuality,
|
||||
}
|
||||
fn, ok := fns[jobName]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown WWI job: %s", jobName)
|
||||
}
|
||||
go fn()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// runJob wraps a job function with OTel tracing, metrics, and audit logging.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func (s *Scheduler) runJob(jobName, domain string, fn func(ctx context.Context) (int, error)) {
|
||||
ctx := context.Background()
|
||||
ctx, span := schedTracer.Start(ctx,
|
||||
"scheduler."+jobName,
|
||||
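        // Each run starts as a fresh root span: scheduled work has no incoming
        // request context to continue.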
        trace.WithNewRoot(),
        trace.WithSpanKind(trace.SpanKindInternal),
        trace.WithAttributes(
            attribute.String("job.name", jobName),
            attribute.String("job.domain", domain),
        ),
    )
    defer span.End()

    traceID, spanID := spanCtx(span)
    jobID := persistence.RecordJobStart(ctx, s.pgPool, jobName, domain, traceID, spanID)
    startedAt := time.Now()

    slog.Info("job started", "job", jobName)

    records, err := fn(ctx)

    duration := time.Since(startedAt).Seconds()
    attrs := metric.WithAttributes(
        attribute.String("job.name", jobName),
        attribute.String("job.domain", domain),
    )
    jobDurationSeconds.Record(ctx, duration, attrs)

    if err != nil {
        slog.Error("job failed", "job", jobName, "err", err, "duration_s", duration)
        span.RecordError(err)
        span.SetAttributes(attribute.String("job.status", "failure"))
        persistence.RecordJobFailure(ctx, s.pgPool, jobID, startedAt, err.Error())
        persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
            Action: "job.failed", ActorType: "scheduler", ActorID: jobName,
            Domain: domain, Service: "otel-bi-analytics", Status: "failure",
            Payload: map[string]any{"job_name": jobName, "error": err.Error()},
        })
        jobFailureTotal.Add(ctx, 1, attrs)
        return
    }

    slog.Info("job completed", "job", jobName, "records", records, "duration_s", duration)
    span.SetAttributes(
        attribute.String("job.status", "success"),
        attribute.Int("job.records_processed", records),
    )
    persistence.RecordJobComplete(ctx, s.pgPool, jobID, startedAt, records)
    persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
        Action: "job.completed", ActorType: "scheduler", ActorID: jobName,
        Domain: domain, Service: "otel-bi-analytics",
        Payload: map[string]any{"job_name": jobName, "records_processed": records},
    })
    jobSuccessTotal.Add(ctx, 1, attrs)
    jobRecordsProcessed.Add(ctx, int64(records), attrs)
}

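// spanCtx returns the span's trace and span IDs as nullable strings for
// persistence, or nil, nil when the span context is invalid (no-op tracer).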
func spanCtx(span trace.Span) (traceID, spanID *string) {
    sctx := span.SpanContext()
    if !sctx.IsValid() {
        return nil, nil
    }
    tid := sctx.TraceID().String()
    sid := sctx.SpanID().String()
    return &tid, &sid
}

// ---------------------------------------------------------------------------
// AW jobs
// ---------------------------------------------------------------------------

func (s *Scheduler) jobAWForecast() {
    s.runJob("aw.daily.forecast", "aw", func(ctx context.Context) (int, error) {
        data, err := analytics.AWGetSalesForecast(ctx, s.awDB, 30)
        if err != nil {
            return 0, err
        }
        persistence.PersistForecast(ctx, s.pgPool, data, 30, "scheduler.aw.daily.forecast")
        return len(data), nil
    })
}

func (s *Scheduler) jobAWScores() {
    s.runJob("aw.daily.scores", "aw", func(ctx context.Context) (int, error) {
        reps, err := analytics.AWGetRepScores(ctx, s.awDB, s.topN)
        if err != nil {
            return 0, err
        }
        products, err := analytics.AWGetProductDemand(ctx, s.awDB, s.topN)
        if err != nil {
            return 0, err
        }
        persistence.PersistRepScores(ctx, s.pgPool, reps, s.topN, "scheduler.aw.daily.scores")
        persistence.PersistProductDemand(ctx, s.pgPool, products, s.topN, "scheduler.aw.daily.scores")
        return len(reps) + len(products), nil
    })
}

func (s *Scheduler) jobAWDataQuality() {
    s.runJob("aw.daily.data_quality", "aw", func(ctx context.Context) (int, error) {
        report, err := analytics.AWRunDataQualityCheck(ctx, s.awDB)
        if err != nil {
            return 0, err
        }
        persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
            Action: "job.completed", ActorType: "scheduler", ActorID: "aw.daily.data_quality",
            Domain: "aw", Service: "otel-bi-analytics", EntityType: "data_quality",
            Status: report.Status,
            Payload: map[string]any{"status": report.Status, "failed_checks": report.FailedChecks},
        })
        return len(report.Checks), nil
    })
}

func (s *Scheduler) jobAWAnomalyDetection() {
    s.runJob("aw.daily.anomaly_detection", "aw", func(ctx context.Context) (int, error) {
        data, err := analytics.AWRunAnomalyDetection(ctx, s.awDB)
        if err != nil {
            return 0, err
        }
        persistence.PersistAnomalyRun(ctx, s.pgPool, data, "scheduler.aw.daily.anomaly_detection")
        return len(data), nil
    })
}

// ---------------------------------------------------------------------------
// WWI jobs
// ---------------------------------------------------------------------------

func (s *Scheduler) jobWWIReorder() {
    s.runJob("wwi.hourly.reorder", "wwi", func(ctx context.Context) (int, error) {
        data, err := analytics.WWIGetReorderRecommendations(ctx, s.wwiDB)
        if err != nil {
            return 0, err
        }
        persistence.PersistReorderRecommendations(ctx, s.pgPool, data, "scheduler.wwi.hourly.reorder")
        if err := persistence.GenerateStockEvents(ctx, s.pgPool, data); err != nil {
            slog.Warn("generate_stock_events failed", "err", err)
        }
        return len(data), nil
    })
}

func (s *Scheduler) jobWWISupplierScores() {
    s.runJob("wwi.daily.supplier_scores", "wwi", func(ctx context.Context) (int, error) {
        data, err := analytics.WWIGetSupplierScores(ctx, s.wwiDB, s.topN)
        if err != nil {
            return 0, err
        }
        persistence.PersistSupplierScores(ctx, s.pgPool, data, s.topN, "scheduler.wwi.daily.supplier_scores")
        return len(data), nil
    })
}

func (s *Scheduler) jobWWIEvents() {
    s.runJob("wwi.hourly.events", "wwi", func(ctx context.Context) (int, error) {
        data, err := analytics.WWIGetReorderRecommendations(ctx, s.wwiDB)
        if err != nil {
            return 0, err
        }
        var highUrgency []analytics.ReorderRecommendation
        for _, item := range data {
            if item.Urgency == "HIGH" {
                highUrgency = append(highUrgency, item)
            }
        }
        if err := persistence.GenerateStockEvents(ctx, s.pgPool, highUrgency); err != nil {
            slog.Warn("generate_stock_events (events job) failed", "err", err)
        }
        return len(highUrgency), nil
    })
}

func (s *Scheduler) jobWWIDataQuality() {
    s.runJob("wwi.daily.data_quality", "wwi", func(ctx context.Context) (int, error) {
        report, err := analytics.WWIRunDataQualityCheck(ctx, s.wwiDB)
        if err != nil {
            return 0, err
        }
        persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
            Action: "job.completed", ActorType: "scheduler", ActorID: "wwi.daily.data_quality",
            Domain: "wwi", Service: "otel-bi-analytics", EntityType: "data_quality",
            Status: report.Status,
            Payload: map[string]any{"status": report.Status, "failed_checks": report.FailedChecks},
        })
        return len(report.Checks), nil
    })
}
@@ -1 +0,0 @@
"""Backend application package."""
0 backend/app/core/__init__.py Normal file
174 backend/app/core/audit.py Normal file
@@ -0,0 +1,174 @@
from __future__ import annotations

import logging
from datetime import datetime, timezone
from uuid import uuid4

from opentelemetry import trace
from sqlalchemy import DateTime, Integer, String, Text, JSON
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, sessionmaker, Session

LOGGER = logging.getLogger(__name__)


def _utcnow() -> datetime:
    return datetime.now(timezone.utc)

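# Formats the active trace/span IDs as 32- and 16-char lowercase hex, matching
# the W3C trace context form stored in the *_id columns below.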
def current_span_context() -> tuple[str | None, str | None]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return None, None
    return f"{ctx.trace_id:032x}", f"{ctx.span_id:016x}"


class SharedBase(DeclarativeBase):
    pass


class AuditLog(SharedBase):
    __tablename__ = "audit_log"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    occurred_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    action: Mapped[str] = mapped_column(String(100), index=True)
    status: Mapped[str] = mapped_column(String(20), default="success")
    actor_type: Mapped[str] = mapped_column(String(20), index=True)
    actor_id: Mapped[str | None] = mapped_column(String(200), nullable=True)
    domain: Mapped[str] = mapped_column(String(50), index=True)
    service: Mapped[str] = mapped_column(String(50), index=True)
    entity_type: Mapped[str | None] = mapped_column(String(100), nullable=True, index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[dict] = mapped_column(JSON, default=dict)


class JobExecution(SharedBase):
    __tablename__ = "job_executions"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    started_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    completed_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True)
    job_name: Mapped[str] = mapped_column(String(100), index=True)
    domain: Mapped[str] = mapped_column(String(50), index=True)
    status: Mapped[str] = mapped_column(String(20), index=True)
    records_processed: Mapped[int | None] = mapped_column(Integer, nullable=True)
    duration_ms: Mapped[int | None] = mapped_column(Integer, nullable=True)
    error_message: Mapped[str | None] = mapped_column(Text, nullable=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)


class ExportRecord(SharedBase):
    __tablename__ = "export_records"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    domain: Mapped[str] = mapped_column(String(50), index=True)
    service: Mapped[str] = mapped_column(String(50))
    source_view: Mapped[str] = mapped_column(String(100), index=True)
    format: Mapped[str] = mapped_column(String(10))
    filters_applied: Mapped[dict] = mapped_column(JSON, default=dict)
    row_count: Mapped[int] = mapped_column(Integer)
    file_size_bytes: Mapped[int] = mapped_column(Integer)
    actor_id: Mapped[str | None] = mapped_column(String(200), nullable=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)


def append_audit(
    factory: sessionmaker[Session],
    *,
    action: str,
    actor_type: str,
    domain: str,
    service: str,
    entity_type: str | None = None,
    actor_id: str | None = None,
    status: str = "success",
    payload: dict | None = None,
) -> None:
    trace_id, span_id = current_span_context()
    try:
        with factory() as session:
            session.add(AuditLog(
                action=action,
                actor_type=actor_type,
                actor_id=actor_id,
                domain=domain,
                service=service,
                entity_type=entity_type,
                trace_id=trace_id,
                span_id=span_id,
                status=status,
                payload=payload or {},
            ))
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to write audit record (action=%s): %s", action, exc)


def record_job_start(
    factory: sessionmaker[Session],
    job_name: str,
    domain: str,
    trace_id: str | None,
    span_id: str | None,
) -> str:
    job_id = str(uuid4())
    try:
        with factory() as session:
            session.add(JobExecution(
                id=job_id,
                job_name=job_name,
                domain=domain,
                status="running",
                trace_id=trace_id,
                span_id=span_id,
            ))
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to record job start (job=%s): %s", job_name, exc)
    return job_id


def record_job_complete(
    factory: sessionmaker[Session],
    job_id: str,
    started_at: datetime,
    records_processed: int,
) -> None:
    now = datetime.now(timezone.utc)
    duration_ms = int((now - started_at).total_seconds() * 1000)
    try:
        with factory() as session:
            session.query(JobExecution).filter_by(id=job_id).update({
                "status": "success",
                "completed_at": now,
                "records_processed": records_processed,
                "duration_ms": duration_ms,
            })
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to record job completion (id=%s): %s", job_id, exc)


def record_job_failure(
    factory: sessionmaker[Session],
    job_id: str,
    started_at: datetime,
    error_message: str,
) -> None:
    now = datetime.now(timezone.utc)
    duration_ms = int((now - started_at).total_seconds() * 1000)
    try:
        with factory() as session:
            session.query(JobExecution).filter_by(id=job_id).update({
                "status": "failure",
                "completed_at": now,
                "duration_ms": duration_ms,
                "error_message": error_message[:2000],
            })
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to record job failure (id=%s): %s", job_id, exc)
@@ -1,7 +1,6 @@
from __future__ import annotations

from functools import lru_cache
from urllib.parse import quote_plus

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -22,30 +21,20 @@ class Settings(BaseSettings):
    api_port: int = 8000

    cors_origins: str = "http://localhost:5173"
    request_timeout_seconds: float = 20.0

    mssql_host: str = "localhost"
    mssql_port: int = 1433
    mssql_username: str = "sa"
    mssql_password: str = "Password!123"
    mssql_driver: str = "ODBC Driver 18 for SQL Server"
    mssql_trust_server_certificate: bool = False
    # Go analytics service
    analytics_service_url: str = "http://localhost:8080"

    wwi_database: str = "WorldWideImporters"
    aw_database: str = "AdventureWorks2022DWH"
    wwi_connection_string: str | None = None
    aw_connection_string: str | None = None
    # PostgreSQL — write store for derived data
    postgres_host: str = "localhost"
    postgres_port: int = 5432
    postgres_database: str = "otel_bi_app"
    postgres_username: str = "otel_bi_app"
    postgres_password: str = "otel_bi_app"
    postgres_sslmode: str = "require"
    postgres_database: str = "otel_bi"
    postgres_username: str = "otel_bi"
    postgres_password: str = "otel_bi"
    postgres_sslmode: str = "prefer"
    postgres_connection_string: str | None = None
    postgres_required: bool = True
    query_service_url: str = "http://localhost:8101"
    analytics_service_url: str = "http://localhost:8102"
    persistence_service_url: str = "http://localhost:8103"

    # Frontend OIDC JWT validation
    require_frontend_auth: bool = True
    frontend_jwt_issuer_url: str = ""
    frontend_jwt_audience: str = ""
@@ -53,18 +42,21 @@
    frontend_jwt_algorithm: str = "RS256"
    frontend_required_scopes: str = ""
    frontend_clock_skew_seconds: int = Field(default=30, ge=0, le=300)
    internal_service_auth_enabled: bool = True
    internal_service_shared_secret: str = "change-me"
    internal_service_token_ttl_seconds: int = Field(default=120, ge=30, le=900)
    internal_service_token_audience: str = "bi-internal"
    internal_service_allowed_issuers: str = "api-gateway"
    internal_token_clock_skew_seconds: int = Field(default=15, ge=0, le=120)

    # Frontend OIDC client config (served via /api/config)
    frontend_oidc_client_id: str = ""
    frontend_oidc_scope: str = "openid profile email"

    # OpenTelemetry
    otel_service_name: str = "otel-bi-backend"
    otel_service_namespace: str = "final-thesis"
    otel_collector_endpoint: str = "http://localhost:4318"
    otel_export_timeout_ms: int = 10000

    # Report output — points at the K8s CSI / SMB mountpoint in production
    report_output_dir: str = "/tmp/otel-bi-reports"

    # Analytics defaults (forwarded to Go service as query params)
    forecast_horizon_days: int = Field(default=30, ge=7, le=180)
    default_history_days: int = Field(default=365, ge=30, le=1460)
    ranking_default_top_n: int = Field(default=10, ge=3, le=100)
@@ -72,58 +64,22 @@

    @property
    def cors_origins_list(self) -> list[str]:
        return [
            origin.strip() for origin in self.cors_origins.split(",") if origin.strip()
        ]
        return [o.strip() for o in self.cors_origins.split(",") if o.strip()]

    @property
    def frontend_required_scopes_list(self) -> list[str]:
        return [
            scope.strip()
            for scope in self.frontend_required_scopes.split(" ")
            if scope.strip()
        ]

    @property
    def internal_service_allowed_issuers_list(self) -> list[str]:
        return [
            issuer.strip()
            for issuer in self.internal_service_allowed_issuers.split(",")
            if issuer.strip()
        ]

    def _build_mssql_connection_url(self, database: str) -> str:
        driver = quote_plus(self.mssql_driver)
        user = quote_plus(self.mssql_username)
        password = quote_plus(self.mssql_password)
        trust_cert = "yes" if self.mssql_trust_server_certificate else "no"
        return (
            f"mssql+pyodbc://{user}:{password}@{self.mssql_host}:{self.mssql_port}/{database}"
            f"?driver={driver}&TrustServerCertificate={trust_cert}&ApplicationIntent=ReadOnly"
        )

    @property
    def wwi_connection_url(self) -> str:
        return self.wwi_connection_string or self._build_mssql_connection_url(
            self.wwi_database
        )

    @property
    def aw_connection_url(self) -> str:
        return self.aw_connection_string or self._build_mssql_connection_url(
            self.aw_database
        )
        return [s.strip() for s in self.frontend_required_scopes.split(" ") if s.strip()]

    @property
    def postgres_connection_url(self) -> str:
        if self.postgres_connection_string:
            return self.postgres_connection_string

        from urllib.parse import quote_plus
        user = quote_plus(self.postgres_username)
        password = quote_plus(self.postgres_password)
        return (
            f"postgresql+psycopg://{user}:{password}@{self.postgres_host}:{self.postgres_port}/"
            f"{self.postgres_database}?sslmode={self.postgres_sslmode}"
            f"postgresql+psycopg://{user}:{password}@{self.postgres_host}:{self.postgres_port}"
            f"/{self.postgres_database}?sslmode={self.postgres_sslmode}"
        )
27 backend/app/core/db.py Normal file
@@ -0,0 +1,27 @@
from __future__ import annotations

from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker, Session

from app.core.config import settings

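# pool_pre_ping revalidates pooled connections before use; recycling after
# 30 minutes avoids reusing connections the server side has already dropped.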
def create_postgres_engine() -> Engine:
    return create_engine(
        settings.postgres_connection_url,
        pool_pre_ping=True,
        pool_recycle=1800,
        pool_size=5,
        max_overflow=10,
        future=True,
    )


def create_session_factory(engine: Engine) -> sessionmaker[Session]:
    return sessionmaker(
        bind=engine,
        autoflush=False,
        autocommit=False,
        expire_on_commit=False,
    )
27 backend/app/core/executor.py Normal file
@@ -0,0 +1,27 @@
from __future__ import annotations

import os
from concurrent.futures import ThreadPoolExecutor

# Shared executor for CPU-bound analytics (pandas/sklearn) and sync MSSQL I/O
# (pyodbc is inherently synchronous and blocks the event loop if called directly).
#
# Workers are capped at 8 to avoid overwhelming the MSSQL connection pools.
# In K8s: set ANALYTICS_WORKERS to match the pod's CPU limit.
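# Precedence: ANALYTICS_WORKERS when set and non-zero, else 2x CPU count;
# either way the result is clamped to at most 8 workers.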
_WORKERS = min(8, int(os.environ.get("ANALYTICS_WORKERS", "0")) or (os.cpu_count() or 2) * 2)

_executor: ThreadPoolExecutor | None = None


def get_executor() -> ThreadPoolExecutor:
    global _executor
    if _executor is None:
        _executor = ThreadPoolExecutor(max_workers=_WORKERS, thread_name_prefix="analytics")
    return _executor


def shutdown_executor() -> None:
    global _executor
    if _executor is not None:
        _executor.shutdown(wait=False)
        _executor = None
82 backend/app/core/export.py Normal file
@@ -0,0 +1,82 @@
from __future__ import annotations

import io

from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm
from reportlab.platypus import (
    Paragraph,
    SimpleDocTemplate,
    Spacer,
    Table,
    TableStyle,
)

_PAGE_W, _ = landscape(A4)
_MARGIN = 1.5 * cm
_HEADER_BG = colors.HexColor("#1a56db")
_ROW_BG = colors.HexColor("#eef2ff")


def _pdf_table(rows: list[dict]) -> Table:
    if not rows:
        table_data: list[list] = [["No data available"]]
        n_cols = 1
    else:
        headers = list(rows[0].keys())
        n_cols = len(headers)
        table_data = [headers] + [
            [str(row.get(h, "")) for h in headers] for row in rows
        ]

    col_w = (_PAGE_W - 2 * _MARGIN) / n_cols
    t = Table(table_data, colWidths=[col_w] * n_cols, repeatRows=1)

    style: list = [
        ("BACKGROUND", (0, 0), (-1, 0), _HEADER_BG),
        ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
        ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
        ("FONTSIZE", (0, 0), (-1, 0), 8),
        ("FONTNAME", (0, 1), (-1, -1), "Helvetica"),
        ("FONTSIZE", (0, 1), (-1, -1), 7),
        ("ALIGN", (0, 0), (-1, -1), "LEFT"),
        ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
        ("GRID", (0, 0), (-1, -1), 0.25, colors.HexColor("#d1d5db")),
        ("TOPPADDING", (0, 0), (-1, -1), 3),
        ("BOTTOMPADDING", (0, 0), (-1, -1), 3),
        ("LEFTPADDING", (0, 0), (-1, -1), 5),
        ("RIGHTPADDING", (0, 0), (-1, -1), 5),
    ]
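    # Zebra-stripe the data rows (row 0 is the header and keeps its own style).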
    for i in range(1, len(table_data)):
        bg = _ROW_BG if i % 2 == 1 else colors.white
        style.append(("BACKGROUND", (0, i), (-1, i), bg))

    t.setStyle(TableStyle(style))
    return t


def to_pdf_bytes(rows: list[dict], title: str, subtitle: str = "") -> bytes:
    """Serialise *rows* to a single-sheet PDF and return the raw bytes."""
    buf = io.BytesIO()
    styles = getSampleStyleSheet()
    story = []

    story.append(Paragraph(title, styles["Title"]))
    if subtitle:
        story.append(Spacer(1, 0.2 * cm))
        story.append(Paragraph(subtitle, styles["Normal"]))
    story.append(Spacer(1, 0.5 * cm))
    story.append(_pdf_table(rows))

    doc = SimpleDocTemplate(
        buf,
        pagesize=landscape(A4),
        leftMargin=_MARGIN,
        rightMargin=_MARGIN,
        topMargin=_MARGIN,
        bottomMargin=_MARGIN,
    )
    doc.build(story)
    return buf.getvalue()
@@ -7,24 +7,27 @@ from typing import Any
from fastapi import FastAPI
from opentelemetry import metrics, trace
from opentelemetry.baggage.propagation import W3CBaggagePropagator
from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
from opentelemetry.instrumentation.logging import LoggingInstrumentor
from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
from opentelemetry.propagate import set_global_textmap
from opentelemetry.propagators.composite import CompositePropagator
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry._logs import set_logger_provider

try:
    from opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor
except ImportError:  # pragma: no cover - defensive fallback for minimal envs
except ImportError:
    SystemMetricsInstrumentor = None  # type: ignore[assignment]

from app.core.config import Settings
@@ -36,12 +39,14 @@ LOGGER = logging.getLogger(__name__)
class TelemetryProviders:
    tracer_provider: TracerProvider
    meter_provider: MeterProvider
    logger_provider: LoggerProvider


def configure_otel(settings: Settings) -> TelemetryProviders:
    set_global_textmap(
        CompositePropagator([TraceContextTextMapPropagator(), W3CBaggagePropagator()])
    )

    resource = Resource.create(
        {
            "service.name": settings.otel_service_name,
@@ -50,34 +55,54 @@ def configure_otel(settings: Settings) -> TelemetryProviders:
        }
    )

    trace_exporter = OTLPSpanExporter(
        endpoint=f"{settings.otel_collector_endpoint}/v1/traces",
        timeout=settings.otel_export_timeout_ms / 1000,
    )
    tracer_provider = TracerProvider(resource=resource)
    tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
    tracer_provider.add_span_processor(
        BatchSpanProcessor(
            OTLPSpanExporter(
                endpoint=f"{settings.otel_collector_endpoint}/v1/traces",
                timeout=settings.otel_export_timeout_ms / 1000,
            )
        )
    )
    trace.set_tracer_provider(tracer_provider)

    metric_reader = PeriodicExportingMetricReader(
        exporter=OTLPMetricExporter(
            endpoint=f"{settings.otel_collector_endpoint}/v1/metrics",
            timeout=settings.otel_export_timeout_ms / 1000,
        ),
        export_interval_millis=10000,
    meter_provider = MeterProvider(
        resource=resource,
        metric_readers=[
            PeriodicExportingMetricReader(
                exporter=OTLPMetricExporter(
                    endpoint=f"{settings.otel_collector_endpoint}/v1/metrics",
                    timeout=settings.otel_export_timeout_ms / 1000,
                ),
                export_interval_millis=10_000,
            )
        ],
    )
    meter_provider = MeterProvider(resource=resource, metric_readers=[metric_reader])
    metrics.set_meter_provider(meter_provider)

    logger_provider = LoggerProvider(resource=resource)
    logger_provider.add_log_record_processor(
        BatchLogRecordProcessor(
            OTLPLogExporter(
                endpoint=f"{settings.otel_collector_endpoint}/v1/logs",
                timeout=settings.otel_export_timeout_ms / 1000,
            )
        )
    )
    set_logger_provider(logger_provider)

    LoggingInstrumentor().instrument(set_logging_format=True)

    if SystemMetricsInstrumentor is not None:
        SystemMetricsInstrumentor().instrument()
    else:
        LOGGER.warning(
            "System metrics instrumentor not available, runtime host metrics disabled."
        )
    LOGGER.info("OpenTelemetry providers configured")
        LOGGER.warning("SystemMetricsInstrumentor not available — skipping.")

    LOGGER.info("OTel providers configured", extra={"service.name": settings.otel_service_name})
    return TelemetryProviders(
        tracer_provider=tracer_provider, meter_provider=meter_provider
        tracer_provider=tracer_provider,
        meter_provider=meter_provider,
        logger_provider=logger_provider,
    )


@@ -85,19 +110,15 @@ def instrument_fastapi(app: FastAPI) -> None:
    FastAPIInstrumentor.instrument_app(app)


def instrument_sqlalchemy_engines(engines: dict[str, Any]) -> None:
def instrument_sqlalchemy(engines: dict[str, Any]) -> None:
    for engine in engines.values():
        SQLAlchemyInstrumentor().instrument(engine=engine)


def instrument_httpx_clients() -> None:
    HTTPXClientInstrumentor().instrument()


def shutdown_otel(providers: TelemetryProviders) -> None:
    HTTPXClientInstrumentor().uninstrument()
    if SystemMetricsInstrumentor is not None:
        SystemMetricsInstrumentor().uninstrument()
    LoggingInstrumentor().uninstrument()
    providers.meter_provider.shutdown()
    providers.tracer_provider.shutdown()
    providers.logger_provider.shutdown()
187 backend/app/core/reports.py Normal file
@@ -0,0 +1,187 @@
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from pathlib import Path

import openpyxl
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm
from reportlab.platypus import (
    PageBreak,
    Paragraph,
    SimpleDocTemplate,
    Spacer,
    Table,
    TableStyle,
)

_PAGE_W, _ = landscape(A4)
_MARGIN = 1.5 * cm
_HEADER_BG = colors.HexColor("#1a56db")
_ROW_BG = colors.HexColor("#eef2ff")

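# Report sections arrive either as a single KPI dict or as a list of row dicts;
# normalise everything to a list so the writers below can treat them uniformly.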
def _normalise(rows: list[dict] | dict) -> list[dict]:
    if isinstance(rows, dict):
        return [rows]
    return rows or []


# ---------------------------------------------------------------------------
# XLSX
# ---------------------------------------------------------------------------

def _save_xlsx(data: dict, path: str, report_id: str, generated_at: str) -> None:
    wb = openpyxl.Workbook()

    ws = wb.active
    ws.title = "Metadata"
    ws.append(["Field", "Value"])
    ws.append(["Generated At (UTC)", generated_at])
    ws.append(["Report ID", report_id])

    sheets = [
        ("AW Sales KPIs", _normalise(data.get("aw_sales_kpis", {}))),
        ("AW Sales History", _normalise(data.get("aw_sales_history", []))),
        ("AW Sales Forecast", _normalise(data.get("aw_sales_forecast", []))),
        ("AW Rep Scores", _normalise(data.get("aw_rep_scores", []))),
        ("AW Product Demand", _normalise(data.get("aw_product_demand", []))),
        ("WWI Sales KPIs", _normalise(data.get("wwi_sales_kpis", {}))),
        ("WWI Stock Recs", _normalise(data.get("wwi_stock_recommendations", []))),
        ("WWI Supplier Scores", _normalise(data.get("wwi_supplier_scores", []))),
        ("WWI Business Events", _normalise(data.get("wwi_business_events", []))),
    ]

    for sheet_name, rows in sheets:
        ws = wb.create_sheet(title=sheet_name)
        if rows:
            ws.append(list(rows[0].keys()))
            for row in rows:
                ws.append([str(v) if v is not None else "" for v in row.values()])
        else:
            ws.append(["No data"])

    wb.save(path)


# ---------------------------------------------------------------------------
# PDF
# ---------------------------------------------------------------------------

def _pdf_table(rows: list[dict] | dict) -> Table:
    data = _normalise(rows)
    if not data:
        table_data: list[list] = [["No data available"]]
        n_cols = 1
    else:
        headers = list(data[0].keys())
        n_cols = len(headers)
        table_data = [headers] + [
            [str(row.get(h, "")) for h in headers] for row in data
        ]

    col_w = (_PAGE_W - 2 * _MARGIN) / n_cols
    t = Table(table_data, colWidths=[col_w] * n_cols, repeatRows=1)

    style: list = [
        ("BACKGROUND", (0, 0), (-1, 0), _HEADER_BG),
        ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
        ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
        ("FONTSIZE", (0, 0), (-1, 0), 8),
        ("FONTNAME", (0, 1), (-1, -1), "Helvetica"),
        ("FONTSIZE", (0, 1), (-1, -1), 7),
        ("ALIGN", (0, 0), (-1, -1), "LEFT"),
        ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
        ("GRID", (0, 0), (-1, -1), 0.25, colors.HexColor("#d1d5db")),
        ("TOPPADDING", (0, 0), (-1, -1), 3),
        ("BOTTOMPADDING", (0, 0), (-1, -1), 3),
        ("LEFTPADDING", (0, 0), (-1, -1), 5),
        ("RIGHTPADDING", (0, 0), (-1, -1), 5),
    ]
    for i in range(1, len(table_data)):
        bg = _ROW_BG if i % 2 == 1 else colors.white
        style.append(("BACKGROUND", (0, i), (-1, i), bg))

    t.setStyle(TableStyle(style))
    return t


def _section(story: list, title: str, rows: list[dict] | dict, styles) -> None:
    story.append(Paragraph(title, styles["Heading2"]))
    story.append(Spacer(1, 0.25 * cm))
    story.append(_pdf_table(rows))
    story.append(Spacer(1, 0.5 * cm))


def _save_pdf(data: dict, path: str, report_id: str, generated_at: str) -> None:
    styles = getSampleStyleSheet()
    story: list = []

    story.append(Paragraph("OTel BI Platform — Generated Report", styles["Title"]))
    story.append(Spacer(1, 0.2 * cm))
    story.append(Paragraph(
        f"Report ID: {report_id} | Generated: {generated_at}",
        styles["Normal"],
    ))
    story.append(Spacer(1, 0.6 * cm))

    story.append(Paragraph("AdventureWorks DW", styles["Heading1"]))
    story.append(Spacer(1, 0.3 * cm))
    _section(story, "Sales KPIs", data.get("aw_sales_kpis", {}), styles)
    _section(story, "Sales History", data.get("aw_sales_history", []), styles)
    story.append(PageBreak())
    _section(story, "Sales Forecast", data.get("aw_sales_forecast", []), styles)
    _section(story, "Rep Scores", data.get("aw_rep_scores", []), styles)
    _section(story, "Product Demand", data.get("aw_product_demand", []), styles)
    story.append(PageBreak())

    story.append(Paragraph("WideWorldImporters DW", styles["Heading1"]))
    story.append(Spacer(1, 0.3 * cm))
    _section(story, "Sales KPIs", data.get("wwi_sales_kpis", {}), styles)
    _section(story, "Stock Recommendations", data.get("wwi_stock_recommendations", []), styles)
    story.append(PageBreak())
    _section(story, "Supplier Scores", data.get("wwi_supplier_scores", []), styles)
    _section(story, "Business Events", data.get("wwi_business_events", []), styles)

    doc = SimpleDocTemplate(
        path,
        pagesize=landscape(A4),
        leftMargin=_MARGIN,
        rightMargin=_MARGIN,
        topMargin=_MARGIN,
        bottomMargin=_MARGIN,
    )
    doc.build(story)


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------

def save_report(data: dict, output_dir: str) -> dict:
    """Generate XLSX and PDF reports from aggregated BI data and write both to *output_dir*."""
    now = datetime.now(timezone.utc)
    ts = now.strftime("%Y%m%d_%H%M%S")
    uid = uuid.uuid4().hex[:6]
    report_id = f"{ts}_{uid}"
    generated_at = now.isoformat()

    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)

    base = f"otel_bi_report_{report_id}"
    xlsx_path = str(out / f"{base}.xlsx")
    pdf_path = str(out / f"{base}.pdf")

    _save_xlsx(data, xlsx_path, report_id, generated_at)
    _save_pdf(data, pdf_path, report_id, generated_at)

    return {
        "report_id": report_id,
        "generated_at": generated_at,
        "xlsx": {"filename": f"{base}.xlsx", "path": xlsx_path},
        "pdf": {"filename": f"{base}.pdf", "path": pdf_path},
    }
@@ -2,11 +2,9 @@ from __future__ import annotations

from dataclasses import dataclass
from functools import lru_cache
from time import time
from uuid import uuid4

import jwt
from fastapi import Depends, Header, HTTPException, status
from fastapi import Depends, HTTPException, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from jwt import InvalidTokenError, PyJWKClient

@@ -23,14 +21,6 @@
    token: str


@dataclass
class InternalPrincipal:
    subject: str
    scopes: list[str]
    claims: dict
    token: str


class FrontendJWTVerifier:
    @property
    def jwks_url(self) -> str:
@@ -66,7 +56,6 @@
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="FRONTEND_JWT_AUDIENCE is not configured.",
            )

        try:
            signing_key = self._jwks_client().get_signing_key_from_jwt(token).key
            claims = jwt.decode(
@@ -92,103 +81,13 @@

        scopes = self._extract_scopes(claims)
        required = settings.frontend_required_scopes_list
        missing = [scope for scope in required if scope not in scopes]
        missing = [s for s in required if s not in scopes]
        if missing:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Missing required scope(s): {', '.join(missing)}",
            )
        return FrontendPrincipal(
            subject=subject, scopes=scopes, claims=claims, token=token
        )


class InternalTokenManager:
    token_type = "internal-service"

    @staticmethod
    def _assert_secret() -> str:
        secret = settings.internal_service_shared_secret
        if not secret or secret == "change-me":
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="INTERNAL_SERVICE_SHARED_SECRET must be configured.",
            )
        if len(secret.encode("utf-8")) < 32:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=(
                    "INTERNAL_SERVICE_SHARED_SECRET must be at least 32 bytes for "
                    "HS256 token signing."
                ),
            )
        return secret

    def mint(
        self,
        *,
        subject: str,
        scopes: list[str],
        source_service: str,
    ) -> str:
        now = int(time())
        payload = {
            "sub": subject,
            "scope": " ".join(scopes),
            "iss": source_service,
            "aud": settings.internal_service_token_audience,
            "typ": self.token_type,
            "iat": now,
            "nbf": now,
            "exp": now + settings.internal_service_token_ttl_seconds,
            "jti": str(uuid4()),
        }
        return jwt.encode(payload, self._assert_secret(), algorithm="HS256")

    def verify(self, token: str) -> InternalPrincipal:
        try:
            claims = jwt.decode(
                token,
                self._assert_secret(),
                algorithms=["HS256"],
                audience=settings.internal_service_token_audience,
                options={
                    "require": ["sub", "iss", "aud", "exp", "iat", "nbf", "jti", "typ"]
                },
                leeway=settings.internal_token_clock_skew_seconds,
            )
        except InvalidTokenError as exc:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid internal service token.",
            ) from exc

        subject = str(claims.get("sub") or "")
        if not subject:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Internal token missing subject.",
            )

        issuer = str(claims.get("iss") or "")
        if issuer not in settings.internal_service_allowed_issuers_list:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Internal token issuer is not allowed.",
            )

        token_type = str(claims.get("typ") or "")
        if token_type != self.token_type:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Internal token type is invalid.",
            )

        scope = claims.get("scope")
        scopes = [item for item in str(scope).split(" ") if item] if scope else []
        return InternalPrincipal(
            subject=subject, scopes=scopes, claims=claims, token=token
        )
        return FrontendPrincipal(subject=subject, scopes=scopes, claims=claims, token=token)


@lru_cache(maxsize=1)
@@ -196,36 +95,14 @@ def get_frontend_verifier() -> FrontendJWTVerifier:
    return FrontendJWTVerifier()


@lru_cache(maxsize=1)
def get_internal_token_manager() -> InternalTokenManager:
    return InternalTokenManager()


def require_frontend_principal(
    credentials: HTTPAuthorizationCredentials | None = Depends(BEARER_SCHEME),
) -> FrontendPrincipal:
    if not settings.require_frontend_auth:
        return FrontendPrincipal(subject="anonymous", scopes=[], claims={}, token="")

    if credentials is None or credentials.scheme.lower() != "bearer":
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Missing bearer token.",
        )
    return get_frontend_verifier().verify(credentials.credentials)


def require_internal_principal(
    internal_token: str | None = Header(default=None, alias="x-internal-service-token"),
) -> InternalPrincipal:
    if not settings.internal_service_auth_enabled:
        return InternalPrincipal(
            subject="internal-unauth", scopes=[], claims={}, token=""
        )

    if not internal_token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Missing x-internal-service-token header.",
        )
    return get_internal_token_manager().verify(internal_token)
@@ -1 +0,0 @@
|
||||
"""Database helpers for warehouse connections."""
|
||||
@@ -1,34 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy import create_engine, event
|
||||
from sqlalchemy.engine import Engine
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
|
||||
def _create_read_only_engine(connection_url: str) -> Engine:
|
||||
engine = create_engine(
|
||||
connection_url, pool_pre_ping=True, pool_recycle=3600, future=True
|
||||
)
|
||||
|
||||
@event.listens_for(engine, "connect")
|
||||
def _on_connect(dbapi_connection, _connection_record) -> None:
|
||||
cursor = dbapi_connection.cursor()
|
||||
try:
|
||||
cursor.execute("SET TRANSACTION ISOLATION LEVEL READ COMMITTED;")
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
return engine
|
||||
|
||||
|
||||
def create_warehouse_engines() -> dict[str, Engine]:
|
||||
return {
|
||||
"wwi": _create_read_only_engine(settings.wwi_connection_url),
|
||||
"aw": _create_read_only_engine(settings.aw_connection_url),
|
||||
}
|
||||
|
||||
|
||||
def dispose_engines(engines: dict[str, Engine]) -> None:
|
||||
for engine in engines.values():
|
||||
engine.dispose()
|
||||
@@ -1,27 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
|
||||
from app.core.config import settings
|
||||
from app.db.postgres_models import Base
|
||||
|
||||
|
||||
def create_postgres_engine() -> Engine:
|
||||
return create_engine(
|
||||
settings.postgres_connection_url,
|
||||
pool_pre_ping=True,
|
||||
pool_recycle=3600,
|
||||
future=True,
|
||||
)
|
||||
|
||||
|
||||
def initialize_postgres_schema(engine: Engine) -> None:
|
||||
Base.metadata.create_all(bind=engine)
|
||||
|
||||
|
||||
def create_postgres_session_factory(engine: Engine) -> sessionmaker[Session]:
|
||||
return sessionmaker(
|
||||
bind=engine, autoflush=False, autocommit=False, expire_on_commit=False
|
||||
)
|
||||
@@ -1,86 +0,0 @@
from __future__ import annotations

from datetime import datetime, timezone
from uuid import uuid4

from sqlalchemy import JSON, DateTime, Float, Integer, String, Text
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


def _utcnow() -> datetime:
    return datetime.now(timezone.utc)


class Base(DeclarativeBase):
    pass


class AuditLog(Base):
    __tablename__ = "audit_logs"

    id: Mapped[str] = mapped_column(
        String(36), primary_key=True, default=lambda: str(uuid4())
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=_utcnow, index=True
    )
    method: Mapped[str] = mapped_column(String(12), index=True)
    path: Mapped[str] = mapped_column(String(300), index=True)
    query_string: Mapped[str] = mapped_column(String(1000), default="")
    status_code: Mapped[int] = mapped_column(Integer, index=True)
    duration_ms: Mapped[float] = mapped_column(Float)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True, index=True)
    client_ip: Mapped[str | None] = mapped_column(String(120), nullable=True)
    user_agent: Mapped[str | None] = mapped_column(Text, nullable=True)
    details: Mapped[dict] = mapped_column(JSON, default=dict)


class ForecastRun(Base):
    __tablename__ = "forecast_runs"

    id: Mapped[str] = mapped_column(
        String(36), primary_key=True, default=lambda: str(uuid4())
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=_utcnow, index=True
    )
    horizon_days: Mapped[int] = mapped_column(Integer)
    point_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True, index=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class RankingRun(Base):
    __tablename__ = "ranking_runs"

    id: Mapped[str] = mapped_column(
        String(36), primary_key=True, default=lambda: str(uuid4())
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=_utcnow, index=True
    )
    top_n: Mapped[int] = mapped_column(Integer)
    item_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True, index=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class RecommendationRun(Base):
    __tablename__ = "recommendation_runs"

    id: Mapped[str] = mapped_column(
        String(36), primary_key=True, default=lambda: str(uuid4())
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=_utcnow, index=True
    )
    item_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True, index=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)
@@ -1,167 +0,0 @@
from __future__ import annotations

AW_DAILY_SALES_QUERIES = [
    """
    SELECT
        CAST(d.FullDateAlternateKey AS date) AS sale_date,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimDate AS d ON d.DateKey = f.OrderDateKey
    GROUP BY CAST(d.FullDateAlternateKey AS date)
    ORDER BY sale_date;
    """,
    """
    SELECT
        CAST(OrderDate AS date) AS sale_date,
        SUM(SalesAmount) AS revenue,
        SUM(TotalProductCost) AS cost,
        SUM(OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales
    GROUP BY CAST(OrderDate AS date)
    ORDER BY sale_date;
    """,
]

WWI_DAILY_SALES_QUERIES = [
    """
    SELECT
        CAST(i.InvoiceDate AS date) AS sale_date,
        SUM(il.ExtendedPrice) AS revenue,
        SUM(il.TaxAmount) AS cost,
        SUM(il.Quantity) AS quantity,
        COUNT_BIG(DISTINCT i.InvoiceID) AS orders
    FROM Sales.Invoices AS i
    INNER JOIN Sales.InvoiceLines AS il ON il.InvoiceID = i.InvoiceID
    GROUP BY CAST(i.InvoiceDate AS date)
    ORDER BY sale_date;
    """,
    """
    SELECT
        CAST(i.InvoiceDate AS date) AS sale_date,
        SUM(il.UnitPrice * il.Quantity) AS revenue,
        CAST(0 AS float) AS cost,
        SUM(il.Quantity) AS quantity,
        COUNT_BIG(DISTINCT i.InvoiceID) AS orders
    FROM Sales.Invoices AS i
    INNER JOIN Sales.InvoiceLines AS il ON il.InvoiceID = i.InvoiceID
    GROUP BY CAST(i.InvoiceDate AS date)
    ORDER BY sale_date;
    """,
]

AW_PRODUCT_PERFORMANCE_QUERIES = [
    """
    SELECT
        p.ProductAlternateKey AS product_id,
        p.EnglishProductName AS product_name,
        COALESCE(sc.EnglishProductSubcategoryName, 'Unknown') AS category_name,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
    LEFT JOIN dbo.DimProductSubcategory AS sc ON sc.ProductSubcategoryKey = p.ProductSubcategoryKey
    GROUP BY p.ProductAlternateKey, p.EnglishProductName, sc.EnglishProductSubcategoryName
    ORDER BY revenue DESC;
    """,
    """
    SELECT
        CAST(ProductKey AS nvarchar(100)) AS product_id,
        CAST(ProductKey AS nvarchar(100)) AS product_name,
        'Unknown' AS category_name,
        SUM(SalesAmount) AS revenue,
        SUM(TotalProductCost) AS cost,
        SUM(OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales
    GROUP BY ProductKey
    ORDER BY revenue DESC;
    """,
]

WWI_PRODUCT_PERFORMANCE_QUERIES = [
    """
    SELECT
        CAST(s.StockItemID AS nvarchar(100)) AS product_id,
        s.StockItemName AS product_name,
        COALESCE(cg.StockGroupName, 'Unknown') AS category_name,
        SUM(il.ExtendedPrice) AS revenue,
        SUM(il.TaxAmount) AS cost,
        SUM(il.Quantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM Sales.InvoiceLines AS il
    INNER JOIN Warehouse.StockItems AS s ON s.StockItemID = il.StockItemID
    LEFT JOIN Warehouse.StockItemStockGroups AS sig ON sig.StockItemID = s.StockItemID
    LEFT JOIN Warehouse.StockGroups AS cg ON cg.StockGroupID = sig.StockGroupID
    GROUP BY s.StockItemID, s.StockItemName, cg.StockGroupName
    ORDER BY revenue DESC;
    """,
    """
    SELECT
        CAST(il.StockItemID AS nvarchar(100)) AS product_id,
        CAST(il.StockItemID AS nvarchar(100)) AS product_name,
        'Unknown' AS category_name,
        SUM(il.UnitPrice * il.Quantity) AS revenue,
        CAST(0 AS float) AS cost,
        SUM(il.Quantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM Sales.InvoiceLines AS il
    GROUP BY il.StockItemID
    ORDER BY revenue DESC;
    """,
]

AW_CUSTOMER_QUERIES = [
    """
    SELECT
        CAST(c.CustomerAlternateKey AS nvarchar(100)) AS customer_id,
        c.FirstName + ' ' + c.LastName AS customer_name,
        SUM(f.SalesAmount) AS revenue,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimCustomer AS c ON c.CustomerKey = f.CustomerKey
    GROUP BY c.CustomerAlternateKey, c.FirstName, c.LastName
    ORDER BY revenue DESC;
    """,
    """
    SELECT
        CAST(CustomerKey AS nvarchar(100)) AS customer_id,
        CAST(CustomerKey AS nvarchar(100)) AS customer_name,
        SUM(SalesAmount) AS revenue,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales
    GROUP BY CustomerKey
    ORDER BY revenue DESC;
    """,
]

WWI_CUSTOMER_QUERIES = [
    """
    SELECT
        CAST(c.CustomerID AS nvarchar(100)) AS customer_id,
        c.CustomerName AS customer_name,
        SUM(il.ExtendedPrice) AS revenue,
        COUNT_BIG(DISTINCT i.InvoiceID) AS orders
    FROM Sales.Invoices AS i
    INNER JOIN Sales.InvoiceLines AS il ON il.InvoiceID = i.InvoiceID
    INNER JOIN Sales.Customers AS c ON c.CustomerID = i.CustomerID
    GROUP BY c.CustomerID, c.CustomerName
    ORDER BY revenue DESC;
    """,
    """
    SELECT
        CAST(i.CustomerID AS nvarchar(100)) AS customer_id,
        CAST(i.CustomerID AS nvarchar(100)) AS customer_name,
        SUM(il.UnitPrice * il.Quantity) AS revenue,
        COUNT_BIG(DISTINCT i.InvoiceID) AS orders
    FROM Sales.Invoices AS i
    INNER JOIN Sales.InvoiceLines AS il ON il.InvoiceID = i.InvoiceID
    GROUP BY i.CustomerID
    ORDER BY revenue DESC;
    """,
]
0 backend/app/domain/__init__.py Normal file
0 backend/app/domain/aw/__init__.py Normal file
258 backend/app/domain/aw/analytics.py Normal file
@@ -0,0 +1,258 @@
from __future__ import annotations

import logging

from opentelemetry import metrics, trace
from sqlalchemy.orm import sessionmaker, Session

from app.core.audit import append_audit
from app.domain.aw.models import AWSalesForecast, AWRepScore, AWProductDemand, AWAnomalyRun

LOGGER = logging.getLogger(__name__)

tracer = trace.get_tracer("otel-bi.domain.aw")
meter = metrics.get_meter("otel-bi.domain.aw")

_persist_counter = meter.create_counter(
    "aw_persist_writes_total",
    description="Number of AW PostgreSQL write operations",
)


def _current_span_context() -> tuple[str | None, str | None]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return None, None
    return f"{ctx.trace_id:032x}", f"{ctx.span_id:016x}"


def _actor_type(trigger_source: str) -> str:
    return "scheduler" if trigger_source.startswith("scheduler") else "api"


# ---------------------------------------------------------------------------
# Persist functions — called after Go service returns data
# ---------------------------------------------------------------------------
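
# Note (added documentation, not behavior): every persist_* function below
# follows the same pattern: open a short-lived session, write a single run
# row, bump the OTel counter on success, log a warning instead of raising on
# failure, and always append an audit record afterwards.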

def persist_forecast(
    factory: sessionmaker[Session],
    data: list[dict],
    horizon_days: int,
    trigger_source: str,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(AWSalesForecast(
                horizon_days=horizon_days,
                point_count=len(data),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "sales_forecast"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist AW forecast: %s", exc)
    append_audit(
        factory,
        action="forecast.generated",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="aw",
        service="otel-bi-backend",
        entity_type="sales_forecast",
        payload={"horizon_days": horizon_days, "point_count": len(data)},
    )


def persist_rep_scores(
    factory: sessionmaker[Session],
    data: list[dict],
    top_n: int,
    trigger_source: str,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(AWRepScore(
                rep_count=len(data),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "rep_scores"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist AW rep scores: %s", exc)
    append_audit(
        factory,
        action="scores.generated",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="aw",
        service="otel-bi-backend",
        entity_type="rep_scores",
        payload={"rep_count": len(data), "top_n": top_n},
    )


def persist_product_demand(
    factory: sessionmaker[Session],
    data: list[dict],
    top_n: int,
    trigger_source: str,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(AWProductDemand(
                product_count=len(data),
                top_n=top_n,
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "product_demand"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist AW product demand: %s", exc)
    append_audit(
        factory,
        action="scores.generated",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="aw",
        service="otel-bi-backend",
        entity_type="product_demand",
        payload={"product_count": len(data), "top_n": top_n},
    )


def persist_anomaly_run(
    factory: sessionmaker[Session],
    data: list[dict],
    trigger_source: str,
) -> None:
    anomaly_count = sum(1 for p in data if p.get("is_anomaly"))
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(AWAnomalyRun(
                anomaly_count=anomaly_count,
                series_days=365,
                window_days=30,
                threshold_sigma=2.0,
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "anomaly_run"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist AW anomaly run: %s", exc)
    append_audit(
        factory,
        action="anomaly_detection.ran",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="aw",
        service="otel-bi-backend",
        entity_type="anomaly_detection",
        payload={"series_days": 365, "window_days": 30, "anomaly_count": anomaly_count},
    )


# ---------------------------------------------------------------------------
# Read functions — query PostgreSQL for stored results
# ---------------------------------------------------------------------------


def list_forecasts(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(AWSalesForecast)
            .order_by(AWSalesForecast.created_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "created_at": r.created_at.isoformat(),
                "horizon_days": r.horizon_days,
                "point_count": r.point_count,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
            }
            for r in rows
        ]


def list_rep_scores(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(AWRepScore)
            .order_by(AWRepScore.computed_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "computed_at": r.computed_at.isoformat(),
                "rep_count": r.rep_count,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
                "payload": r.payload,
            }
            for r in rows
        ]


def list_product_demand(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(AWProductDemand)
            .order_by(AWProductDemand.computed_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "computed_at": r.computed_at.isoformat(),
                "product_count": r.product_count,
                "top_n": r.top_n,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
                "payload": r.payload,
            }
            for r in rows
        ]


def list_anomaly_runs(factory: sessionmaker[Session], limit: int = 20) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(AWAnomalyRun)
            .order_by(AWAnomalyRun.detected_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "detected_at": r.detected_at.isoformat(),
                "anomaly_count": r.anomaly_count,
                "series_days": r.series_days,
                "window_days": r.window_days,
                "threshold_sigma": r.threshold_sigma,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
            }
            for r in rows
        ]
77 backend/app/domain/aw/models.py Normal file
@@ -0,0 +1,77 @@
from __future__ import annotations

from datetime import datetime, timezone
from uuid import uuid4

from sqlalchemy import JSON, DateTime, Integer, String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


def _utcnow() -> datetime:
    return datetime.now(timezone.utc)


class AWBase(DeclarativeBase):
    pass


class AWSalesForecast(AWBase):
    """Persisted AW sales forecast runs."""

    __tablename__ = "aw_sales_forecasts"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    horizon_days: Mapped[int] = mapped_column(Integer)
    point_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class AWRepScore(AWBase):
    """Persisted AW sales rep performance scoring runs."""

    __tablename__ = "aw_rep_scores"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    computed_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    rep_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class AWProductDemand(AWBase):
    """Persisted AW product demand scoring runs."""

    __tablename__ = "aw_product_demand"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    computed_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    product_count: Mapped[int] = mapped_column(Integer)
    top_n: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class AWAnomalyRun(AWBase):
    """Persisted AW revenue anomaly detection runs."""

    __tablename__ = "aw_anomaly_runs"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    detected_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    anomaly_count: Mapped[int] = mapped_column(Integer)
    series_days: Mapped[int] = mapped_column(Integer)
    window_days: Mapped[int] = mapped_column(Integer)
    threshold_sigma: Mapped[float] = mapped_column(default=2.0)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    # Full annotated series (date, revenue, rolling_mean, lower_band, upper_band, is_anomaly, z_score)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)
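
# Note (added documentation): tables for these models are created at startup
# via AWBase.metadata.create_all(...) in backend/app/main.py, added later in
# this commit.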
131 backend/app/domain/aw/queries.py Normal file
@@ -0,0 +1,131 @@
from __future__ import annotations

# ---------------------------------------------------------------------------
# AdventureWorksDW2022 — read-only MSSQL queries
# Each list contains fallback variants tried in order.
# ---------------------------------------------------------------------------
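#
# Illustrative sketch (not part of this module): one way a caller could try
# the variants in order. The helper name run_with_fallback and the SQLAlchemy
# usage are assumptions for documentation purposes.
#
#   from sqlalchemy import text
#   from sqlalchemy.engine import Engine
#
#   def run_with_fallback(engine: Engine, variants: list[str]) -> list[dict]:
#       last_error: Exception | None = None
#       for sql in variants:
#           try:
#               with engine.connect() as conn:
#                   return [dict(r) for r in conn.execute(text(sql)).mappings()]
#           except Exception as exc:
#               last_error = exc  # fall through to the next variant
#       raise RuntimeError("all query variants failed") from last_error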

# Daily sales combining FactInternetSales + FactResellerSales
AW_DAILY_SALES: list[str] = [
    """
    SELECT
        CAST(d.FullDateAlternateKey AS date) AS sale_date,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimDate AS d ON d.DateKey = f.OrderDateKey
    GROUP BY CAST(d.FullDateAlternateKey AS date)

    UNION ALL

    SELECT
        CAST(d.FullDateAlternateKey AS date) AS sale_date,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        SUM(r.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimDate AS d ON d.DateKey = r.OrderDateKey
    GROUP BY CAST(d.FullDateAlternateKey AS date)

    ORDER BY sale_date;
    """,
    # Fallback: internet sales only using OrderDate column directly
    """
    SELECT
        CAST(OrderDate AS date) AS sale_date,
        SUM(SalesAmount) AS revenue,
        SUM(TotalProductCost) AS cost,
        SUM(OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales
    GROUP BY CAST(OrderDate AS date)
    ORDER BY sale_date;
    """,
]

# Sales rep performance — reseller sales attributed to employees
AW_REP_PERFORMANCE: list[str] = [
    """
    SELECT
        e.EmployeeKey AS employee_key,
        e.FirstName + ' ' + e.LastName AS rep_name,
        COALESCE(e.Title, 'Sales Rep') AS rep_title,
        COALESCE(st.SalesTerritoryRegion, 'Unknown') AS territory,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        COUNT_BIG(*) AS orders,
        AVG(r.SalesAmount) AS avg_deal_size
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimEmployee AS e
        ON e.EmployeeKey = r.EmployeeKey
    INNER JOIN dbo.DimSalesTerritory AS st
        ON st.SalesTerritoryKey = r.SalesTerritoryKey
    WHERE e.SalesPersonFlag = 1
    GROUP BY
        e.EmployeeKey,
        e.FirstName, e.LastName,
        e.Title,
        st.SalesTerritoryRegion
    ORDER BY revenue DESC;
    """,
    # Fallback without SalesPersonFlag filter
    """
    SELECT
        e.EmployeeKey AS employee_key,
        e.FirstName + ' ' + e.LastName AS rep_name,
        COALESCE(e.Title, 'Employee') AS rep_title,
        'Unknown' AS territory,
        SUM(r.SalesAmount) AS revenue,
        SUM(r.TotalProductCost) AS cost,
        COUNT_BIG(*) AS orders,
        AVG(r.SalesAmount) AS avg_deal_size
    FROM dbo.FactResellerSales AS r
    INNER JOIN dbo.DimEmployee AS e ON e.EmployeeKey = r.EmployeeKey
    GROUP BY e.EmployeeKey, e.FirstName, e.LastName, e.Title
    ORDER BY revenue DESC;
    """,
]

# Product demand — internet sales with full category hierarchy
AW_PRODUCT_DEMAND: list[str] = [
    """
    SELECT
        p.ProductAlternateKey AS product_id,
        p.EnglishProductName AS product_name,
        COALESCE(pc.EnglishProductCategoryName, 'Unknown') AS category,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    INNER JOIN dbo.DimProduct AS p
        ON p.ProductKey = f.ProductKey
    LEFT JOIN dbo.DimProductSubcategory AS sc
        ON sc.ProductSubcategoryKey = p.ProductSubcategoryKey
    LEFT JOIN dbo.DimProductCategory AS pc
        ON pc.ProductCategoryKey = sc.ProductCategoryKey
    GROUP BY
        p.ProductAlternateKey,
        p.EnglishProductName,
        pc.EnglishProductCategoryName
    ORDER BY revenue DESC;
    """,
    # Fallback: no category join
    """
    SELECT
        CAST(f.ProductKey AS nvarchar(50)) AS product_id,
        COALESCE(p.EnglishProductName, CAST(f.ProductKey AS nvarchar(50))) AS product_name,
        'Unknown' AS category,
        SUM(f.SalesAmount) AS revenue,
        SUM(f.TotalProductCost) AS cost,
        SUM(f.OrderQuantity) AS quantity,
        COUNT_BIG(*) AS orders
    FROM dbo.FactInternetSales AS f
    LEFT JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
    GROUP BY f.ProductKey, p.EnglishProductName
    ORDER BY revenue DESC;
    """,
]
0 backend/app/domain/wwi/__init__.py Normal file
297 backend/app/domain/wwi/analytics.py Normal file
@@ -0,0 +1,297 @@
from __future__ import annotations

import logging
from datetime import datetime, timedelta, timezone

from opentelemetry import metrics, trace
from sqlalchemy.orm import sessionmaker, Session

from app.core.audit import append_audit
from app.domain.wwi.models import (
    WWIReorderRecommendation,
    WWISupplierScore,
    WWIWhatIfScenario,
    WWIBusinessEvent,
)

LOGGER = logging.getLogger(__name__)

tracer = trace.get_tracer("otel-bi.domain.wwi")
meter = metrics.get_meter("otel-bi.domain.wwi")

_persist_counter = meter.create_counter(
    "wwi_persist_writes_total",
    description="Number of WWI PostgreSQL write operations",
)
_event_counter = meter.create_counter(
    "wwi_business_events_generated_total",
    description="Business events automatically generated",
)


def _current_span_context() -> tuple[str | None, str | None]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return None, None
    return f"{ctx.trace_id:032x}", f"{ctx.span_id:016x}"


def _actor_type(trigger_source: str) -> str:
    return "scheduler" if trigger_source.startswith("scheduler") else "api"


# ---------------------------------------------------------------------------
# Persist functions — called after Go service returns data
# ---------------------------------------------------------------------------


def persist_reorder_recommendations(
    factory: sessionmaker[Session],
    data: list[dict],
    trigger_source: str,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(WWIReorderRecommendation(
                item_count=len(data),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "reorder_recommendations"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist WWI reorder recommendations: %s", exc)
    append_audit(
        factory,
        action="recommendations.generated",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="wwi",
        service="otel-bi-backend",
        entity_type="reorder_recommendations",
        payload={"item_count": len(data)},
    )


def persist_supplier_scores(
    factory: sessionmaker[Session],
    data: list[dict],
    top_n: int,
    trigger_source: str,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(WWISupplierScore(
                supplier_count=len(data),
                top_n=top_n,
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=data,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "supplier_scores"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist WWI supplier scores: %s", exc)
    append_audit(
        factory,
        action="scores.generated",
        actor_type=_actor_type(trigger_source),
        actor_id=trigger_source,
        domain="wwi",
        service="otel-bi-backend",
        entity_type="supplier_scores",
        payload={"supplier_count": len(data), "top_n": top_n},
    )


def persist_whatif_scenario(
    factory: sessionmaker[Session],
    result: dict,
) -> None:
    trace_id, span_id = _current_span_context()
    try:
        with factory() as session:
            session.add(WWIWhatIfScenario(
                stock_item_key=result["stock_item_key"],
                stock_item_name=result["stock_item_name"],
                demand_multiplier=result["demand_multiplier"],
                current_stock=result["current_stock"],
                avg_daily_demand=result["adjusted_daily_demand"],
                projected_days_until_stockout=result.get("projected_days_until_stockout"),
                recommended_order_qty=float(result["recommended_order_qty"]),
                trace_id=trace_id,
                span_id=span_id,
                result=result,
            ))
            session.commit()
        _persist_counter.add(1, {"entity": "whatif_scenario"})
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist WWI what-if scenario: %s", exc)
    append_audit(
        factory,
        action="scenario.submitted",
        actor_type="user",
        domain="wwi",
        service="otel-bi-backend",
        entity_type="whatif_scenario",
        payload={
            "stock_item_key": result["stock_item_key"],
            "demand_multiplier": result["demand_multiplier"],
            "projected_days_until_stockout": result.get("projected_days_until_stockout"),
        },
    )


# ---------------------------------------------------------------------------
# Business events — generated from reorder data in Python (PostgreSQL writes)
# ---------------------------------------------------------------------------
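
# Note (added documentation): generate_stock_events below deduplicates per
# stock item; any item that already has a LOW_STOCK event within the last
# 24 hours is skipped, so repeated reorder runs do not flood the table.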

def generate_stock_events(
    factory: sessionmaker[Session],
    recommendations: list[dict],
) -> None:
    """Write LOW_STOCK events for HIGH-urgency items, deduplicating within 24h."""
    trace_id, span_id = _current_span_context()
    cutoff = datetime.now(timezone.utc) - timedelta(hours=24)
    try:
        with factory() as session:
            for item in recommendations:
                if item.get("urgency") != "HIGH":
                    continue
                entity_key = str(item["stock_item_key"])
                existing = (
                    session.query(WWIBusinessEvent)
                    .filter(
                        WWIBusinessEvent.event_type == "LOW_STOCK",
                        WWIBusinessEvent.entity_key == entity_key,
                        WWIBusinessEvent.occurred_at >= cutoff,
                    )
                    .first()
                )
                if existing:
                    continue
                days_str = (
                    f"{item['days_until_stockout']:.1f} days"
                    if item.get("days_until_stockout") is not None
                    else "immediately"
                )
                session.add(WWIBusinessEvent(
                    event_type="LOW_STOCK",
                    severity="HIGH",
                    entity_key=entity_key,
                    entity_name=item["stock_item_name"],
                    message=(
                        f"Stock for '{item['stock_item_name']}' will be exhausted in {days_str}. "
                        f"Current stock: {item['current_stock']:.0f} units, "
                        f"daily demand: {item['avg_daily_demand']:.1f} units."
                    ),
                    trace_id=trace_id,
                    span_id=span_id,
                    details={
                        "current_stock": item["current_stock"],
                        "avg_daily_demand": item["avg_daily_demand"],
                        "recommended_reorder_qty": item["recommended_reorder_qty"],
                    },
                ))
                _event_counter.add(1, {"event_type": "LOW_STOCK"})
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to persist WWI business events: %s", exc)


# ---------------------------------------------------------------------------
# Read functions — query PostgreSQL for stored results
# ---------------------------------------------------------------------------


def get_business_events(factory: sessionmaker[Session], limit: int = 100) -> list[dict]:
    with tracer.start_as_current_span("wwi.analytics.business_events"):
        with factory() as session:
            rows = (
                session.query(WWIBusinessEvent)
                .order_by(WWIBusinessEvent.occurred_at.desc())
                .limit(limit)
                .all()
            )
            return [
                {
                    "id": r.id,
                    "occurred_at": r.occurred_at.isoformat(),
                    "event_type": r.event_type,
                    "severity": r.severity,
                    "entity_key": r.entity_key,
                    "entity_name": r.entity_name,
                    "message": r.message,
                    "trace_id": r.trace_id,
                    "details": r.details,
                }
                for r in rows
            ]


def list_reorder_recommendations(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(WWIReorderRecommendation)
            .order_by(WWIReorderRecommendation.created_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "created_at": r.created_at.isoformat(),
                "item_count": r.item_count,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
            }
            for r in rows
        ]


def list_supplier_scores(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(WWISupplierScore)
            .order_by(WWISupplierScore.computed_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "computed_at": r.computed_at.isoformat(),
                "supplier_count": r.supplier_count,
                "top_n": r.top_n,
                "trigger_source": r.trigger_source,
                "trace_id": r.trace_id,
                "payload": r.payload,
            }
            for r in rows
        ]


def list_whatif_scenarios(factory: sessionmaker[Session], limit: int = 50) -> list[dict]:
    with factory() as session:
        rows = (
            session.query(WWIWhatIfScenario)
            .order_by(WWIWhatIfScenario.created_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "created_at": r.created_at.isoformat(),
                "stock_item_key": r.stock_item_key,
                "stock_item_name": r.stock_item_name,
                "demand_multiplier": r.demand_multiplier,
                "projected_days_until_stockout": r.projected_days_until_stockout,
                "recommended_order_qty": r.recommended_order_qty,
                "result": r.result,
            }
            for r in rows
        ]
80 backend/app/domain/wwi/models.py Normal file
@@ -0,0 +1,80 @@
from __future__ import annotations

from datetime import datetime, timezone
from uuid import uuid4

from sqlalchemy import JSON, DateTime, Float, Integer, String, Text
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


def _utcnow() -> datetime:
    return datetime.now(timezone.utc)


class WWIBase(DeclarativeBase):
    pass


class WWIReorderRecommendation(WWIBase):
    """Persisted WWI stock reorder recommendation runs."""

    __tablename__ = "wwi_reorder_recommendations"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    item_count: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class WWISupplierScore(WWIBase):
    """Persisted WWI supplier reliability scoring runs."""

    __tablename__ = "wwi_supplier_scores"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    computed_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    supplier_count: Mapped[int] = mapped_column(Integer)
    top_n: Mapped[int] = mapped_column(Integer)
    trigger_source: Mapped[str] = mapped_column(String(64), index=True)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    payload: Mapped[list[dict]] = mapped_column(JSON, default=list)


class WWIWhatIfScenario(WWIBase):
    """User-submitted what-if simulation results."""

    __tablename__ = "wwi_whatif_scenarios"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    stock_item_key: Mapped[int] = mapped_column(Integer, index=True)
    stock_item_name: Mapped[str] = mapped_column(String(200))
    demand_multiplier: Mapped[float] = mapped_column(Float)
    current_stock: Mapped[float] = mapped_column(Float)
    avg_daily_demand: Mapped[float] = mapped_column(Float)
    projected_days_until_stockout: Mapped[float | None] = mapped_column(Float, nullable=True)
    recommended_order_qty: Mapped[float] = mapped_column(Float)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    result: Mapped[dict] = mapped_column(JSON, default=dict)


class WWIBusinessEvent(WWIBase):
    """Automatically generated business alert events."""

    __tablename__ = "wwi_business_events"

    id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid4()))
    occurred_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, index=True)
    event_type: Mapped[str] = mapped_column(String(50), index=True)  # LOW_STOCK, ORDER_DROP, SUPPLIER_RISK
    severity: Mapped[str] = mapped_column(String(20), index=True)  # HIGH, MEDIUM, LOW
    entity_key: Mapped[str] = mapped_column(String(100), index=True)
    entity_name: Mapped[str] = mapped_column(String(200))
    message: Mapped[str] = mapped_column(Text)
    trace_id: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    span_id: Mapped[str | None] = mapped_column(String(16), nullable=True)
    details: Mapped[dict] = mapped_column(JSON, default=dict)
171 backend/app/domain/wwi/queries.py Normal file
@@ -0,0 +1,171 @@
from __future__ import annotations

# ---------------------------------------------------------------------------
# WideWorldImportersDW — read-only MSSQL queries
#
# Column names in this DW use spaces and require bracket notation.
# Each list contains fallback variants tried in order.
# ---------------------------------------------------------------------------

# Daily sales from Fact.Sale joined to Dimension.Date
WWI_DAILY_SALES: list[str] = [
    """
    SELECT
        d.[Date] AS sale_date,
        SUM(s.[Total Excluding Tax]) AS revenue,
        SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
        SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
        COUNT_BIG(*) AS orders
    FROM [Fact].[Sale] AS s
    INNER JOIN [Dimension].[Date] AS d
        ON d.[Date Key] = s.[Delivery Date Key]
    GROUP BY d.[Date]
    ORDER BY d.[Date];
    """,
    # Fallback: use Invoice Date Key if Delivery Date Key is missing
    """
    SELECT
        d.[Date] AS sale_date,
        SUM(s.[Total Excluding Tax]) AS revenue,
        SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
        SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
        COUNT_BIG(*) AS orders
    FROM [Fact].[Sale] AS s
    INNER JOIN [Dimension].[Date] AS d
        ON d.[Date Key] = s.[Invoice Date Key]
    GROUP BY d.[Date]
    ORDER BY d.[Date];
    """,
]

# Current stock levels per stock item (net movement quantity)
WWI_STOCK_LEVELS: list[str] = [
    """
    SELECT
        si.[Stock Item Key] AS stock_item_key,
        si.[Stock Item] AS stock_item_name,
        si.[Unit Price] AS unit_price,
        si.[Lead Time Days] AS lead_time_days,
        SUM(CAST(m.[Quantity] AS FLOAT)) AS current_stock
    FROM [Dimension].[Stock Item] AS si
    LEFT JOIN [Fact].[Movement] AS m
        ON m.[Stock Item Key] = si.[Stock Item Key]
    WHERE si.[Stock Item Key] <> 0
    GROUP BY
        si.[Stock Item Key],
        si.[Stock Item],
        si.[Unit Price],
        si.[Lead Time Days];
    """,
    # Fallback: without movement (returns 0 stock)
    """
    SELECT
        si.[Stock Item Key] AS stock_item_key,
        si.[Stock Item] AS stock_item_name,
        si.[Unit Price] AS unit_price,
        si.[Lead Time Days] AS lead_time_days,
        CAST(0 AS FLOAT) AS current_stock
    FROM [Dimension].[Stock Item] AS si
    WHERE si.[Stock Item Key] <> 0;
    """,
]

# 90-day demand velocity per stock item from Fact.Sale
WWI_DEMAND_VELOCITY: list[str] = [
    """
    SELECT
        s.[Stock Item Key] AS stock_item_key,
        SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d,
        COUNT_BIG(DISTINCT s.[WWI Invoice ID]) AS invoice_count_90d
    FROM [Fact].[Sale] AS s
    INNER JOIN [Dimension].[Date] AS d
        ON d.[Date Key] = s.[Delivery Date Key]
    WHERE d.[Date] >= DATEADD(day, -90, GETDATE())
        AND s.[Stock Item Key] <> 0
    GROUP BY s.[Stock Item Key];
    """,
    """
    SELECT
        s.[Stock Item Key] AS stock_item_key,
        SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d,
        COUNT_BIG(DISTINCT s.[WWI Invoice ID]) AS invoice_count_90d
    FROM [Fact].[Sale] AS s
    INNER JOIN [Dimension].[Date] AS d
        ON d.[Date Key] = s.[Invoice Date Key]
    WHERE d.[Date] >= DATEADD(day, -90, GETDATE())
        AND s.[Stock Item Key] <> 0
    GROUP BY s.[Stock Item Key];
    """,
]

# Supplier reliability data from Fact.Purchase
WWI_SUPPLIER_PERFORMANCE: list[str] = [
    """
    SELECT
        sup.[Supplier Key] AS supplier_key,
        sup.[Supplier] AS supplier_name,
        sup.[Category] AS category,
        COUNT_BIG(*) AS total_orders,
        SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
        SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
        SUM(CASE WHEN p.[Is Order Finalized] = 1 THEN 1 ELSE 0 END) AS finalized_orders
    FROM [Dimension].[Supplier] AS sup
    INNER JOIN [Fact].[Purchase] AS p
        ON p.[Supplier Key] = sup.[Supplier Key]
    WHERE sup.[Supplier Key] <> 0
    GROUP BY
        sup.[Supplier Key],
        sup.[Supplier],
        sup.[Category]
    ORDER BY total_orders DESC;
    """,
    # Fallback: without Is Order Finalized
    """
    SELECT
        sup.[Supplier Key] AS supplier_key,
        sup.[Supplier] AS supplier_name,
        sup.[Category] AS category,
        COUNT_BIG(*) AS total_orders,
        SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
        SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
        COUNT_BIG(*) AS finalized_orders
    FROM [Dimension].[Supplier] AS sup
    INNER JOIN [Fact].[Purchase] AS p
        ON p.[Supplier Key] = sup.[Supplier Key]
    WHERE sup.[Supplier Key] <> 0
    GROUP BY
        sup.[Supplier Key],
        sup.[Supplier],
        sup.[Category]
    ORDER BY total_orders DESC;
    """,
]

# Single stock item detail for what-if scenario computation
WWI_STOCK_ITEM_DETAIL = """
SELECT
    si.[Stock Item Key] AS stock_item_key,
    si.[Stock Item] AS stock_item_name,
    si.[Unit Price] AS unit_price,
    si.[Lead Time Days] AS lead_time_days,
    COALESCE(SUM(CAST(m.[Quantity] AS FLOAT)), 0) AS current_stock
FROM [Dimension].[Stock Item] AS si
LEFT JOIN [Fact].[Movement] AS m
    ON m.[Stock Item Key] = si.[Stock Item Key]
WHERE si.[Stock Item Key] = :stock_item_key
GROUP BY
    si.[Stock Item Key],
    si.[Stock Item],
    si.[Unit Price],
    si.[Lead Time Days];
"""

WWI_STOCK_ITEM_DEMAND = """
SELECT
    SUM(CAST(s.[Quantity] AS FLOAT)) / NULLIF(90.0, 0) AS avg_daily_demand
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d
    ON d.[Date Key] = s.[Delivery Date Key]
WHERE s.[Stock Item Key] = :stock_item_key
    AND d.[Date] >= DATEADD(day, -90, GETDATE());
"""
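
# Illustrative sketch (not part of this module): both statements above take a
# named bind parameter, so a caller would execute them roughly like this
# (the engine variable and the item key are assumptions):
#
#   from sqlalchemy import text
#
#   with engine.connect() as conn:
#       row = conn.execute(
#           text(WWI_STOCK_ITEM_DEMAND), {"stock_item_key": 42}
#       ).first()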
100 backend/app/main.py Normal file
@@ -0,0 +1,100 @@
from __future__ import annotations

import logging
from contextlib import asynccontextmanager

import httpx
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse

from app.core.audit import SharedBase
from app.core.config import settings
from app.core.db import create_postgres_engine, create_session_factory
from app.core.executor import get_executor, shutdown_executor
from app.core.otel import configure_otel, instrument_fastapi, instrument_sqlalchemy, shutdown_otel
from app.domain.aw.models import AWBase
from app.domain.wwi.models import WWIBase
from app.routers import aw, platform, wwi

LOGGER = logging.getLogger(__name__)


class SecurityHeadersMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: StarletteRequest, call_next) -> StarletteResponse:
        response = await call_next(request)
        response.headers["X-Content-Type-Options"] = "nosniff"
        response.headers["X-Frame-Options"] = "DENY"
        response.headers["Referrer-Policy"] = "strict-origin-when-cross-origin"
        return response


@asynccontextmanager
async def lifespan(app: FastAPI):
    # --- startup ---
    providers = configure_otel(settings)
    LOGGER.info("OTel configured for %s", settings.otel_service_name)

    pg_engine = create_postgres_engine()
    instrument_sqlalchemy({"pg": pg_engine})

    SharedBase.metadata.create_all(pg_engine)
    AWBase.metadata.create_all(pg_engine)
    WWIBase.metadata.create_all(pg_engine)

    pg_factory = create_session_factory(pg_engine)

    analytics_client = httpx.AsyncClient(
        base_url=settings.analytics_service_url,
        timeout=httpx.Timeout(60.0),
    )

    executor = get_executor()

    app.state.pg_engine = pg_engine
    app.state.pg_factory = pg_factory
    app.state.analytics_client = analytics_client

    LOGGER.info("Ready: analytics_service=%s thread_pool_workers=%d",
                settings.analytics_service_url, executor._max_workers)  # noqa: SLF001

    instrument_fastapi(app)

    yield

    # --- shutdown ---
    LOGGER.info("Shutting down")
    await analytics_client.aclose()
    shutdown_executor()
    pg_engine.dispose()
    shutdown_otel(providers)


def create_app() -> FastAPI:
    app = FastAPI(
        title="otel-bi-backend",
        version="1.0.0",
        lifespan=lifespan,
        docs_url="/docs" if settings.app_env != "prod" else None,
        redoc_url=None,
    )

    app.add_middleware(SecurityHeadersMiddleware)
    app.add_middleware(
        CORSMiddleware,
        allow_origins=settings.cors_origins_list,
        allow_credentials=True,
        allow_methods=["GET", "POST", "DELETE"],
        allow_headers=["Authorization", "Content-Type"],
    )

    app.include_router(platform.router)
    app.include_router(aw.router)
    app.include_router(wwi.router)

    return app


app = create_app()
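
# To run locally (a sketch; assumes uvicorn is installed and the .env file
# is present in backend/):
#
#   uvicorn app.main:app --host 0.0.0.0 --port 8000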
0 backend/app/routers/__init__.py Normal file
464 backend/app/routers/aw.py Normal file
@@ -0,0 +1,464 @@
from __future__ import annotations

import asyncio
import logging
from datetime import datetime, timezone
from typing import Any, Literal

import httpx
from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response
from opentelemetry import propagate, trace
from sqlalchemy.orm import sessionmaker, Session

from app.core.audit import ExportRecord, append_audit, current_span_context
from app.core.config import settings
from app.core.executor import get_executor
from app.core.export import to_pdf_bytes
from app.core.security import FrontendPrincipal, require_frontend_principal
from app.domain.aw import analytics

LOGGER = logging.getLogger(__name__)
tracer = trace.get_tracer("otel-bi.routers.aw")

router = APIRouter(prefix="/api/aw", tags=["aw"])

_XLSX_MEDIA = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
_PDF_MEDIA = "application/pdf"


def _trace_headers() -> dict[str, str]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return {}
    return {"x-trace-id": f"{ctx.trace_id:032x}", "x-span-id": f"{ctx.span_id:016x}"}


def _propagation_headers() -> dict[str, str]:
    headers: dict[str, str] = {}
    propagate.inject(headers)
    return headers
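
# Note (added documentation): propagate.inject() writes the headers for the
# globally configured propagator; with OTel SDK defaults that is a W3C
# traceparent (plus baggage) header, which lets the Go analytics service
# join the same trace.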
||||
|
||||
|
||||
async def _get(client: httpx.AsyncClient, path: str, params: dict | None = None) -> Any:
|
||||
try:
|
||||
r = await client.get(path, params=params, headers=_propagation_headers())
|
||||
r.raise_for_status()
|
||||
return r.json()
|
||||
except httpx.HTTPStatusError as exc:
|
||||
raise HTTPException(status_code=502, detail=f"Analytics service error: {exc.response.status_code}")
|
||||
except httpx.RequestError as exc:
|
||||
raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")
|
||||
|
||||
|
||||
async def _post(client: httpx.AsyncClient, path: str, json: dict) -> Any:
|
||||
try:
|
||||
r = await client.post(path, json=json, headers=_propagation_headers())
|
||||
r.raise_for_status()
|
||||
return r.json()
|
||||
except httpx.HTTPStatusError as exc:
|
||||
raise HTTPException(
|
||||
status_code=502 if exc.response.status_code != 404 else 404,
|
||||
detail=f"Analytics service error: {exc.response.status_code}",
|
||||
)
|
||||
except httpx.RequestError as exc:
|
||||
raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")
|
||||
|
||||
|
||||
def _record_export(
|
||||
pg_factory: sessionmaker[Session],
|
||||
domain: str,
|
||||
source_view: str,
|
||||
fmt: str,
|
||||
filters: dict,
|
||||
row_count: int,
|
||||
file_size_bytes: int,
|
||||
actor_id: str,
|
||||
trace_id: str | None,
|
||||
span_id: str | None,
|
||||
) -> None:
|
||||
try:
|
||||
with pg_factory() as session:
|
||||
session.add(ExportRecord(
|
||||
domain=domain, service="otel-bi-backend", source_view=source_view,
|
||||
format=fmt, filters_applied=filters, row_count=row_count,
|
||||
file_size_bytes=file_size_bytes, actor_id=actor_id,
|
||||
trace_id=trace_id, span_id=span_id,
|
||||
))
|
||||
session.commit()
|
||||
except Exception as exc: # noqa: BLE001
|
||||
LOGGER.warning("Failed to record export metadata: %s", exc)
|
||||
append_audit(
|
||||
pg_factory,
|
||||
action="export.created", actor_type="user", actor_id=actor_id,
|
||||
domain=domain, service="otel-bi-backend", entity_type=source_view,
|
||||
payload={"format": fmt, "row_count": row_count, "file_size_bytes": file_size_bytes, **filters},
|
||||
)
|
||||
|
||||
|
||||
async def _proxy_xlsx(
|
||||
client: httpx.AsyncClient,
|
||||
go_path: str,
|
||||
params: dict,
|
||||
filename_stem: str,
|
||||
domain: str,
|
||||
source_view: str,
|
||||
filters: dict,
|
||||
actor_id: str,
|
||||
pg_factory: sessionmaker[Session],
|
||||
) -> Response:
|
||||
"""Fetch XLSX bytes from Go, write ExportRecord, return response."""
|
||||
try:
|
||||
r = await client.get(go_path, params=params, headers=_propagation_headers())
|
||||
r.raise_for_status()
|
||||
except httpx.HTTPStatusError as exc:
|
||||
raise HTTPException(status_code=502, detail=f"Analytics service error: {exc.response.status_code}")
|
||||
except httpx.RequestError as exc:
|
||||
raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")
|
||||
|
||||
content = r.content
|
||||
row_count = int(r.headers.get("X-Row-Count", "0"))
|
||||
today = datetime.now(timezone.utc).strftime("%Y%m%d")
|
||||
filename = f"{filename_stem}_{today}.xlsx"
|
||||
trace_id, span_id = current_span_context()
|
||||
|
||||
await asyncio.get_running_loop().run_in_executor(
|
||||
get_executor(),
|
||||
lambda: _record_export(pg_factory, domain, source_view, "xlsx", filters,
|
||||
row_count, len(content), actor_id, trace_id, span_id),
|
||||
)
|
||||
return Response(
|
||||
content=content, media_type=_XLSX_MEDIA,
|
||||
headers={"Content-Disposition": f'attachment; filename="{filename}"'},
|
||||
)
|
||||
|
||||
|
||||
def _make_pdf(
|
||||
data: list[dict],
|
||||
filename_stem: str,
|
||||
pdf_title: str,
|
||||
domain: str,
|
||||
source_view: str,
|
||||
filters: dict,
|
||||
actor_id: str,
|
||||
pg_factory: sessionmaker[Session],
|
||||
) -> Response:
|
||||
with tracer.start_as_current_span(f"export.{domain}.{source_view}") as span:
|
||||
span.set_attribute("export.format", "pdf")
|
||||
span.set_attribute("export.row_count", len(data))
|
||||
content = to_pdf_bytes(data, title=pdf_title)
|
||||
span.set_attribute("export.file_size_bytes", len(content))
|
||||
today = datetime.now(timezone.utc).strftime("%Y%m%d")
|
||||
filename = f"{filename_stem}_{today}.pdf"
|
||||
trace_id, span_id = current_span_context()
|
||||
_record_export(pg_factory, domain, source_view, "pdf", filters,
|
||||
len(data), len(content), actor_id, trace_id, span_id)
|
||||
return Response(
|
||||
content=content, media_type=_PDF_MEDIA,
|
||||
headers={"Content-Disposition": f'attachment; filename="{filename}"'},
|
||||
)


# ---------------------------------------------------------------------------
# Sales
# ---------------------------------------------------------------------------

@router.get("/sales/kpis")
async def aw_sales_kpis(
    response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    return await _get(request.app.state.analytics_client, "/aw/sales/kpis")


@router.get("/sales/history")
async def aw_sales_history(
    response: Response, request: Request,
    days_back: int = Query(default=settings.default_history_days, ge=30, le=1460),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    return await _get(request.app.state.analytics_client, "/aw/sales/history", {"days_back": days_back})


@router.get("/sales/forecast")
async def aw_sales_forecast(
    response: Response, request: Request,
    horizon_days: int = Query(default=settings.forecast_horizon_days, ge=7, le=180),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/aw/sales/forecast", {"horizon_days": horizon_days})

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_forecast(pg_factory, data, horizon_days, "api.sales.forecast"),
    )
    return data
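
# Example request (a sketch; assumes this router is mounted under /api/aw):
#   GET /api/aw/sales/forecast?horizon_days=30
# returns the Go service's forecast points unchanged, after the run has been
# persisted to PostgreSQL off the event loop.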


# ---------------------------------------------------------------------------
# Rep scores & product demand
# ---------------------------------------------------------------------------

@router.get("/reps/scores")
async def aw_rep_scores(
    response: Response, request: Request,
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/aw/reps/scores", {"top_n": top_n})

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_rep_scores(pg_factory, data, top_n, "api.reps.scores"),
    )
    return data


@router.get("/products/demand")
async def aw_product_demand(
    response: Response, request: Request,
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/aw/products/demand", {"top_n": top_n})

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_product_demand(pg_factory, data, top_n, "api.products.demand"),
    )
    return data


# ---------------------------------------------------------------------------
# Anomaly detection
# ---------------------------------------------------------------------------

@router.get("/anomalies")
async def aw_anomalies(
    response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/aw/anomalies")

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_anomaly_run(pg_factory, data, "api.aw.anomalies"),
    )
    return data


# ---------------------------------------------------------------------------
# Stored records
# ---------------------------------------------------------------------------

@router.get("/records/forecasts")
async def aw_records_forecasts(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_forecasts(pg_factory, limit=limit)
    )


@router.get("/records/rep-scores")
async def aw_records_rep_scores(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_rep_scores(pg_factory, limit=limit)
    )


@router.get("/records/product-demand")
async def aw_records_product_demand(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_product_demand(pg_factory, limit=limit)
    )


# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------

@router.get("/export/sales-history")
async def export_aw_sales_history(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    days_back: int = Query(default=settings.default_history_days, ge=30, le=1460),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"days_back": days_back}

    if format == "xlsx":
        return await _proxy_xlsx(client, "/aw/export/sales-history", filters,
                                 "aw_sales_history", "aw", "sales-history", filters, actor_id, pg_factory)

    data = await _get(client, "/aw/sales/history", filters)
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "aw_sales_history", "AdventureWorks — Sales History",
                          "aw", "sales-history", filters, actor_id, pg_factory),
    )
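
# Example download (a sketch; assumes the router is mounted under /api/aw and
# auth is disabled via REQUIRE_FRONTEND_AUTH=false in local dev):
#   curl -s "http://localhost:8000/api/aw/export/sales-history?format=xlsx&days_back=365" \
#        -o aw_sales_history.xlsx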


@router.get("/export/sales-forecast")
async def export_aw_sales_forecast(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    horizon_days: int = Query(default=settings.forecast_horizon_days, ge=7, le=180),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"horizon_days": horizon_days}

    if format == "xlsx":
        return await _proxy_xlsx(client, "/aw/export/sales-forecast", filters,
                                 "aw_sales_forecast", "aw", "sales-forecast", filters, actor_id, pg_factory)

    data = await _get(client, "/aw/sales/forecast", filters)
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "aw_sales_forecast", "AdventureWorks — Sales Forecast",
                          "aw", "sales-forecast", filters, actor_id, pg_factory),
    )


@router.get("/export/rep-scores")
async def export_aw_rep_scores(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"top_n": top_n}

    if format == "xlsx":
        return await _proxy_xlsx(client, "/aw/export/rep-scores", filters,
                                 "aw_rep_scores", "aw", "rep-scores", filters, actor_id, pg_factory)

    data = await _get(client, "/aw/reps/scores", filters)
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "aw_rep_scores", "AdventureWorks — Sales Rep Performance",
                          "aw", "rep-scores", filters, actor_id, pg_factory),
    )


@router.get("/export/product-demand")
async def export_aw_product_demand(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"top_n": top_n}

    if format == "xlsx":
        return await _proxy_xlsx(client, "/aw/export/product-demand", filters,
                                 "aw_product_demand", "aw", "product-demand", filters, actor_id, pg_factory)

    data = await _get(client, "/aw/products/demand", filters)
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "aw_product_demand", "AdventureWorks — Product Demand Scores",
                          "aw", "product-demand", filters, actor_id, pg_factory),
    )


# ---------------------------------------------------------------------------
# Job triggers
# ---------------------------------------------------------------------------

@router.post("/jobs/{job_name}/trigger")
async def trigger_aw_job(
    job_name: str, response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    return await _post(request.app.state.analytics_client, f"/scheduler/aw/{job_name}/trigger", {})


@router.get("/jobs")
async def aw_job_history(
    response: Response, request: Request,
    limit: int = Query(default=50, ge=1, le=200),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: _list_jobs(pg_factory, "aw", limit)
    )


def _list_jobs(pg_factory, domain: str, limit: int) -> list[dict]:
    from app.core.audit import JobExecution
    with pg_factory() as session:
        rows = (
            session.query(JobExecution)
            .filter_by(domain=domain)
            .order_by(JobExecution.started_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "job_name": r.job_name,
                "domain": r.domain,
                "status": r.status,
                "started_at": r.started_at.isoformat(),
                "completed_at": r.completed_at.isoformat() if r.completed_at else None,
                "duration_ms": r.duration_ms,
                "records_processed": r.records_processed,
                "error_message": r.error_message,
                "trace_id": r.trace_id,
            }
            for r in rows
        ]
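
# Each serialized row looks roughly like this (illustrative values only):
#   {"id": 1, "job_name": "refresh_forecast", "domain": "aw", "status": "succeeded",
#    "started_at": "2024-01-01T00:00:00+00:00", "completed_at": "...",
#    "duration_ms": 1234, "records_processed": 90, "error_message": None,
#    "trace_id": "4bf9..."}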
254 backend/app/routers/platform.py Normal file
@@ -0,0 +1,254 @@
from __future__ import annotations

import asyncio
import logging

from fastapi import APIRouter, Depends, Query, Request, Response
from opentelemetry import propagate, trace

from app.core.audit import AuditLog, ExportRecord, append_audit
from app.core.config import settings
from app.core.executor import get_executor
from app.core.reports import save_report
from app.core.security import FrontendPrincipal, require_frontend_principal
from app.domain.wwi import analytics as wwi_analytics

LOGGER = logging.getLogger(__name__)

router = APIRouter(tags=["platform"])


def _trace_headers() -> dict[str, str]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return {}
    return {"x-trace-id": f"{ctx.trace_id:032x}", "x-span-id": f"{ctx.span_id:016x}"}


# ---------------------------------------------------------------------------
# System
# ---------------------------------------------------------------------------

@router.get("/api/config")
def frontend_config() -> dict:
    return {
        "oidc_enabled": settings.require_frontend_auth,
        "oidc_authority": settings.frontend_jwt_issuer_url,
        "oidc_client_id": settings.frontend_oidc_client_id,
        "oidc_scope": settings.frontend_oidc_scope,
    }


@router.get("/api/health")
def health(response: Response) -> dict:
    response.headers.update(_trace_headers())
    return {"status": "ok", "service": "otel-bi-backend"}


@router.get("/api/telemetry/status")
def telemetry_status(
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    return {
        "status": "instrumented",
        "service": "otel-bi-backend",
        "collector_endpoint": settings.otel_collector_endpoint,
        "subject": principal.subject,
        **_trace_headers(),
    }


# ---------------------------------------------------------------------------
# Cross-domain report generation
# ---------------------------------------------------------------------------

def _propagation_headers() -> dict[str, str]:
    headers: dict[str, str] = {}
    propagate.inject(headers)
    return headers


@router.post("/api/reports/generate")
async def generate_report(
    request: Request,
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    loop = asyncio.get_running_loop()
    executor = get_executor()

    import httpx as _httpx

    async def _fetch(path: str, params: dict | None = None):
        try:
            r = await client.get(path, params=params, headers=_propagation_headers())
            r.raise_for_status()
            return r.json()
        except (_httpx.HTTPStatusError, _httpx.RequestError):
            return {}

    (
        aw_kpis, aw_history, aw_forecast,
        aw_reps, aw_products,
        wwi_kpis, wwi_stock, wwi_suppliers,
    ) = await asyncio.gather(
        _fetch("/aw/sales/kpis"),
        _fetch("/aw/sales/history", {"days_back": settings.default_history_days}),
        _fetch("/aw/sales/forecast", {"horizon_days": settings.forecast_horizon_days}),
        _fetch("/aw/reps/scores", {"top_n": settings.ranking_default_top_n}),
        _fetch("/aw/products/demand", {"top_n": settings.ranking_default_top_n}),
        _fetch("/wwi/sales/kpis"),
        _fetch("/wwi/stock/recommendations"),
        _fetch("/wwi/suppliers/scores", {"top_n": settings.ranking_default_top_n}),
    )

    wwi_events = await loop.run_in_executor(
        executor, lambda: wwi_analytics.get_business_events(pg_factory, 200)
    )

    data = {
        "aw_sales_kpis": aw_kpis,
        "aw_sales_history": aw_history,
        "aw_sales_forecast": aw_forecast,
        "aw_rep_scores": aw_reps,
        "aw_product_demand": aw_products,
        "wwi_sales_kpis": wwi_kpis,
        "wwi_stock_recommendations": wwi_stock,
        "wwi_supplier_scores": wwi_suppliers,
        "wwi_business_events": wwi_events,
    }

    report = await loop.run_in_executor(
        executor, lambda: save_report(data, settings.report_output_dir)
    )

    append_audit(
        pg_factory,
        action="report.generated", actor_type="user", actor_id=actor_id,
        domain="platform", service="otel-bi-backend", entity_type="full_report",
        payload={
            "report_id": report["report_id"],
            "xlsx": report["xlsx"]["filename"],
            "pdf": report["pdf"]["filename"],
        },
    )

    return {**report, "output_dir": settings.report_output_dir, **_trace_headers()}
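
# Design note: every upstream section is fetched concurrently, and _fetch
# degrades to an empty dict on failure, so one unavailable analytics endpoint
# hollows out its section of the report instead of failing the whole POST.
# The audit row is written only after save_report succeeds.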


# ---------------------------------------------------------------------------
# Audit log
# ---------------------------------------------------------------------------

@router.get("/api/audit")
async def audit_log(
    response: Response, request: Request,
    limit: int = Query(default=100, ge=1, le=500),
    domain: str | None = Query(default=None),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory

    def _query():
        with pg_factory() as session:
            q = session.query(AuditLog).order_by(AuditLog.occurred_at.desc())
            if domain:
                q = q.filter_by(domain=domain)
            rows = q.limit(limit).all()
            return [
                {
                    "id": r.id,
                    "occurred_at": r.occurred_at.isoformat(),
                    "action": r.action,
                    "status": r.status,
                    "actor_type": r.actor_type,
                    "actor_id": r.actor_id,
                    "domain": r.domain,
                    "service": r.service,
                    "entity_type": r.entity_type,
                    "trace_id": r.trace_id,
                    "payload": r.payload,
                }
                for r in rows
            ]

    return await asyncio.get_running_loop().run_in_executor(get_executor(), _query)


# ---------------------------------------------------------------------------
# Export history
# ---------------------------------------------------------------------------

@router.get("/api/exports")
async def export_history(
    response: Response, request: Request,
    limit: int = Query(default=100, ge=1, le=500),
    domain: str | None = Query(default=None),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory

    def _query():
        with pg_factory() as session:
            q = session.query(ExportRecord).order_by(ExportRecord.created_at.desc())
            if domain:
                q = q.filter_by(domain=domain)
            rows = q.limit(limit).all()
            return [
                {
                    "id": r.id,
                    "exported_at": r.created_at.isoformat(),
                    "domain": r.domain,
                    "service": r.service,
                    "source_view": r.source_view,
                    "format": r.format,
                    "filters_applied": r.filters_applied,
                    "row_count": r.row_count,
                    "file_size_bytes": r.file_size_bytes,
                    "actor_id": r.actor_id,
                    "trace_id": r.trace_id,
                }
                for r in rows
            ]

    return await asyncio.get_running_loop().run_in_executor(get_executor(), _query)


# ---------------------------------------------------------------------------
# Job history (platform-level — both domains in one response)
# ---------------------------------------------------------------------------

@router.get("/api/jobs/aw")
async def jobs_aw(
    response: Response, request: Request,
    limit: int = Query(default=50, ge=1, le=200),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    from app.routers.aw import _list_jobs
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: _list_jobs(pg_factory, "aw", limit)
    )


@router.get("/api/jobs/wwi")
async def jobs_wwi(
    response: Response, request: Request,
    limit: int = Query(default=50, ge=1, le=200),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    from app.routers.wwi import _list_jobs
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: _list_jobs(pg_factory, "wwi", limit)
    )
440 backend/app/routers/wwi.py Normal file
@@ -0,0 +1,440 @@
from __future__ import annotations

import asyncio
import logging
from datetime import datetime, timezone
from typing import Any, Literal

import httpx
from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response
from opentelemetry import propagate, trace
from pydantic import BaseModel, Field
from sqlalchemy.orm import sessionmaker, Session

from app.core.audit import ExportRecord, append_audit, current_span_context
from app.core.config import settings
from app.core.executor import get_executor
from app.core.export import to_pdf_bytes
from app.core.security import FrontendPrincipal, require_frontend_principal
from app.domain.wwi import analytics

LOGGER = logging.getLogger(__name__)
tracer = trace.get_tracer("otel-bi.routers.wwi")

router = APIRouter(prefix="/api/wwi", tags=["wwi"])

_XLSX_MEDIA = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
_PDF_MEDIA = "application/pdf"


class WhatIfRequest(BaseModel):
    stock_item_key: int = Field(..., ge=1)
    demand_multiplier: float = Field(default=1.0, ge=0.1, le=5.0)


def _trace_headers() -> dict[str, str]:
    ctx = trace.get_current_span().get_span_context()
    if not ctx.is_valid:
        return {}
    return {"x-trace-id": f"{ctx.trace_id:032x}", "x-span-id": f"{ctx.span_id:016x}"}


def _propagation_headers() -> dict[str, str]:
    headers: dict[str, str] = {}
    propagate.inject(headers)
    return headers


async def _get(client: httpx.AsyncClient, path: str, params: dict | None = None) -> Any:
    try:
        r = await client.get(path, params=params, headers=_propagation_headers())
        r.raise_for_status()
        return r.json()
    except httpx.HTTPStatusError as exc:
        raise HTTPException(status_code=502, detail=f"Analytics service error: {exc.response.status_code}")
    except httpx.RequestError as exc:
        raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")


async def _post(client: httpx.AsyncClient, path: str, json: dict) -> Any:
    try:
        r = await client.post(path, json=json, headers=_propagation_headers())
        r.raise_for_status()
        return r.json()
    except httpx.HTTPStatusError as exc:
        raise HTTPException(status_code=502 if exc.response.status_code != 404 else 404,
                            detail=f"Analytics service error: {exc.response.status_code}")
    except httpx.RequestError as exc:
        raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")
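
# NOTE: unlike _get, _post passes a 404 from the analytics service through
# unchanged (useful for unknown scheduler job names) and maps every other
# upstream error to 502.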


def _record_export(
    pg_factory: sessionmaker[Session],
    domain: str,
    source_view: str,
    fmt: str,
    filters: dict,
    row_count: int,
    file_size_bytes: int,
    actor_id: str,
    trace_id: str | None,
    span_id: str | None,
) -> None:
    try:
        with pg_factory() as session:
            session.add(ExportRecord(
                domain=domain, service="otel-bi-backend", source_view=source_view,
                format=fmt, filters_applied=filters, row_count=row_count,
                file_size_bytes=file_size_bytes, actor_id=actor_id,
                trace_id=trace_id, span_id=span_id,
            ))
            session.commit()
    except Exception as exc:  # noqa: BLE001
        LOGGER.warning("Failed to record export metadata: %s", exc)
    append_audit(
        pg_factory,
        action="export.created", actor_type="user", actor_id=actor_id,
        domain=domain, service="otel-bi-backend", entity_type=source_view,
        payload={"format": fmt, "row_count": row_count, "file_size_bytes": file_size_bytes, **filters},
    )


async def _proxy_xlsx(
    client: httpx.AsyncClient,
    go_path: str,
    params: dict,
    filename_stem: str,
    domain: str,
    source_view: str,
    filters: dict,
    actor_id: str,
    pg_factory: sessionmaker[Session],
) -> Response:
    """Fetch XLSX bytes from Go, write ExportRecord, return response."""
    try:
        r = await client.get(go_path, params=params, headers=_propagation_headers())
        r.raise_for_status()
    except httpx.HTTPStatusError as exc:
        raise HTTPException(status_code=502, detail=f"Analytics service error: {exc.response.status_code}")
    except httpx.RequestError as exc:
        raise HTTPException(status_code=503, detail=f"Analytics service unavailable: {exc}")

    content = r.content
    row_count = int(r.headers.get("X-Row-Count", "0"))
    today = datetime.now(timezone.utc).strftime("%Y%m%d")
    filename = f"{filename_stem}_{today}.xlsx"
    trace_id, span_id = current_span_context()

    await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _record_export(pg_factory, domain, source_view, "xlsx", filters,
                               row_count, len(content), actor_id, trace_id, span_id),
    )
    return Response(
        content=content, media_type=_XLSX_MEDIA,
        headers={"Content-Disposition": f'attachment; filename="{filename}"'},
    )


def _make_pdf(
    data: list[dict],
    filename_stem: str,
    pdf_title: str,
    domain: str,
    source_view: str,
    filters: dict,
    actor_id: str,
    pg_factory: sessionmaker[Session],
) -> Response:
    with tracer.start_as_current_span(f"export.{domain}.{source_view}") as span:
        span.set_attribute("export.format", "pdf")
        span.set_attribute("export.row_count", len(data))
        content = to_pdf_bytes(data, title=pdf_title)
        span.set_attribute("export.file_size_bytes", len(content))
        today = datetime.now(timezone.utc).strftime("%Y%m%d")
        filename = f"{filename_stem}_{today}.pdf"
        trace_id, span_id = current_span_context()
        _record_export(pg_factory, domain, source_view, "pdf", filters,
                       len(data), len(content), actor_id, trace_id, span_id)
        return Response(
            content=content, media_type=_PDF_MEDIA,
            headers={"Content-Disposition": f'attachment; filename="{filename}"'},
        )


# ---------------------------------------------------------------------------
# KPIs
# ---------------------------------------------------------------------------

@router.get("/sales/kpis")
async def wwi_sales_kpis(
    response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    return await _get(request.app.state.analytics_client, "/wwi/sales/kpis")


# ---------------------------------------------------------------------------
# Stock & reorder
# ---------------------------------------------------------------------------

@router.get("/stock/recommendations")
async def wwi_reorder_recommendations(
    response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/wwi/stock/recommendations")

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: (
            analytics.generate_stock_events(pg_factory, data),
            analytics.persist_reorder_recommendations(pg_factory, data, "api.stock.recommendations"),
        ),
    )
    return data
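
# The tuple-lambda above is a compact way to run both blocking persistence
# calls on one worker thread; the resulting tuple is discarded. An equivalent,
# more explicit sketch:
#
#   def _persist() -> None:
#       analytics.generate_stock_events(pg_factory, data)
#       analytics.persist_reorder_recommendations(pg_factory, data, "api.stock.recommendations")
#
#   await loop.run_in_executor(get_executor(), _persist)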


# ---------------------------------------------------------------------------
# Supplier scores
# ---------------------------------------------------------------------------

@router.get("/suppliers/scores")
async def wwi_supplier_scores(
    response: Response, request: Request,
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    data = await _get(client, "/wwi/suppliers/scores", {"top_n": top_n})

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_supplier_scores(pg_factory, data, top_n, "api.suppliers.scores"),
    )
    return data


# ---------------------------------------------------------------------------
# Business events
# ---------------------------------------------------------------------------

@router.get("/events")
async def wwi_business_events(
    response: Response, request: Request,
    limit: int = Query(default=100, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.get_business_events(pg_factory, limit=limit)
    )


# ---------------------------------------------------------------------------
# What-if scenarios
# ---------------------------------------------------------------------------

@router.post("/scenarios")
async def wwi_create_scenario(
    body: WhatIfRequest, response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory

    result = await _post(client, "/wwi/scenarios", {
        "stock_item_key": body.stock_item_key,
        "demand_multiplier": body.demand_multiplier,
    })

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        get_executor(),
        lambda: analytics.persist_whatif_scenario(pg_factory, result),
    )
    return result


@router.get("/scenarios")
async def wwi_list_scenarios(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_whatif_scenarios(pg_factory, limit=limit)
    )


# ---------------------------------------------------------------------------
# Stored records
# ---------------------------------------------------------------------------

@router.get("/records/reorder-recommendations")
async def wwi_records_reorder(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_reorder_recommendations(pg_factory, limit=limit)
    )


@router.get("/records/supplier-scores")
async def wwi_records_supplier_scores(
    response: Response, request: Request,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.list_supplier_scores(pg_factory, limit=limit)
    )


# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------

@router.get("/export/stock-recommendations")
async def export_wwi_stock_recommendations(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject

    if format == "xlsx":
        return await _proxy_xlsx(client, "/wwi/export/stock-recommendations", {},
                                 "wwi_stock_recommendations", "wwi", "stock-recommendations",
                                 {}, actor_id, pg_factory)

    data = await _get(client, "/wwi/stock/recommendations")
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "wwi_stock_recommendations",
                          "WideWorldImporters — Stock Reorder Recommendations",
                          "wwi", "stock-recommendations", {}, actor_id, pg_factory),
    )


@router.get("/export/supplier-scores")
async def export_wwi_supplier_scores(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    client = request.app.state.analytics_client
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"top_n": top_n}

    if format == "xlsx":
        return await _proxy_xlsx(client, "/wwi/export/supplier-scores", filters,
                                 "wwi_supplier_scores", "wwi", "supplier-scores",
                                 filters, actor_id, pg_factory)

    data = await _get(client, "/wwi/suppliers/scores", filters)
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "wwi_supplier_scores",
                          "WideWorldImporters — Supplier Reliability Scores",
                          "wwi", "supplier-scores", filters, actor_id, pg_factory),
    )


@router.get("/export/business-events")
async def export_wwi_business_events(
    request: Request,
    format: Literal["xlsx", "pdf"] = Query(default="xlsx"),
    limit: int = Query(default=100, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> Response:
    pg_factory = request.app.state.pg_factory
    actor_id = principal.subject
    filters = {"limit": limit}

    data = await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: analytics.get_business_events(pg_factory, limit=limit)
    )
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(),
        lambda: _make_pdf(data, "wwi_business_events",
                          "WideWorldImporters — Business Events",
                          "wwi", "business-events", filters, actor_id, pg_factory),
    )
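
# NOTE: business events live in PostgreSQL rather than in the Go service, so
# this endpoint currently renders a PDF regardless of the `format` query
# parameter; an XLSX path here would need a local workbook writer instead of
# _proxy_xlsx.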


# ---------------------------------------------------------------------------
# Job triggers
# ---------------------------------------------------------------------------

@router.post("/jobs/{job_name}/trigger")
async def trigger_wwi_job(
    job_name: str, response: Response, request: Request,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    response.headers.update(_trace_headers())
    return await _post(request.app.state.analytics_client, f"/scheduler/wwi/{job_name}/trigger", {})


@router.get("/jobs")
async def wwi_job_history(
    response: Response, request: Request,
    limit: int = Query(default=50, ge=1, le=200),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    response.headers.update(_trace_headers())
    pg_factory = request.app.state.pg_factory
    return await asyncio.get_running_loop().run_in_executor(
        get_executor(), lambda: _list_jobs(pg_factory, "wwi", limit)
    )


def _list_jobs(pg_factory, domain: str, limit: int) -> list[dict]:
    from app.core.audit import JobExecution
    with pg_factory() as session:
        rows = (
            session.query(JobExecution)
            .filter_by(domain=domain)
            .order_by(JobExecution.started_at.desc())
            .limit(limit)
            .all()
        )
        return [
            {
                "id": r.id,
                "job_name": r.job_name,
                "domain": r.domain,
                "status": r.status,
                "started_at": r.started_at.isoformat(),
                "completed_at": r.completed_at.isoformat() if r.completed_at else None,
                "duration_ms": r.duration_ms,
                "records_processed": r.records_processed,
                "error_message": r.error_message,
                "trace_id": r.trace_id,
            }
            for r in rows
        ]
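
# This helper intentionally mirrors app.routers.aw._list_jobs (platform.py
# imports both); a shared module would be the obvious deduplication target.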
@@ -1 +0,0 @@
"""Business logic services."""
@@ -1,373 +0,0 @@
from __future__ import annotations

from dataclasses import dataclass
from datetime import date, timedelta
from math import sqrt

import numpy as np
import pandas as pd
from opentelemetry import trace
from sklearn.linear_model import LinearRegression

from app.core.config import settings
from app.services.persistence_service import PersistenceService
from app.services.warehouse_service import ReadOnlyWarehouseClient


@dataclass
class DashboardSnapshot:
    kpis: dict
    history: list[dict]
    forecasts: list[dict]
    rankings: list[dict]
    recommendations: list[dict]


class AnalyticsService:
    def __init__(
        self,
        warehouse_client: ReadOnlyWarehouseClient,
        persistence_service: PersistenceService | None = None,
    ) -> None:
        self.warehouse_client = warehouse_client
        self.persistence_service = persistence_service
        self.tracer = trace.get_tracer(__name__)

    @staticmethod
    def _normalize_frame(df: pd.DataFrame, date_col: str = "sale_date") -> pd.DataFrame:
        normalized = df.copy()
        normalized[date_col] = pd.to_datetime(normalized[date_col], errors="coerce")
        for numeric in ("revenue", "cost", "quantity", "orders"):
            if numeric in normalized.columns:
                normalized[numeric] = pd.to_numeric(
                    normalized[numeric], errors="coerce"
                ).fillna(0.0)
        return normalized.dropna(subset=[date_col])

    def load_sales_history(self, days_back: int | None = None) -> pd.DataFrame:
        with self.tracer.start_as_current_span("analytics.load_sales_history"):
            daily_sales = self._normalize_frame(
                self.warehouse_client.fetch_daily_sales()
            )
            days = days_back or settings.default_history_days
            min_date = pd.Timestamp(date.today() - timedelta(days=days))
            filtered = daily_sales[daily_sales["sale_date"] >= min_date]
            return (
                filtered.groupby("sale_date", as_index=False)[
                    ["revenue", "cost", "quantity", "orders"]
                ]
                .sum()
                .sort_values("sale_date")
            )

    def get_kpis(self) -> dict:
        with self.tracer.start_as_current_span("analytics.kpis"):
            sales = self.load_sales_history(days_back=180)
            if sales.empty:
                return {
                    "total_revenue": 0.0,
                    "gross_margin_pct": 0.0,
                    "total_quantity": 0.0,
                    "avg_order_value": 0.0,
                    "records_in_window": 0,
                }

            total_revenue = float(sales["revenue"].sum())
            total_cost = float(sales["cost"].sum())
            total_orders = max(float(sales["orders"].sum()), 1.0)
            margin_pct = (
                ((total_revenue - total_cost) / total_revenue * 100)
                if total_revenue
                else 0.0
            )
            return {
                "total_revenue": round(total_revenue, 2),
                "gross_margin_pct": round(margin_pct, 2),
                "total_quantity": round(float(sales["quantity"].sum()), 2),
                "avg_order_value": round(total_revenue / total_orders, 2),
                "records_in_window": int(sales.shape[0]),
            }

    def get_history_points(self, days_back: int | None = None) -> list[dict]:
        with self.tracer.start_as_current_span("analytics.history_points"):
            sales = self.load_sales_history(days_back=days_back)
            if sales.empty:
                return []
            return [
                {
                    "date": pd.Timestamp(row["sale_date"]).date().isoformat(),
                    "revenue": round(float(row["revenue"]), 2),
                    "cost": round(float(row["cost"]), 2),
                    "quantity": round(float(row["quantity"]), 2),
                }
                for _, row in sales.iterrows()
            ]

    def get_forecast(
        self,
        horizon_days: int | None = None,
        *,
        trigger_source: str = "api.forecasts",
        persist: bool = True,
    ) -> list[dict]:
        with self.tracer.start_as_current_span("analytics.forecast"):
            horizon = horizon_days or settings.forecast_horizon_days
            sales = self.load_sales_history(days_back=720)
            if sales.empty:
                return []

            series = (
                sales.set_index("sale_date")["revenue"]
                .sort_index()
                .resample("D")
                .sum()
                .fillna(0.0)
            )
            y = series.values
            x = np.arange(len(y), dtype=float).reshape(-1, 1)
            model = LinearRegression()
            model.fit(x, y)
            baseline = model.predict(x)
            residual = y - baseline
            sigma = float(np.std(residual)) if len(residual) > 1 else 0.0

            weekday_baseline = series.groupby(series.index.weekday).mean()
            overall_mean = float(series.mean()) if len(series) else 0.0
            weekday_factor = (
                weekday_baseline / overall_mean
                if overall_mean > 0
                else pd.Series([1.0] * 7, index=range(7))
            )
            weekday_factor = weekday_factor.replace([np.inf, -np.inf], 1.0).fillna(1.0)

            future_x = np.arange(len(y), len(y) + horizon, dtype=float).reshape(-1, 1)
            raw_forecast = model.predict(future_x)

            predictions: list[dict] = []
            start_date = series.index.max().date()
            for idx, point in enumerate(raw_forecast, start=1):
                day = start_date + timedelta(days=idx)
                factor = (
                    float(weekday_factor.loc[day.weekday()])
                    if day.weekday() in weekday_factor.index
                    else 1.0
                )
                yhat = max(float(point) * factor, 0.0)
                ci = 1.96 * sigma * sqrt(1 + idx / max(len(y), 1))
                predictions.append(
                    {
                        "date": day.isoformat(),
                        "predicted_revenue": round(yhat, 2),
                        "lower_bound": round(max(yhat - ci, 0.0), 2),
                        "upper_bound": round(yhat + ci, 2),
                    }
                )

            if persist and self.persistence_service is not None:
                span_context = trace.get_current_span().get_span_context()
                trace_id = (
                    f"{span_context.trace_id:032x}" if span_context.is_valid else None
                )
                span_id = (
                    f"{span_context.span_id:016x}" if span_context.is_valid else None
                )
                self.persistence_service.record_forecast_run(
                    horizon_days=horizon,
                    payload=predictions,
                    trigger_source=trigger_source,
                    trace_id=trace_id,
                    span_id=span_id,
                )

            return predictions
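
    # For reference, the interval computed above is a roughly 95% band that
    # widens with the forecast horizon h over n observed days:
    #   ci = 1.96 * sigma * sqrt(1 + h / n)
    # where sigma is the standard deviation of the in-sample residuals of the
    # fitted linear trend.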

    def get_rankings(
        self,
        top_n: int | None = None,
        *,
        trigger_source: str = "api.rankings",
        persist: bool = True,
    ) -> list[dict]:
        with self.tracer.start_as_current_span("analytics.rankings"):
            n = top_n or settings.ranking_default_top_n
            products = self.warehouse_client.fetch_product_performance().copy()
            if products.empty:
                return []

            products["revenue"] = pd.to_numeric(
                products["revenue"], errors="coerce"
            ).fillna(0.0)
            products["cost"] = pd.to_numeric(products["cost"], errors="coerce").fillna(
                0.0
            )
            products["quantity"] = pd.to_numeric(
                products["quantity"], errors="coerce"
            ).fillna(0.0)
            products["orders"] = pd.to_numeric(
                products["orders"], errors="coerce"
            ).fillna(0.0)

            grouped = (
                products.groupby(
                    ["product_id", "product_name", "category_name"], as_index=False
                )[["revenue", "cost", "quantity", "orders"]]
                .sum()
                .sort_values("revenue", ascending=False)
            )

            grouped["margin_pct"] = np.where(
                grouped["revenue"] > 0,
                ((grouped["revenue"] - grouped["cost"]) / grouped["revenue"]) * 100,
                0.0,
            )

            revenue_norm = grouped["revenue"] / max(
                float(grouped["revenue"].max()), 1.0
            )
            margin_norm = (grouped["margin_pct"] + 100) / 200
            velocity_norm = grouped["quantity"] / max(
                float(grouped["quantity"].max()), 1.0
            )
            grouped["score"] = (
                (0.55 * revenue_norm)
                + (0.30 * margin_norm.clip(0, 1))
                + (0.15 * velocity_norm)
            )
            ranked = (
                grouped.sort_values("score", ascending=False)
                .head(n)
                .reset_index(drop=True)
            )

            result = [
                {
                    "rank": int(idx + 1),
                    "product_id": str(row["product_id"]),
                    "product_name": str(row["product_name"]),
                    "category": str(row["category_name"]),
                    "revenue": round(float(row["revenue"]), 2),
                    "margin_pct": round(float(row["margin_pct"]), 2),
                    "score": round(float(row["score"]) * 100, 2),
                }
                for idx, row in ranked.iterrows()
            ]

            if persist and self.persistence_service is not None:
                span_context = trace.get_current_span().get_span_context()
                trace_id = (
                    f"{span_context.trace_id:032x}" if span_context.is_valid else None
                )
                span_id = (
                    f"{span_context.span_id:016x}" if span_context.is_valid else None
                )
                self.persistence_service.record_ranking_run(
                    top_n=n,
                    payload=result,
                    trigger_source=trigger_source,
                    trace_id=trace_id,
                    span_id=span_id,
                )

            return result
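
    # Composite score = 0.55 * normalized revenue + 0.30 * clipped margin term
    # + 0.15 * normalized quantity velocity, reported on a 0-100 scale. The
    # margin term maps margin_pct from [-100, 100] into [0, 1] before clipping,
    # so heavily negative margins cannot drag the score below zero.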

    def get_recommendations(
        self,
        rankings: list[dict] | None = None,
        *,
        trigger_source: str = "api.recommendations",
        persist: bool = True,
    ) -> list[dict]:
        with self.tracer.start_as_current_span("analytics.recommendations"):
            ranking_rows = (
                rankings
                if rankings is not None
                else self.get_rankings(
                    top_n=20, trigger_source=trigger_source, persist=persist
                )
            )
            customers = self.warehouse_client.fetch_customer_performance().copy()
            if customers.empty:
                customers = pd.DataFrame(columns=["customer_name", "revenue", "orders"])

            recommendations: list[dict] = []

            if ranking_rows:
                champion = ranking_rows[0]
                recommendations.append(
                    {
                        "title": "Double down on champion SKU",
                        "priority": "high",
                        "summary": (
                            f"Promote '{champion['product_name']}' with score {champion['score']:.2f} "
                            f"and margin {champion['margin_pct']:.2f}%."
                        ),
                    }
                )

            low_margin = next(
                (row for row in ranking_rows if row["margin_pct"] < 10), None
            )
            if low_margin:
                recommendations.append(
                    {
                        "title": "Review pricing for low-margin bestseller",
                        "priority": "medium",
                        "summary": (
                            f"'{low_margin['product_name']}' has strong rank but only "
                            f"{low_margin['margin_pct']:.2f}% margin."
                        ),
                    }
                )

            if not customers.empty:
                customers["revenue"] = pd.to_numeric(
                    customers["revenue"], errors="coerce"
                ).fillna(0.0)
                customers["orders"] = pd.to_numeric(
                    customers["orders"], errors="coerce"
                ).fillna(0.0)
                customer = customers.sort_values("revenue", ascending=False).iloc[0]
                recommendations.append(
                    {
                        "title": "Protect top customer relationship",
                        "priority": "high",
                        "summary": (
                            f"Prioritize retention for '{customer['customer_name']}' with "
                            f"{float(customer['orders']):.0f} orders and {float(customer['revenue']):.2f} revenue."
                        ),
                    }
                )

            result = recommendations[:5]
            if persist and self.persistence_service is not None:
                span_context = trace.get_current_span().get_span_context()
                trace_id = (
                    f"{span_context.trace_id:032x}" if span_context.is_valid else None
                )
                span_id = (
                    f"{span_context.span_id:016x}" if span_context.is_valid else None
                )
                self.persistence_service.record_recommendation_run(
                    payload=result,
                    trigger_source=trigger_source,
                    trace_id=trace_id,
                    span_id=span_id,
                )
            return result

    def get_dashboard(self) -> DashboardSnapshot:
        with self.tracer.start_as_current_span("analytics.dashboard"):
            rankings = self.get_rankings(trigger_source="api.dashboard", persist=True)
            return DashboardSnapshot(
                kpis=self.get_kpis(),
                history=self.get_history_points(),
                forecasts=self.get_forecast(
                    trigger_source="api.dashboard", persist=True
                ),
                rankings=rankings,
                recommendations=self.get_recommendations(
                    rankings=rankings,
                    trigger_source="api.dashboard",
                    persist=True,
                ),
            )
@@ -1,281 +0,0 @@
from __future__ import annotations

import logging
from time import perf_counter

from opentelemetry import metrics, trace
from sqlalchemy import desc, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session, sessionmaker

from app.db.postgres_models import AuditLog, ForecastRun, RankingRun, RecommendationRun

LOGGER = logging.getLogger(__name__)


class PersistenceService:
    def __init__(self, session_factory: sessionmaker[Session]) -> None:
        self.session_factory = session_factory
        self.tracer = trace.get_tracer(__name__)
        self.meter = metrics.get_meter(__name__)
        self.write_counter = self.meter.create_counter(
            name="postgres_persist_writes_total",
            description="Total writes to app persistence PostgreSQL",
        )
        self.write_latency = self.meter.create_histogram(
            name="postgres_persist_write_latency_ms",
            unit="ms",
            description="Latency of app persistence write operations",
        )
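
        # The counter/histogram pair gives write throughput and latency per
        # entity. For example, one successful forecast write records
        #   postgres_persist_writes_total{entity="forecast", status="ok"} += 1
        # plus a single postgres_persist_write_latency_ms observation, using
        # the instrument names created above.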

    @staticmethod
    def _to_audit_dict(row: AuditLog) -> dict:
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "method": row.method,
            "path": row.path,
            "query_string": row.query_string,
            "status_code": row.status_code,
            "duration_ms": row.duration_ms,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "client_ip": row.client_ip,
            "user_agent": row.user_agent,
            "details": row.details,
        }

    @staticmethod
    def _to_forecast_dict(row: ForecastRun) -> dict:
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "horizon_days": row.horizon_days,
            "point_count": row.point_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    @staticmethod
    def _to_ranking_dict(row: RankingRun) -> dict:
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "top_n": row.top_n,
            "item_count": row.item_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    @staticmethod
    def _to_recommendation_dict(row: RecommendationRun) -> dict:
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "item_count": row.item_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    def record_audit_log(
        self,
        *,
        method: str,
        path: str,
        query_string: str,
        status_code: int,
        duration_ms: float,
        trace_id: str | None,
        span_id: str | None,
        client_ip: str | None,
        user_agent: str | None,
        details: dict | None = None,
    ) -> None:
        started = perf_counter()
        with self.tracer.start_as_current_span("persist.audit_log"):
            try:
                with self.session_factory() as session:
                    session.add(
                        AuditLog(
                            method=method,
                            path=path,
                            query_string=query_string[:1000],
                            status_code=status_code,
                            duration_ms=duration_ms,
                            trace_id=trace_id,
                            span_id=span_id,
                            client_ip=client_ip,
                            user_agent=user_agent,
                            details=details or {},
                        )
                    )
                    session.commit()
                self.write_counter.add(
                    1, attributes={"entity": "audit", "status": "ok"}
                )
            except SQLAlchemyError as exc:
                LOGGER.exception("Failed to persist audit log: %s", exc)
                self.write_counter.add(
                    1, attributes={"entity": "audit", "status": "error"}
                )
            finally:
                self.write_latency.record(
                    (perf_counter() - started) * 1000,
                    attributes={"entity": "audit"},
                )

    def record_forecast_run(
        self,
        *,
        horizon_days: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        started = perf_counter()
        with self.tracer.start_as_current_span("persist.forecast_run"):
            try:
                with self.session_factory() as session:
                    session.add(
                        ForecastRun(
                            horizon_days=horizon_days,
                            point_count=len(payload),
                            trigger_source=trigger_source,
                            trace_id=trace_id,
                            span_id=span_id,
                            payload=payload,
                        )
                    )
                    session.commit()
                self.write_counter.add(
                    1, attributes={"entity": "forecast", "status": "ok"}
                )
            except SQLAlchemyError as exc:
                LOGGER.exception("Failed to persist forecast run: %s", exc)
                self.write_counter.add(
                    1, attributes={"entity": "forecast", "status": "error"}
                )
            finally:
                self.write_latency.record(
                    (perf_counter() - started) * 1000,
                    attributes={"entity": "forecast"},
                )

    def record_ranking_run(
        self,
        *,
        top_n: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        started = perf_counter()
        with self.tracer.start_as_current_span("persist.ranking_run"):
            try:
                with self.session_factory() as session:
                    session.add(
                        RankingRun(
                            top_n=top_n,
                            item_count=len(payload),
                            trigger_source=trigger_source,
                            trace_id=trace_id,
                            span_id=span_id,
                            payload=payload,
                        )
                    )
                    session.commit()
                self.write_counter.add(
                    1, attributes={"entity": "ranking", "status": "ok"}
                )
            except SQLAlchemyError as exc:
                LOGGER.exception("Failed to persist ranking run: %s", exc)
                self.write_counter.add(
                    1, attributes={"entity": "ranking", "status": "error"}
                )
            finally:
                self.write_latency.record(
                    (perf_counter() - started) * 1000,
                    attributes={"entity": "ranking"},
                )

    def record_recommendation_run(
        self,
        *,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        started = perf_counter()
        with self.tracer.start_as_current_span("persist.recommendation_run"):
            try:
                with self.session_factory() as session:
                    session.add(
                        RecommendationRun(
                            item_count=len(payload),
                            trigger_source=trigger_source,
                            trace_id=trace_id,
                            span_id=span_id,
                            payload=payload,
                        )
                    )
                    session.commit()
                self.write_counter.add(
                    1, attributes={"entity": "recommendation", "status": "ok"}
                )
            except SQLAlchemyError as exc:
                LOGGER.exception("Failed to persist recommendation run: %s", exc)
                self.write_counter.add(
                    1, attributes={"entity": "recommendation", "status": "error"}
                )
            finally:
                self.write_latency.record(
                    (perf_counter() - started) * 1000,
                    attributes={"entity": "recommendation"},
                )
|
||||
|
||||
def list_audit_logs(self, limit: int) -> list[dict]:
|
||||
with self.tracer.start_as_current_span("persist.list_audit_logs"):
|
||||
with self.session_factory() as session:
|
||||
rows = session.execute(
|
||||
select(AuditLog).order_by(desc(AuditLog.created_at)).limit(limit)
|
||||
).scalars()
|
||||
return [self._to_audit_dict(row) for row in rows]
|
||||
|
||||
def list_forecast_runs(self, limit: int) -> list[dict]:
|
||||
with self.tracer.start_as_current_span("persist.list_forecast_runs"):
|
||||
with self.session_factory() as session:
|
||||
rows = session.execute(
|
||||
select(ForecastRun)
|
||||
.order_by(desc(ForecastRun.created_at))
|
||||
.limit(limit)
|
||||
).scalars()
|
||||
return [self._to_forecast_dict(row) for row in rows]
|
||||
|
||||
def list_ranking_runs(self, limit: int) -> list[dict]:
|
||||
with self.tracer.start_as_current_span("persist.list_ranking_runs"):
|
||||
with self.session_factory() as session:
|
||||
rows = session.execute(
|
||||
select(RankingRun)
|
||||
.order_by(desc(RankingRun.created_at))
|
||||
.limit(limit)
|
||||
).scalars()
|
||||
return [self._to_ranking_dict(row) for row in rows]
|
||||
|
||||
def list_recommendation_runs(self, limit: int) -> list[dict]:
|
||||
with self.tracer.start_as_current_span("persist.list_recommendation_runs"):
|
||||
with self.session_factory() as session:
|
||||
rows = session.execute(
|
||||
select(RecommendationRun)
|
||||
.order_by(desc(RecommendationRun.created_at))
|
||||
.limit(limit)
|
||||
).scalars()
|
||||
return [self._to_recommendation_dict(row) for row in rows]
|
||||
@@ -1,101 +0,0 @@
from __future__ import annotations

import hashlib
import logging
from collections.abc import Sequence
from time import perf_counter

import pandas as pd
from opentelemetry import metrics, trace
from sqlalchemy import text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import SQLAlchemyError

from app.db import queries

LOGGER = logging.getLogger(__name__)


class ReadOnlyWarehouseClient:
    def __init__(self, engines: dict[str, Engine]) -> None:
        self.engines = engines
        self.tracer = trace.get_tracer(__name__)
        self.meter = metrics.get_meter(__name__)
        self.query_counter = self.meter.create_counter(
            name="warehouse_queries_total",
            description="Total warehouse query executions",
        )
        self.query_latency = self.meter.create_histogram(
            name="warehouse_query_latency_ms",
            unit="ms",
            description="Warehouse query latency",
        )

    def _validate_read_only_query(self, sql: str) -> None:
        normalized = sql.strip().lower()
        if not (normalized.startswith("select") or normalized.startswith("with")):
            raise ValueError("Only read-only SELECT/CTE SQL statements are allowed.")
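
    # Note: this prefix check is a deliberately light guard, not a SQL parser;
    # it assumes the underlying connections are also restricted (for example
    # read-only credentials or a read-only application intent) as defense in depth.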

    def _run_query_list(
        self, source: str, sql_candidates: Sequence[str]
    ) -> pd.DataFrame:
        engine = self.engines[source]
        last_error: Exception | None = None

        for candidate in sql_candidates:
            self._validate_read_only_query(candidate)
            query_hash = hashlib.sha256(candidate.encode("utf-8")).hexdigest()[:12]
            with self.tracer.start_as_current_span("warehouse.query") as span:
                span.set_attribute("db.system", "mssql")
                span.set_attribute("db.source", source)
                span.set_attribute("db.query.hash", query_hash)
                started = perf_counter()
                try:
                    with engine.connect() as conn:
                        with self.tracer.start_as_current_span(
                            "warehouse.query.execute"
                        ):
                            df = pd.read_sql_query(sql=text(candidate), con=conn)
                    elapsed_ms = (perf_counter() - started) * 1000
                    self.query_latency.record(elapsed_ms, attributes={"source": source})
                    self.query_counter.add(
                        1, attributes={"source": source, "status": "ok"}
                    )
                    return df
                except SQLAlchemyError as exc:
                    last_error = exc
                    elapsed_ms = (perf_counter() - started) * 1000
                    self.query_latency.record(elapsed_ms, attributes={"source": source})
                    self.query_counter.add(
                        1, attributes={"source": source, "status": "error"}
                    )
                    LOGGER.warning(
                        "Query failed for %s with hash %s: %s", source, query_hash, exc
                    )

        if last_error is not None:
            raise RuntimeError(
                f"All query candidates failed for source '{source}'."
            ) from last_error
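        # If every candidate failed we raised above; reaching this point means
        # sql_candidates was empty, so callers get an empty frame by design.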
        return pd.DataFrame()

    def fetch_daily_sales(self) -> pd.DataFrame:
        aw = self._run_query_list("aw", queries.AW_DAILY_SALES_QUERIES)
        aw["source"] = "AdventureWorks2022DWH"
        wwi = self._run_query_list("wwi", queries.WWI_DAILY_SALES_QUERIES)
        wwi["source"] = "WorldWideImporters"
        return pd.concat([aw, wwi], ignore_index=True)

    def fetch_product_performance(self) -> pd.DataFrame:
        aw = self._run_query_list("aw", queries.AW_PRODUCT_PERFORMANCE_QUERIES)
        aw["source"] = "AdventureWorks2022DWH"
        wwi = self._run_query_list("wwi", queries.WWI_PRODUCT_PERFORMANCE_QUERIES)
        wwi["source"] = "WorldWideImporters"
        return pd.concat([aw, wwi], ignore_index=True)

    def fetch_customer_performance(self) -> pd.DataFrame:
        aw = self._run_query_list("aw", queries.AW_CUSTOMER_QUERIES)
        aw["source"] = "AdventureWorks2022DWH"
        wwi = self._run_query_list("wwi", queries.WWI_CUSTOMER_QUERIES)
        wwi["source"] = "WorldWideImporters"
        return pd.concat([aw, wwi], ignore_index=True)
@@ -1 +0,0 @@
"""Microservices package for BI platform."""
@@ -1 +0,0 @@
"""Analytics and forecasting microservice."""
@@ -1,260 +0,0 @@
from __future__ import annotations

import logging
from contextlib import asynccontextmanager
from contextvars import ContextVar

import httpx
import pandas as pd
from fastapi import Depends, FastAPI, Query, Request, Response

from app.core.config import settings
from app.core.otel import (
    TelemetryProviders,
    configure_otel,
    instrument_fastapi,
    instrument_httpx_clients,
    shutdown_otel,
)
from app.core.security import InternalPrincipal, require_internal_principal
from app.services.analytics_service import AnalyticsService
from microservices.common.http import current_trace_headers, with_internal_service_token

logging.basicConfig(level=settings.log_level)
LOGGER = logging.getLogger(__name__)

FORWARD_HEADERS: ContextVar[dict[str, str]] = ContextVar("forward_headers", default={})
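
# A ContextVar keeps the forwarded headers isolated per request/task: each
# endpoint sets it on entry and resets it in a finally block, so concurrent
# requests cannot observe each other's trace or auth headers.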


class QueryWarehouseClient:
    def __init__(self, client: httpx.Client, query_service_url: str) -> None:
        self.client = client
        self.query_service_url = query_service_url.rstrip("/")

    def _fetch(self, path: str) -> pd.DataFrame:
        response = self.client.get(
            f"{self.query_service_url}{path}",
            headers=FORWARD_HEADERS.get(),
            timeout=settings.request_timeout_seconds,
        )
        response.raise_for_status()
        return pd.DataFrame(response.json())

    def fetch_daily_sales(self) -> pd.DataFrame:
        return self._fetch("/internal/daily-sales")

    def fetch_product_performance(self) -> pd.DataFrame:
        return self._fetch("/internal/product-performance")

    def fetch_customer_performance(self) -> pd.DataFrame:
        return self._fetch("/internal/customer-performance")


class PersistenceProxy:
    def __init__(self, client: httpx.Client, persistence_service_url: str) -> None:
        self.client = client
        self.persistence_service_url = persistence_service_url.rstrip("/")

    def _post(self, path: str, payload: dict) -> None:
        response = self.client.post(
            f"{self.persistence_service_url}{path}",
            headers=FORWARD_HEADERS.get(),
            json=payload,
            timeout=settings.request_timeout_seconds,
        )
        response.raise_for_status()

    def record_forecast_run(
        self,
        *,
        horizon_days: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        self._post(
            "/internal/forecast-runs",
            {
                "horizon_days": horizon_days,
                "payload": payload,
                "trigger_source": trigger_source,
                "trace_id": trace_id,
                "span_id": span_id,
            },
        )

    def record_ranking_run(
        self,
        *,
        top_n: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        self._post(
            "/internal/ranking-runs",
            {
                "top_n": top_n,
                "payload": payload,
                "trigger_source": trigger_source,
                "trace_id": trace_id,
                "span_id": span_id,
            },
        )

    def record_recommendation_run(
        self,
        *,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        self._post(
            "/internal/recommendation-runs",
            {
                "payload": payload,
                "trigger_source": trigger_source,
                "trace_id": trace_id,
                "span_id": span_id,
            },
        )


@asynccontextmanager
async def lifespan(app: FastAPI):
    telemetry: TelemetryProviders = configure_otel(settings)
    instrument_httpx_clients()

    http_client = httpx.Client()
    warehouse_client = QueryWarehouseClient(http_client, settings.query_service_url)
    persistence_proxy = PersistenceProxy(http_client, settings.persistence_service_url)
    app.state.http_client = http_client
    app.state.analytics = AnalyticsService(warehouse_client, persistence_proxy)
    LOGGER.info("Analytics service ready")
    yield
    http_client.close()
    shutdown_otel(telemetry)


app = FastAPI(title="analytics-service", version="0.1.0", lifespan=lifespan)
instrument_fastapi(app)


def _analytics() -> AnalyticsService:
    return app.state.analytics


def _with_request_headers(request: Request):
    headers = current_trace_headers()
    incoming_internal = request.headers.get("x-internal-service-token")
    if incoming_internal:
        headers = with_internal_service_token(headers, incoming_internal)
    token = FORWARD_HEADERS.set(headers)
    return token


@app.get("/internal/health")
def health(request: Request, response: Response) -> dict:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return {"status": "ok", "service": "analytics-service"}
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/kpis")
def kpis(
    request: Request,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return _analytics().get_kpis()
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/history")
def history(
    request: Request,
    response: Response,
    days_back: int = Query(default=settings.default_history_days, ge=30, le=1460),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return _analytics().get_history_points(days_back=days_back)
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/forecasts")
def forecasts(
    request: Request,
    response: Response,
    days: int = Query(default=settings.forecast_horizon_days, ge=7, le=180),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return _analytics().get_forecast(
            horizon_days=days, trigger_source="analytics.api.forecasts", persist=True
        )
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/rankings")
def rankings(
    request: Request,
    response: Response,
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return _analytics().get_rankings(
            top_n=top_n, trigger_source="analytics.api.rankings", persist=True
        )
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/recommendations")
def recommendations(
    request: Request,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        return _analytics().get_recommendations(
            trigger_source="analytics.api.recommendations", persist=True
        )
    finally:
        FORWARD_HEADERS.reset(token)


@app.get("/internal/dashboard")
def dashboard(
    request: Request,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    token = _with_request_headers(request)
    try:
        response.headers.update(current_trace_headers())
        snapshot = _analytics().get_dashboard()
        return snapshot.__dict__
    finally:
        FORWARD_HEADERS.reset(token)
@@ -1 +0,0 @@
"""Public API gateway microservice."""
@@ -1,326 +0,0 @@
from __future__ import annotations

import logging
from contextlib import asynccontextmanager
from time import perf_counter

import httpx
from fastapi import Depends, FastAPI, HTTPException, Query, Request, Response
from fastapi.middleware.cors import CORSMiddleware

from app.core.config import settings
from app.core.otel import (
    TelemetryProviders,
    configure_otel,
    instrument_fastapi,
    instrument_httpx_clients,
    shutdown_otel,
)
from app.core.security import (
    FrontendPrincipal,
    get_internal_token_manager,
    require_frontend_principal,
)
from microservices.common.http import current_trace_headers, with_internal_service_token

logging.basicConfig(level=settings.log_level)
LOGGER = logging.getLogger(__name__)


def _raise_upstream(exc: httpx.HTTPStatusError) -> None:
    detail = exc.response.text
    raise HTTPException(status_code=exc.response.status_code, detail=detail) from exc
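
# _raise_upstream always raises, so callers like _get_json never return
# normally after an upstream HTTP error; the upstream status code and body
# are propagated to the client unchanged.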


@asynccontextmanager
async def lifespan(app: FastAPI):
    telemetry: TelemetryProviders = configure_otel(settings)
    instrument_httpx_clients()
    app.state.http_client = httpx.Client()
    LOGGER.info("API gateway ready")
    yield
    app.state.http_client.close()
    shutdown_otel(telemetry)


app = FastAPI(title="api-gateway-service", version="0.1.0", lifespan=lifespan)
instrument_fastapi(app)
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.cors_origins_list,
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
    expose_headers=["x-trace-id", "x-span-id"],
)


@app.middleware("http")
async def security_headers(request: Request, call_next):
    response = await call_next(request)
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["Referrer-Policy"] = "no-referrer"
    response.headers["Permissions-Policy"] = "camera=(), microphone=(), geolocation=()"
    response.headers["X-Permitted-Cross-Domain-Policies"] = "none"
    response.headers["Strict-Transport-Security"] = (
        "max-age=31536000; includeSubDomains"
    )
    response.headers["Cache-Control"] = "no-store"
    response.headers["Pragma"] = "no-cache"
    return response
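
# Note: browsers only honor Strict-Transport-Security over HTTPS, so the HSTS
# header is effectively inert during plain-HTTP local development.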


def _client() -> httpx.Client:
    return app.state.http_client


def _upstream_headers(principal: FrontendPrincipal) -> dict[str, str]:
    token = get_internal_token_manager().mint(
        subject=principal.subject,
        scopes=principal.scopes,
        source_service="api-gateway",
    )
    return with_internal_service_token(current_trace_headers(), token)
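
# A fresh internal token is minted per upstream call, carrying the frontend
# principal's subject and scopes, so internal services can authorize the
# request without ever seeing the browser's original bearer token.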


def _get_json(url: str, principal: FrontendPrincipal) -> dict | list:
    try:
        response = _client().get(
            url,
            headers=_upstream_headers(principal),
            timeout=settings.request_timeout_seconds,
        )
        response.raise_for_status()
        return response.json()
    except httpx.HTTPStatusError as exc:
        _raise_upstream(exc)


def _audit_payload(
    request: Request, response: Response, started: float, principal: FrontendPrincipal
) -> dict:
    headers = current_trace_headers()
    return {
        "method": request.method,
        "path": request.url.path,
        "query_string": request.url.query,
        "status_code": response.status_code,
        "duration_ms": (perf_counter() - started) * 1000,
        "trace_id": headers.get("x-trace-id"),
        "span_id": headers.get("x-span-id"),
        "client_ip": request.client.host if request.client else None,
        "user_agent": request.headers.get("user-agent"),
        "details": {
            "subject": principal.subject,
            "scopes": principal.scopes,
        },
    }


def _persist_audit(
    request: Request, response: Response, started: float, principal: FrontendPrincipal
) -> None:
    if not request.url.path.startswith("/api/"):
        return
    try:
        _client().post(
            f"{settings.persistence_service_url.rstrip('/')}/internal/audit-logs",
            headers=_upstream_headers(principal),
            json=_audit_payload(request, response, started, principal),
            timeout=settings.request_timeout_seconds,
        ).raise_for_status()
    except httpx.HTTPError as exc:
        LOGGER.warning("Audit persistence failed: %s", exc)


@app.get("/api/health")
def health(response: Response) -> dict:
    response.headers.update(current_trace_headers())
    return {"status": "ok", "service": "api-gateway-service"}


@app.get("/api/telemetry/status")
def telemetry_status(
    request: Request,
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = {
        "status": "instrumented",
        "service_name": "api-gateway-service",
        "collector_endpoint": settings.otel_collector_endpoint,
        "trace_id": current_trace_headers().get("x-trace-id"),
        "span_id": current_trace_headers().get("x-span-id"),
        "trace_headers": ["traceparent", "tracestate", "baggage", "x-trace-id"],
        "subject": principal.subject,
    }
    _persist_audit(request, response, started, principal)
    return payload


@app.get("/api/kpis")
def kpis(
    request: Request,
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/kpis", principal
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/history")
def history(
    request: Request,
    response: Response,
    days_back: int = Query(default=settings.default_history_days, ge=30, le=1460),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/history?days_back={days_back}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/forecasts")
def forecasts(
    request: Request,
    response: Response,
    days: int = Query(default=settings.forecast_horizon_days, ge=7, le=180),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/forecasts?days={days}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/rankings")
def rankings(
    request: Request,
    response: Response,
    top_n: int = Query(default=settings.ranking_default_top_n, ge=3, le=100),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/rankings?top_n={top_n}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/recommendations")
def recommendations(
    request: Request,
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/recommendations",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/dashboard")
def dashboard(
    request: Request,
    response: Response,
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> dict:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.analytics_service_url.rstrip('/')}/internal/dashboard", principal
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/storage/audit-logs")
def storage_audit_logs(
    request: Request,
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.persistence_service_url.rstrip('/')}/internal/audit-logs?limit={limit}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/storage/forecasts")
def storage_forecasts(
    request: Request,
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.persistence_service_url.rstrip('/')}/internal/forecast-runs?limit={limit}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/storage/rankings")
def storage_rankings(
    request: Request,
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.persistence_service_url.rstrip('/')}/internal/ranking-runs?limit={limit}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]


@app.get("/api/storage/recommendations")
def storage_recommendations(
    request: Request,
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    principal: FrontendPrincipal = Depends(require_frontend_principal),
) -> list[dict]:
    started = perf_counter()
    response.headers.update(current_trace_headers())
    payload = _get_json(
        f"{settings.persistence_service_url.rstrip('/')}/internal/recommendation-runs?limit={limit}",
        principal,
    )
    _persist_audit(request, response, started, principal)
    return payload  # type: ignore[return-value]
@@ -1 +0,0 @@
"""Read-only MSSQL query microservice."""
@@ -1,85 +0,0 @@
from __future__ import annotations

import logging
from contextlib import asynccontextmanager

import pandas as pd
from fastapi import Depends, FastAPI, Response

from app.core.config import settings
from app.core.otel import (
    TelemetryProviders,
    configure_otel,
    instrument_fastapi,
    instrument_sqlalchemy_engines,
    shutdown_otel,
)
from app.core.security import InternalPrincipal, require_internal_principal
from app.db.engine import create_warehouse_engines, dispose_engines
from app.services.warehouse_service import ReadOnlyWarehouseClient
from microservices.common.http import current_trace_headers

logging.basicConfig(level=settings.log_level)
LOGGER = logging.getLogger(__name__)


def _frame_to_rows(df: pd.DataFrame) -> list[dict]:
    rows: list[dict] = []
    for _, row in df.iterrows():
        payload: dict = {}
        for key, value in row.items():
            if hasattr(value, "isoformat"):
                payload[str(key)] = value.isoformat()
            else:
                payload[str(key)] = value
        rows.append(payload)
    return rows
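
# The hasattr(value, "isoformat") check serializes date, datetime, and pandas
# Timestamp values uniformly; everything else is left for FastAPI's JSON
# encoding to handle.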


@asynccontextmanager
async def lifespan(app: FastAPI):
    telemetry: TelemetryProviders = configure_otel(settings)
    engines = create_warehouse_engines()
    instrument_sqlalchemy_engines(engines)
    app.state.query_client = ReadOnlyWarehouseClient(engines)
    LOGGER.info("BI query service ready with read-only MSSQL engines")
    yield
    dispose_engines(engines)
    shutdown_otel(telemetry)


app = FastAPI(title="bi-query-service", version="0.1.0", lifespan=lifespan)
instrument_fastapi(app)


@app.get("/internal/health")
def health(response: Response) -> dict:
    response.headers.update(current_trace_headers())
    return {"status": "ok", "service": "bi-query-service"}


@app.get("/internal/daily-sales")
def daily_sales(
    response: Response, _auth: InternalPrincipal = Depends(require_internal_principal)
) -> list[dict]:
    response.headers.update(current_trace_headers())
    client: ReadOnlyWarehouseClient = app.state.query_client
    return _frame_to_rows(client.fetch_daily_sales())


@app.get("/internal/product-performance")
def product_performance(
    response: Response, _auth: InternalPrincipal = Depends(require_internal_principal)
) -> list[dict]:
    response.headers.update(current_trace_headers())
    client: ReadOnlyWarehouseClient = app.state.query_client
    return _frame_to_rows(client.fetch_product_performance())


@app.get("/internal/customer-performance")
def customer_performance(
    response: Response, _auth: InternalPrincipal = Depends(require_internal_principal)
) -> list[dict]:
    response.headers.update(current_trace_headers())
    client: ReadOnlyWarehouseClient = app.state.query_client
    return _frame_to_rows(client.fetch_customer_performance())
@@ -1 +0,0 @@
"""Shared helpers for microservices."""
@@ -1,19 +0,0 @@
from __future__ import annotations

from opentelemetry import trace


def current_trace_headers() -> dict[str, str]:
    span_context = trace.get_current_span().get_span_context()
    if not span_context.is_valid:
        return {}
    return {
        "x-trace-id": f"{span_context.trace_id:032x}",
        "x-span-id": f"{span_context.span_id:016x}",
    }


def with_internal_service_token(headers: dict[str, str], token: str) -> dict[str, str]:
    merged = dict(headers)
    merged["x-internal-service-token"] = token
    return merged
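
# Usage sketch (matching the gateway call sites): merge the current trace
# headers with a minted internal token before an internal hop, e.g.
#   headers = with_internal_service_token(current_trace_headers(), token)
# The 32- and 16-hex-digit formats match the W3C Trace Context encoding of
# trace and span IDs.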
@@ -1 +0,0 @@
"""PostgreSQL persistence microservice."""
@@ -1,176 +0,0 @@
from __future__ import annotations

import logging
from contextlib import asynccontextmanager

from fastapi import Depends, FastAPI, Query, Response
from pydantic import BaseModel, Field

from app.core.config import settings
from app.core.otel import (
    TelemetryProviders,
    configure_otel,
    instrument_fastapi,
    instrument_sqlalchemy_engines,
    shutdown_otel,
)
from app.core.security import InternalPrincipal, require_internal_principal
from app.db.postgres import (
    create_postgres_engine,
    create_postgres_session_factory,
    initialize_postgres_schema,
)
from app.services.persistence_service import PersistenceService
from microservices.common.http import current_trace_headers

logging.basicConfig(level=settings.log_level)
LOGGER = logging.getLogger(__name__)


class AuditLogIn(BaseModel):
    method: str
    path: str
    query_string: str = ""
    status_code: int
    duration_ms: float
    trace_id: str | None = None
    span_id: str | None = None
    client_ip: str | None = None
    user_agent: str | None = None
    details: dict = Field(default_factory=dict)


class ForecastRunIn(BaseModel):
    horizon_days: int
    payload: list[dict]
    trigger_source: str
    trace_id: str | None = None
    span_id: str | None = None


class RankingRunIn(BaseModel):
    top_n: int
    payload: list[dict]
    trigger_source: str
    trace_id: str | None = None
    span_id: str | None = None


class RecommendationRunIn(BaseModel):
    payload: list[dict]
    trigger_source: str
    trace_id: str | None = None
    span_id: str | None = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    telemetry: TelemetryProviders = configure_otel(settings)
    engine = create_postgres_engine()
    initialize_postgres_schema(engine)
    instrument_sqlalchemy_engines({"appdb": engine})
    app.state.persistence_service = PersistenceService(
        create_postgres_session_factory(engine)
    )
    LOGGER.info("Persistence service ready with PostgreSQL")
    yield
    engine.dispose()
    shutdown_otel(telemetry)


app = FastAPI(title="persistence-service", version="0.1.0", lifespan=lifespan)
instrument_fastapi(app)


def _service() -> PersistenceService:
    return app.state.persistence_service


@app.get("/internal/health")
def health(response: Response) -> dict:
    response.headers.update(current_trace_headers())
    return {"status": "ok", "service": "persistence-service"}


@app.post("/internal/audit-logs")
def create_audit_log(
    payload: AuditLogIn,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    response.headers.update(current_trace_headers())
    _service().record_audit_log(**payload.model_dump())
    return {"status": "ok"}


@app.post("/internal/forecast-runs")
def create_forecast_run(
    payload: ForecastRunIn,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    response.headers.update(current_trace_headers())
    _service().record_forecast_run(**payload.model_dump())
    return {"status": "ok"}


@app.post("/internal/ranking-runs")
def create_ranking_run(
    payload: RankingRunIn,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    response.headers.update(current_trace_headers())
    _service().record_ranking_run(**payload.model_dump())
    return {"status": "ok"}


@app.post("/internal/recommendation-runs")
def create_recommendation_run(
    payload: RecommendationRunIn,
    response: Response,
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> dict:
    response.headers.update(current_trace_headers())
    _service().record_recommendation_run(**payload.model_dump())
    return {"status": "ok"}


@app.get("/internal/audit-logs")
def list_audit_logs(
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    response.headers.update(current_trace_headers())
    return _service().list_audit_logs(limit=limit)


@app.get("/internal/forecast-runs")
def list_forecast_runs(
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    response.headers.update(current_trace_headers())
    return _service().list_forecast_runs(limit=limit)


@app.get("/internal/ranking-runs")
def list_ranking_runs(
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    response.headers.update(current_trace_headers())
    return _service().list_ranking_runs(limit=limit)


@app.get("/internal/recommendation-runs")
def list_recommendation_runs(
    response: Response,
    limit: int = Query(default=settings.storage_default_limit, ge=1, le=500),
    _auth: InternalPrincipal = Depends(require_internal_principal),
) -> list[dict]:
    response.headers.update(current_trace_headers())
    return _service().list_recommendation_runs(limit=limit)
@@ -1,46 +1,43 @@
[project]
name = "otel-bi-backend"
version = "0.1.0"
description = "OpenTelemetry-instrumented BI and forecasting backend for MSSQL data warehouses"
requires-python = ">=3.11"
version = "0.2.0"
description = "OpenTelemetry-instrumented BI microservices backend (AdventureWorks DW + WideWorldImporters DW)"
requires-python = ">=3.14"
license = "AGPL-3.0-or-later"
authors = [{ name = "Domagoj Andrić" }]
dependencies = [
    "fastapi>=0.116.0",
    "uvicorn[standard]>=0.35.0",
    "httpx>=0.28.0",
    "pydantic>=2.11.0",
    "pydantic-settings>=2.10.0",
    "python-dotenv>=1.1.0",
    "httpx>=0.28.0",
    "pyjwt[crypto]>=2.10.0",
    "sqlalchemy>=2.0.40",
    "pyodbc>=5.2.0",
    "psycopg[binary]>=3.2.0",
    "pandas>=2.3.0",
    "numpy>=2.3.0",
    "scikit-learn>=1.7.0",
    "openpyxl>=3.1.0",
    "reportlab>=4.2.0",
    "opentelemetry-api>=1.36.0",
    "opentelemetry-sdk>=1.36.0",
    "opentelemetry-exporter-otlp-proto-http>=1.36.0",
    "opentelemetry-instrumentation-fastapi>=0.57b0",
    "opentelemetry-instrumentation-httpx>=0.57b0",
    "opentelemetry-instrumentation-sqlalchemy>=0.57b0",
    "opentelemetry-instrumentation-logging>=0.57b0",
    "opentelemetry-instrumentation-system-metrics>=0.57b0",
]

[project.optional-dependencies]
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
# Explicitly list packages so hatchling never picks up test/build artifacts
packages = ["app"]

[dependency-groups]
dev = [
    "pytest>=8.4.0",
]

[build-system]
requires = ["setuptools>=68", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["."]
include = ["app*", "microservices*"]

[tool.pytest.ini_options]
pythonpath = ["."]
@@ -1,79 +1,173 @@
from __future__ import annotations

from datetime import date, timedelta
from unittest.mock import MagicMock, patch

import pandas as pd

from app.services.analytics_service import AnalyticsService
import pytest


class StubWarehouseClient:
    def fetch_daily_sales(self) -> pd.DataFrame:
        today = date.today()
        rows = []
        for i in range(120):
            day = today - timedelta(days=120 - i)
            rows.append(
                {
                    "sale_date": day.isoformat(),
                    "revenue": 1000 + (i * 5),
                    "cost": 500 + (i * 2),
                    "quantity": 40 + i,
                    "orders": 5 + (i % 4),
                    "source": "stub",
                }
            )
        return pd.DataFrame(rows)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

    def fetch_product_performance(self) -> pd.DataFrame:
        return pd.DataFrame(
            [
                {
                    "product_id": "A1",
                    "product_name": "Alpha",
                    "category_name": "CatA",
                    "revenue": 12000,
                    "cost": 6000,
                    "quantity": 400,
                    "orders": 150,
                    "source": "stub",
                },
                {
                    "product_id": "B1",
                    "product_name": "Beta",
                    "category_name": "CatB",
                    "revenue": 9000,
                    "cost": 8500,
                    "quantity": 300,
                    "orders": 110,
                    "source": "stub",
                },
            ]
        )

    def fetch_customer_performance(self) -> pd.DataFrame:
        return pd.DataFrame(
            [
                {
                    "customer_id": "C1",
                    "customer_name": "Contoso",
                    "revenue": 15000,
                    "orders": 80,
                    "source": "stub",
                }
            ]
        )
def _make_pg_factory(session_mock: MagicMock) -> MagicMock:
    factory = MagicMock()
    factory.return_value.__enter__ = MagicMock(return_value=session_mock)
    factory.return_value.__exit__ = MagicMock(return_value=False)
    return factory
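
# _make_pg_factory emulates the "with factory() as session:" protocol used by
# the persistence helpers, so session.add / session.commit can be asserted
# without a real PostgreSQL connection.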


def test_forecast_has_expected_horizon() -> None:
    service = AnalyticsService(StubWarehouseClient())  # type: ignore[arg-type]
    forecast = service.get_forecast(horizon_days=15)
    assert len(forecast) == 15
    assert "predicted_revenue" in forecast[0]
# ---------------------------------------------------------------------------
# AW persistence layer
# ---------------------------------------------------------------------------

class TestAWPersist:
    def test_persist_forecast_writes_record(self) -> None:
        from app.domain.aw import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [
            {"date": "2025-01-01", "predicted_revenue": 1000.0,
             "lower_bound": 900.0, "upper_bound": 1100.0},
        ]

        with patch.object(analytics, "append_audit"):
            analytics.persist_forecast(factory, data, horizon_days=30, trigger_source="test")

        session.add.assert_called_once()
        model = session.add.call_args[0][0]
        assert model.horizon_days == 30
        assert model.point_count == 1

    def test_persist_rep_scores_writes_record(self) -> None:
        from app.domain.aw import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [
            {"rep_name": "Alice", "total_revenue": 100_000.0,
             "total_orders": 50, "performance_score": 0.92},
        ]

        with patch.object(analytics, "append_audit"):
            analytics.persist_rep_scores(factory, data, top_n=10, trigger_source="test")

        session.add.assert_called_once()
        model = session.add.call_args[0][0]
        assert model.rep_count == 1

    def test_persist_product_demand_writes_record(self) -> None:
        from app.domain.aw import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [{"product_name": "Widget", "total_quantity": 500.0}]

        with patch.object(analytics, "append_audit"):
            analytics.persist_product_demand(factory, data, top_n=10, trigger_source="test")

        session.add.assert_called_once()

    def test_persist_anomaly_run_writes_record(self) -> None:
        from app.domain.aw import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [
            {"is_anomaly": True, "date": "2025-01-01", "revenue": 50.0},
            {"is_anomaly": False, "date": "2025-01-02", "revenue": 1000.0},
        ]

        with patch.object(analytics, "append_audit"):
            analytics.persist_anomaly_run(factory, data, trigger_source="test")

        session.add.assert_called_once()


def test_rankings_are_sorted() -> None:
    service = AnalyticsService(StubWarehouseClient())  # type: ignore[arg-type]
    rankings = service.get_rankings(top_n=2)
    assert len(rankings) == 2
    assert rankings[0]["score"] >= rankings[1]["score"]
# ---------------------------------------------------------------------------
# WWI persistence layer
# ---------------------------------------------------------------------------

class TestWWIPersist:
    def test_persist_reorder_recommendations_writes_record(self) -> None:
        from app.domain.wwi import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [
            {"stock_item_key": 1, "stock_item_name": "Widget",
             "current_stock": 10.0, "urgency": "HIGH"},
        ]

        with patch.object(analytics, "append_audit"):
            analytics.persist_reorder_recommendations(factory, data, trigger_source="test")

        session.add.assert_called_once()
        model = session.add.call_args[0][0]
        assert model.item_count == 1

    def test_persist_supplier_scores_writes_record(self) -> None:
        from app.domain.wwi import analytics

        session = MagicMock()
        factory = _make_pg_factory(session)

        data = [
            {"supplier_name": "Acme", "performance_score": 0.87},
        ]

        with patch.object(analytics, "append_audit"):
            analytics.persist_supplier_scores(factory, data, top_n=10, trigger_source="test")

        session.add.assert_called_once()
        model = session.add.call_args[0][0]
        assert model.supplier_count == 1

    def test_generate_stock_events_skips_non_high_urgency(self) -> None:
        from app.domain.wwi import analytics
        from app.domain.wwi.models import WWIBusinessEvent

        session = MagicMock()
        session.query.return_value.filter.return_value.first.return_value = None
        factory = _make_pg_factory(session)

        items = [
            {"stock_item_key": 1, "stock_item_name": "Widget", "urgency": "LOW",
             "current_stock": 100.0, "avg_daily_demand": 5.0,
             "days_until_stockout": 20.0, "recommended_reorder_qty": 50},
            {"stock_item_key": 2, "stock_item_name": "Gadget", "urgency": "MEDIUM",
             "current_stock": 50.0, "avg_daily_demand": 3.0,
             "days_until_stockout": 16.0, "recommended_reorder_qty": 30},
        ]

        analytics.generate_stock_events(factory, items)

        # No events should have been added since neither item is HIGH urgency
        session.add.assert_not_called()

    def test_generate_stock_events_creates_event_for_high_urgency(self) -> None:
        from app.domain.wwi import analytics

        session = MagicMock()
        # Simulate no existing event in the 24h window
        session.query.return_value.filter.return_value.first.return_value = None
        factory = _make_pg_factory(session)

        items = [
            {"stock_item_key": 42, "stock_item_name": "Critical Part", "urgency": "HIGH",
             "current_stock": 2.0, "avg_daily_demand": 5.0,
             "days_until_stockout": None, "recommended_reorder_qty": 100},
        ]

        analytics.generate_stock_events(factory, items)

        session.add.assert_called_once()
        event = session.add.call_args[0][0]
        assert event.event_type == "LOW_STOCK"
        assert event.entity_key == "42"
        assert "immediately" in event.message

@@ -1,65 +0,0 @@
from __future__ import annotations

import pytest
from fastapi import HTTPException

from app.core.config import settings
from app.core.security import InternalTokenManager, require_internal_principal


def test_internal_token_round_trip(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setattr(
        settings,
        "internal_service_shared_secret",
        "unit-test-shared-secret-key-at-least-32b",
    )
    monkeypatch.setattr(settings, "internal_service_token_audience", "bi-internal-test")
    monkeypatch.setattr(settings, "internal_service_allowed_issuers", "api-gateway")
    monkeypatch.setattr(settings, "internal_token_clock_skew_seconds", 0)

    manager = InternalTokenManager()
    token = manager.mint(
        subject="user-123",
        scopes=["openid", "profile"],
        source_service="api-gateway",
    )

    principal = manager.verify(token)
    assert principal.subject == "user-123"
    assert principal.claims["iss"] == "api-gateway"
    assert principal.claims["typ"] == "internal-service"


def test_internal_token_rejects_untrusted_issuer(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    monkeypatch.setattr(
        settings,
        "internal_service_shared_secret",
        "unit-test-shared-secret-key-at-least-32b",
    )
    monkeypatch.setattr(settings, "internal_service_token_audience", "bi-internal-test")
    monkeypatch.setattr(settings, "internal_service_allowed_issuers", "api-gateway")
    monkeypatch.setattr(settings, "internal_token_clock_skew_seconds", 0)

    manager = InternalTokenManager()
    token = manager.mint(
        subject="user-123",
        scopes=["openid"],
        source_service="analytics",
    )

    with pytest.raises(HTTPException) as exc:
        manager.verify(token)
    assert exc.value.status_code == 401
    assert exc.value.detail == "Internal token issuer is not allowed."


def test_require_internal_principal_rejects_missing_token(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    monkeypatch.setattr(settings, "internal_service_auth_enabled", True)
    with pytest.raises(HTTPException) as exc:
        require_internal_principal(None)
    assert exc.value.status_code == 401
    assert exc.value.detail == "Missing x-internal-service-token header."
backend/uv.lock (generated, 2002 lines): file diff suppressed because it is too large.