Push the rest

This commit is contained in:
2026-05-11 10:58:46 +02:00
parent adb5c1a439
commit 0031caf16c
94 changed files with 11777 additions and 3474 deletions

View File

@@ -0,0 +1,154 @@
package main
import (
"context"
"fmt"
"log/slog"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"otel-bi-analytics/internal/config"
"otel-bi-analytics/internal/db"
"otel-bi-analytics/internal/handler"
"otel-bi-analytics/internal/scheduler"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
"go.opentelemetry.io/otel/propagation"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)
// main boots the analytics HTTP service: structured logging, OpenTelemetry,
// three database connections, the background scheduler, and finally an HTTP
// server with graceful shutdown on SIGTERM/SIGINT.
func main() {
	// JSON logs on stdout so log collectors can parse them.
	slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	})))
	cfg := config.Load()
	ctx := context.Background()
	// OTel first, so everything below can emit traces/metrics. The returned
	// shutdown func is invoked from the signal goroutine, not deferred —
	// os.Exit in the error paths below would skip defers anyway.
	shutdown, err := setupOtel(ctx, cfg)
	if err != nil {
		slog.Error("failed to set up OTel", "err", err)
		os.Exit(1)
	}
	// Two MSSQL sources ("aw", "wwi") and one PostgreSQL sink.
	awDB, err := db.Open(ctx, cfg.AWConnStr, "aw")
	if err != nil {
		slog.Error("failed to connect to AW MSSQL", "err", err)
		os.Exit(1)
	}
	defer awDB.Close()
	wwiDB, err := db.Open(ctx, cfg.WWIConnStr, "wwi")
	if err != nil {
		slog.Error("failed to connect to WWI MSSQL", "err", err)
		os.Exit(1)
	}
	defer wwiDB.Close()
	pgPool, err := db.OpenPostgres(ctx, cfg.PostgresDSN)
	if err != nil {
		slog.Error("failed to connect to PostgreSQL", "err", err)
		os.Exit(1)
	}
	defer pgPool.Close()
	// Background refresh jobs run independently of HTTP traffic.
	sched := scheduler.New(awDB, wwiDB, pgPool, cfg.DefaultTopN)
	sched.Start()
	defer sched.Stop()
	mux := http.NewServeMux()
	h := handler.New(awDB, wwiDB, pgPool, sched, cfg.DefaultTopN, cfg.ForecastHorizonDays, cfg.DefaultHistoryDays)
	h.RegisterRoutes(mux)
	// otelhttp wraps the mux so every request gets a server span.
	srv := &http.Server{
		Addr:         fmt.Sprintf(":%d", cfg.Port),
		Handler:      otelhttp.NewHandler(mux, "analytics-service"),
		ReadTimeout:  60 * time.Second,
		WriteTimeout: 120 * time.Second,
		IdleTimeout:  120 * time.Second,
	}
	// Signal goroutine: drain in-flight requests within a 15s budget, flush
	// OTel providers, then release main via the done channel.
	done := make(chan struct{})
	go func() {
		quit := make(chan os.Signal, 1)
		signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
		<-quit
		slog.Info("shutting down")
		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
		defer cancel()
		// Shutdown unblocks ListenAndServe below with http.ErrServerClosed.
		_ = srv.Shutdown(ctx)
		shutdown(ctx)
		close(done)
	}()
	slog.Info("analytics service started", "port", cfg.Port)
	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		slog.Error("server error", "err", err)
		os.Exit(1)
	}
	// Wait for the signal goroutine to finish the OTel flush before exiting.
	<-done
	slog.Info("shutdown complete")
}
// setupOtel wires OTLP/HTTP trace and metric export against the configured
// collector endpoint and installs the resulting providers as the process-wide
// OTel defaults. It returns a shutdown function that flushes and stops both
// providers; callers should invoke it before the process exits.
func setupOtel(ctx context.Context, cfg config.Config) (func(context.Context), error) {
	res, err := resource.New(ctx,
		resource.WithAttributes(
			semconv.ServiceName(cfg.OtelServiceName),
			semconv.ServiceNamespace(cfg.OtelServiceNamespace),
		),
	)
	if err != nil {
		return nil, fmt.Errorf("create OTel resource: %w", err)
	}
	traceExporter, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpointURL(cfg.OtelCollectorEndpoint+"/v1/traces"),
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		return nil, fmt.Errorf("create OTLP trace exporter: %w", err)
	}
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(traceExporter),
		sdktrace.WithResource(res),
	)
	otel.SetTracerProvider(tp)
	// W3C trace-context plus baggage so both propagate over HTTP.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))
	metricExporter, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpointURL(cfg.OtelCollectorEndpoint+"/v1/metrics"),
		otlpmetrichttp.WithInsecure(),
	)
	if err != nil {
		// The tracer provider (and its batch-export goroutine) is already
		// running; stop it so a half-finished setup does not leak on this
		// error path.
		_ = tp.Shutdown(ctx)
		return nil, fmt.Errorf("create OTLP metric exporter: %w", err)
	}
	mp := sdkmetric.NewMeterProvider(
		// Push metrics to the collector every 15 seconds.
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter, sdkmetric.WithInterval(15*time.Second))),
		sdkmetric.WithResource(res),
	)
	otel.SetMeterProvider(mp)
	return func(ctx context.Context) {
		if err := tp.Shutdown(ctx); err != nil {
			slog.Error("trace provider shutdown error", "err", err)
		}
		if err := mp.Shutdown(ctx); err != nil {
			slog.Error("metric provider shutdown error", "err", err)
		}
	}, nil
}

51
backend/analytics/go.mod Normal file
View File

@@ -0,0 +1,51 @@
module otel-bi-analytics
go 1.25.0
require (
github.com/jackc/pgx/v5 v5.7.2
github.com/microsoft/go-mssqldb v1.7.2
github.com/robfig/cron/v3 v3.0.1
github.com/xuri/excelize/v2 v2.8.1
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
go.opentelemetry.io/otel v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
go.opentelemetry.io/otel/metric v1.43.0
go.opentelemetry.io/otel/sdk v1.43.0
go.opentelemetry.io/otel/sdk/metric v1.43.0
go.opentelemetry.io/otel/trace v1.43.0
)
require (
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
github.com/golang-sql/sqlexp v0.1.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/richardlehane/mscfb v1.0.4 // indirect
github.com/richardlehane/msoleps v1.0.3 // indirect
github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 // indirect
github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
golang.org/x/crypto v0.49.0 // indirect
golang.org/x/net v0.52.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/text v0.35.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/grpc v1.80.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
)

128
backend/analytics/go.sum Normal file
View File

@@ -0,0 +1,128 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM=
github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/richardlehane/msoleps v1.0.3 h1:aznSZzrwYRl3rLKRT3gUk9am7T/mLNSnJINvN0AQoVM=
github.com/richardlehane/msoleps v1.0.3/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53 h1:Chd9DkqERQQuHpXjR/HSV1jLZA6uaoiwwH3vSuF3IW0=
github.com/xuri/efp v0.0.0-20231025114914-d1ff6096ae53/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
github.com/xuri/excelize/v2 v2.8.1 h1:pZLMEwK8ep+CLIUWpWmvW8IWE/yxqG0I1xcN6cVMGuQ=
github.com/xuri/excelize/v2 v2.8.1/go.mod h1:oli1E4C3Pa5RXg1TBXn4ENCXDV5JUMlBluUhG7c+CEE=
github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05 h1:qhbILQo1K3mphbwKh1vNm4oGezE1eF9fQWmNiIpSfI4=
github.com/xuri/nfp v0.0.0-20230919160717-d98342af3f05/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,724 @@
package analytics
import (
"context"
"database/sql"
"fmt"
"math"
"sort"
"time"
mssqldb "otel-bi-analytics/internal/db"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// awTracer names all spans emitted by the AdventureWorks analytics helpers.
var awTracer = otel.Tracer("otel-bi.analytics.aw")
// ---------------------------------------------------------------------------
// SQL queries (with fallback variants)
// ---------------------------------------------------------------------------
// awDailySalesQueries lists daily-sales queries in preference order; the
// first query that runs successfully wins (see mssqldb.QueryFirst usage in
// fetchAWDailySales, which also merges duplicate sale_date rows produced by
// the UNION ALL branches).
var awDailySalesQueries = []string{
	// Preferred: internet + reseller fact tables joined through DimDate.
	`SELECT
CAST(d.FullDateAlternateKey AS date) AS sale_date,
SUM(f.SalesAmount) AS revenue,
SUM(f.TotalProductCost) AS cost,
SUM(f.OrderQuantity) AS quantity,
COUNT_BIG(*) AS orders
FROM dbo.FactInternetSales AS f
INNER JOIN dbo.DimDate AS d ON d.DateKey = f.OrderDateKey
GROUP BY CAST(d.FullDateAlternateKey AS date)
UNION ALL
SELECT
CAST(d.FullDateAlternateKey AS date) AS sale_date,
SUM(r.SalesAmount) AS revenue,
SUM(r.TotalProductCost) AS cost,
SUM(r.OrderQuantity) AS quantity,
COUNT_BIG(*) AS orders
FROM dbo.FactResellerSales AS r
INNER JOIN dbo.DimDate AS d ON d.DateKey = r.OrderDateKey
GROUP BY CAST(d.FullDateAlternateKey AS date)
ORDER BY sale_date`,
	// Fallback for schemas lacking DimDate/FactResellerSales: group the
	// internet sales fact table directly by its OrderDate column.
	`SELECT
CAST(OrderDate AS date) AS sale_date,
SUM(SalesAmount) AS revenue,
SUM(TotalProductCost) AS cost,
SUM(OrderQuantity) AS quantity,
COUNT_BIG(*) AS orders
FROM dbo.FactInternetSales
GROUP BY CAST(OrderDate AS date)
ORDER BY sale_date`,
}
// awRepPerfQueries lists per-sales-rep aggregation queries in preference
// order; the first one that succeeds is used.
var awRepPerfQueries = []string{
	// Preferred: restrict to flagged sales people and attach their territory.
	`SELECT
e.EmployeeKey AS employee_key,
e.FirstName + ' ' + e.LastName AS rep_name,
COALESCE(e.Title, 'Sales Rep') AS rep_title,
COALESCE(st.SalesTerritoryRegion, 'Unknown') AS territory,
SUM(r.SalesAmount) AS revenue,
SUM(r.TotalProductCost) AS cost,
COUNT_BIG(*) AS orders,
AVG(r.SalesAmount) AS avg_deal_size
FROM dbo.FactResellerSales AS r
INNER JOIN dbo.DimEmployee AS e ON e.EmployeeKey = r.EmployeeKey
INNER JOIN dbo.DimSalesTerritory AS st ON st.SalesTerritoryKey = r.SalesTerritoryKey
WHERE e.SalesPersonFlag = 1
GROUP BY e.EmployeeKey, e.FirstName, e.LastName, e.Title, st.SalesTerritoryRegion
ORDER BY revenue DESC`,
	// Fallback without the territory dimension or the sales-person filter.
	`SELECT
e.EmployeeKey AS employee_key,
e.FirstName + ' ' + e.LastName AS rep_name,
COALESCE(e.Title, 'Employee') AS rep_title,
'Unknown' AS territory,
SUM(r.SalesAmount) AS revenue,
SUM(r.TotalProductCost) AS cost,
COUNT_BIG(*) AS orders,
AVG(r.SalesAmount) AS avg_deal_size
FROM dbo.FactResellerSales AS r
INNER JOIN dbo.DimEmployee AS e ON e.EmployeeKey = r.EmployeeKey
GROUP BY e.EmployeeKey, e.FirstName, e.LastName, e.Title
ORDER BY revenue DESC`,
}
// awProductDemandQueries lists per-product aggregation queries in preference
// order; the first one that succeeds is used.
var awProductDemandQueries = []string{
	// Preferred: resolve product name and category via the product dimension
	// tables (LEFT JOINs tolerate missing subcategory/category rows).
	`SELECT
p.ProductAlternateKey AS product_id,
p.EnglishProductName AS product_name,
COALESCE(pc.EnglishProductCategoryName, 'Unknown') AS category,
SUM(f.SalesAmount) AS revenue,
SUM(f.TotalProductCost) AS cost,
SUM(f.OrderQuantity) AS quantity,
COUNT_BIG(*) AS orders
FROM dbo.FactInternetSales AS f
INNER JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
LEFT JOIN dbo.DimProductSubcategory AS sc ON sc.ProductSubcategoryKey = p.ProductSubcategoryKey
LEFT JOIN dbo.DimProductCategory AS pc ON pc.ProductCategoryKey = sc.ProductCategoryKey
GROUP BY p.ProductAlternateKey, p.EnglishProductName, pc.EnglishProductCategoryName
ORDER BY revenue DESC`,
	// Fallback: key products by the raw fact-table ProductKey and fall back
	// to that key as the display name when the dimension row is missing.
	`SELECT
CAST(f.ProductKey AS nvarchar(50)) AS product_id,
COALESCE(p.EnglishProductName, CAST(f.ProductKey AS nvarchar(50))) AS product_name,
'Unknown' AS category,
SUM(f.SalesAmount) AS revenue,
SUM(f.TotalProductCost) AS cost,
SUM(f.OrderQuantity) AS quantity,
COUNT_BIG(*) AS orders
FROM dbo.FactInternetSales AS f
LEFT JOIN dbo.DimProduct AS p ON p.ProductKey = f.ProductKey
GROUP BY f.ProductKey, p.EnglishProductName
ORDER BY revenue DESC`,
}
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
// dailySalesRow is one calendar day of aggregated sales as read from MSSQL.
type dailySalesRow struct {
	Date     time.Time
	Revenue  float64
	Cost     float64
	Quantity float64
	Orders   float64
}

// SalesKPIs summarizes headline sales metrics over a recent window.
type SalesKPIs struct {
	TotalRevenue    float64 `json:"total_revenue"`
	GrossMarginPct  float64 `json:"gross_margin_pct"`
	TotalQuantity   float64 `json:"total_quantity"`
	AvgOrderValue   float64 `json:"avg_order_value"`
	RecordsInWindow int     `json:"records_in_window"`
}

// DailySalesPoint is one day of the historical sales series returned to
// clients (Date formatted as YYYY-MM-DD).
type DailySalesPoint struct {
	Date     string  `json:"date"`
	Revenue  float64 `json:"revenue"`
	Cost     float64 `json:"cost"`
	Quantity float64 `json:"quantity"`
}

// ForecastPoint is one predicted day of revenue with its confidence band.
type ForecastPoint struct {
	Date             string  `json:"date"`
	PredictedRevenue float64 `json:"predicted_revenue"`
	LowerBound       float64 `json:"lower_bound"`
	UpperBound       float64 `json:"upper_bound"`
}

// RepScore is a ranked sales-rep entry; Score is a 0-100 composite built
// from normalized revenue, order count, and average deal size.
type RepScore struct {
	Rank        int     `json:"rank"`
	EmployeeKey int     `json:"employee_key"`
	RepName     string  `json:"rep_name"`
	RepTitle    string  `json:"rep_title"`
	Territory   string  `json:"territory"`
	Revenue     float64 `json:"revenue"`
	Orders      int     `json:"orders"`
	AvgDealSize float64 `json:"avg_deal_size"`
	MarginPct   float64 `json:"margin_pct"`
	Score       float64 `json:"score"`
}

// ProductDemand is a ranked product entry with a composite DemandScore.
type ProductDemand struct {
	Rank        int     `json:"rank"`
	ProductID   string  `json:"product_id"`
	ProductName string  `json:"product_name"`
	Category    string  `json:"category"`
	Revenue     float64 `json:"revenue"`
	Quantity    float64 `json:"quantity"`
	Orders      int     `json:"orders"`
	MarginPct   float64 `json:"margin_pct"`
	DemandScore float64 `json:"demand_score"`
}

// AnomalyPoint is one day of the revenue series annotated with rolling-band
// statistics; pointer fields are nil where the statistic is undefined.
type AnomalyPoint struct {
	Date        string   `json:"date"`
	Revenue     float64  `json:"revenue"`
	RollingMean *float64 `json:"rolling_mean"`
	LowerBand   *float64 `json:"lower_band"`
	UpperBand   *float64 `json:"upper_band"`
	IsAnomaly   bool     `json:"is_anomaly"`
	ZScore      *float64 `json:"z_score"`
	Direction   *string  `json:"direction"`
}

// DataQualityResult reports named quality checks with an overall status and
// the list of checks that failed.
type DataQualityResult struct {
	Status       string            `json:"status"`
	Checks       map[string]string `json:"checks"`
	FailedChecks []string          `json:"failed_checks"`
}
// ---------------------------------------------------------------------------
// Data fetching
// ---------------------------------------------------------------------------
// fetchAWDailySales runs the first daily-sales query that succeeds and folds
// the rows into exactly one record per calendar day (the UNION ALL variant
// yields two rows per date), returned in ascending date order.
func fetchAWDailySales(ctx context.Context, db *sql.DB) ([]dailySalesRow, error) {
	ctx, span := awTracer.Start(ctx, "aw.query.daily_sales")
	defer span.End()

	rows, err := mssqldb.QueryFirst(ctx, db, awDailySalesQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	agg := make(map[string]*dailySalesRow)
	var dayKeys []string
	for rows.Next() {
		var row dailySalesRow
		var rev, cst, qty, ord sql.NullFloat64
		if err := rows.Scan(&row.Date, &rev, &cst, &qty, &ord); err != nil {
			return nil, fmt.Errorf("scan daily_sales: %w", err)
		}
		row.Revenue, row.Cost = rev.Float64, cst.Float64
		row.Quantity, row.Orders = qty.Float64, ord.Float64

		day := row.Date.Format("2006-01-02")
		acc, seen := agg[day]
		if !seen {
			dup := row
			agg[day] = &dup
			dayKeys = append(dayKeys, day)
			continue
		}
		acc.Revenue += row.Revenue
		acc.Cost += row.Cost
		acc.Quantity += row.Quantity
		acc.Orders += row.Orders
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}

	// ISO date keys sort lexicographically in chronological order.
	sort.Strings(dayKeys)
	out := make([]dailySalesRow, 0, len(dayKeys))
	for _, day := range dayKeys {
		out = append(out, *agg[day])
	}
	return out, nil
}
// ---------------------------------------------------------------------------
// KPIs
// ---------------------------------------------------------------------------
// AWGetSalesKPIs computes headline KPIs (revenue, gross margin %, quantity,
// average order value) over the daily series from the last 180 days.
func AWGetSalesKPIs(ctx context.Context, db *sql.DB) (*SalesKPIs, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.kpis")
	defer span.End()

	series, err := fetchAWDailySales(ctx, db)
	if err != nil {
		return nil, err
	}

	// Only days inside the trailing 180-day window contribute.
	windowStart := time.Now().UTC().AddDate(0, 0, -180)
	var revenue, cost, quantity, orders float64
	records := 0
	for _, day := range series {
		if day.Date.Before(windowStart) {
			continue
		}
		revenue += day.Revenue
		cost += day.Cost
		quantity += day.Quantity
		orders += day.Orders
		records++
	}

	// Floor orders at 1 so the average-order-value division is always safe.
	if orders < 1 {
		orders = 1
	}
	margin := 0.0
	if revenue > 0 {
		margin = (revenue - cost) / revenue * 100
	}
	return &SalesKPIs{
		TotalRevenue:    round2(revenue),
		GrossMarginPct:  round2(margin),
		TotalQuantity:   round2(quantity),
		AvgOrderValue:   round2(revenue / orders),
		RecordsInWindow: records,
	}, nil
}
// ---------------------------------------------------------------------------
// Sales history
// ---------------------------------------------------------------------------
// AWGetSalesHistory returns the per-day sales series limited to the trailing
// daysBack days, in ascending date order.
func AWGetSalesHistory(ctx context.Context, db *sql.DB, daysBack int) ([]DailySalesPoint, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.sales_history",
		trace.WithAttributes(attribute.Int("days_back", daysBack)))
	defer span.End()

	series, err := fetchAWDailySales(ctx, db)
	if err != nil {
		return nil, err
	}

	earliest := time.Now().UTC().AddDate(0, 0, -daysBack)
	var points []DailySalesPoint
	for _, day := range series {
		if day.Date.Before(earliest) {
			continue
		}
		points = append(points, DailySalesPoint{
			Date:     day.Date.Format("2006-01-02"),
			Revenue:  round2(day.Revenue),
			Cost:     round2(day.Cost),
			Quantity: round2(day.Quantity),
		})
	}
	return points, nil
}
// ---------------------------------------------------------------------------
// Sales forecast (OLS + weekday seasonality)
// ---------------------------------------------------------------------------
// AWGetSalesForecast forecasts daily revenue for the next horizonDays days.
// Method: an ordinary-least-squares linear trend fitted over up to the last
// 720 days of history (missing days filled with zero revenue), scaled by
// per-weekday seasonality factors, with a ~95% confidence band that widens
// with the forecast step. Returns (nil, nil) when no recent history exists.
func AWGetSalesForecast(ctx context.Context, db *sql.DB, horizonDays int) ([]ForecastPoint, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.forecast",
		trace.WithAttributes(attribute.Int("horizon_days", horizonDays)))
	defer span.End()
	series, err := fetchAWDailySales(ctx, db)
	if err != nil {
		return nil, err
	}
	// Train only on the trailing 720 days.
	cutoff := time.Now().UTC().AddDate(0, 0, -720)
	var window []dailySalesRow
	for _, r := range series {
		if !r.Date.Before(cutoff) {
			window = append(window, r)
		}
	}
	if len(window) == 0 {
		// No recent history: nothing to fit, nothing to forecast.
		return nil, nil
	}
	// Fill daily gaps with 0 (resample to daily)
	start := window[0].Date
	end := window[len(window)-1].Date
	byDate := make(map[string]float64, len(window))
	for _, r := range window {
		byDate[r.Date.Format("2006-01-02")] = r.Revenue
	}
	var revenues []float64
	var dates []time.Time
	for d := start; !d.After(end); d = d.AddDate(0, 0, 1) {
		key := d.Format("2006-01-02")
		// Days absent from byDate read as the zero value (0 revenue).
		revenues = append(revenues, byDate[key])
		dates = append(dates, d)
	}
	// OLS fit of revenue against the day index 0..n-1.
	n := len(revenues)
	xs := make([]float64, n)
	for i := range xs {
		xs[i] = float64(i)
	}
	slope, intercept := ols(xs, revenues)
	// Residual sigma for confidence interval
	var ssRes float64
	for i, y := range revenues {
		pred := intercept + slope*xs[i]
		d := y - pred
		ssRes += d * d
	}
	// n-2 degrees of freedom (slope + intercept), floored at 1.
	sigma := math.Sqrt(ssRes / float64(maxInt(n-2, 1)))
	// Weekday seasonality factors (Python weekday: 0=Mon)
	weekdayRevenues := make([][]float64, 7)
	for i, r := range revenues {
		// Shift Go's Sunday=0 convention so Monday maps to index 0.
		wd := (int(dates[i].Weekday()) + 6) % 7
		weekdayRevenues[wd] = append(weekdayRevenues[wd], r)
	}
	overallMean := meanOf(revenues)
	weekdayFactors := make([]float64, 7)
	for wd := range weekdayFactors {
		if len(weekdayRevenues[wd]) > 0 && overallMean > 0 {
			f := meanOf(weekdayRevenues[wd]) / overallMean
			if math.IsNaN(f) || math.IsInf(f, 0) {
				// Degenerate ratio: fall back to a neutral factor.
				f = 1.0
			}
			weekdayFactors[wd] = f
		} else {
			// No samples for this weekday (or zero mean): neutral factor.
			weekdayFactors[wd] = 1.0
		}
	}
	// Project horizonDays points past the last observed date.
	result := make([]ForecastPoint, horizonDays)
	lastDate := dates[len(dates)-1]
	for i := range result {
		step := i + 1
		day := lastDate.AddDate(0, 0, step)
		rawPred := intercept + slope*float64(n+i)
		wd := (int(day.Weekday()) + 6) % 7
		// Clamp at zero: predicted revenue cannot be negative.
		yhat := math.Max(rawPred*weekdayFactors[wd], 0)
		// 1.96*sigma ~ 95% band, widened as the forecast step grows.
		ci := 1.96 * sigma * math.Sqrt(1+float64(step)/float64(maxInt(n, 1)))
		result[i] = ForecastPoint{
			Date:             day.Format("2006-01-02"),
			PredictedRevenue: round2(yhat),
			LowerBound:       round2(math.Max(yhat-ci, 0)),
			UpperBound:       round2(yhat + ci),
		}
	}
	return result, nil
}
// maxInt returns the larger of a and b. It delegates to the built-in max
// (available since Go 1.21; this module requires go 1.25.0) and is kept as a
// named helper for existing call sites.
func maxInt(a, b int) int {
	return max(a, b)
}
// ---------------------------------------------------------------------------
// Rep scores
// ---------------------------------------------------------------------------
// AWGetRepScores ranks sales reps by a 0-100 composite score built from
// revenue (50%), order count (30%), and average deal size (20%), each
// normalized against the best performer, and returns the top topN entries.
func AWGetRepScores(ctx context.Context, db *sql.DB, topN int) ([]RepScore, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.rep_scores",
		trace.WithAttributes(attribute.Int("top_n", topN)))
	defer span.End()

	rows, err := mssqldb.QueryFirst(ctx, db, awRepPerfQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	type repRow struct {
		EmployeeKey int
		RepName     string
		RepTitle    string
		Territory   string
		Revenue     float64
		Cost        float64
		Orders      float64
		AvgDealSize float64
	}
	var reps []repRow
	for rows.Next() {
		var rr repRow
		var rev, cst, ord, deal sql.NullFloat64
		if err := rows.Scan(&rr.EmployeeKey, &rr.RepName, &rr.RepTitle, &rr.Territory,
			&rev, &cst, &ord, &deal); err != nil {
			return nil, fmt.Errorf("scan rep_performance: %w", err)
		}
		rr.Revenue, rr.Cost = rev.Float64, cst.Float64
		rr.Orders, rr.AvgDealSize = ord.Float64, deal.Float64
		reps = append(reps, rr)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}

	// Normalization denominators: the per-metric maxima, floored at 1 so the
	// divisions below are always defined.
	var topRev, topOrd, topDeal float64
	for _, rr := range reps {
		topRev = math.Max(topRev, rr.Revenue)
		topOrd = math.Max(topOrd, rr.Orders)
		topDeal = math.Max(topDeal, rr.AvgDealSize)
	}
	topRev = maxF(topRev, 1)
	topOrd = maxF(topOrd, 1)
	topDeal = maxF(topDeal, 1)

	type rankedRep struct {
		rep   repRow
		score float64
	}
	ranked := make([]rankedRep, len(reps))
	for i, rr := range reps {
		composite := 0.50*(rr.Revenue/topRev) +
			0.30*(rr.Orders/topOrd) +
			0.20*(rr.AvgDealSize/topDeal)
		ranked[i] = rankedRep{rep: rr, score: composite}
	}
	sort.Slice(ranked, func(i, j int) bool { return ranked[i].score > ranked[j].score })
	if topN < len(ranked) {
		ranked = ranked[:topN]
	}

	out := make([]RepScore, len(ranked))
	for i, rs := range ranked {
		margin := 0.0
		if rs.rep.Revenue > 0 {
			margin = (rs.rep.Revenue - rs.rep.Cost) / rs.rep.Revenue * 100
		}
		out[i] = RepScore{
			Rank:        i + 1,
			EmployeeKey: rs.rep.EmployeeKey,
			RepName:     rs.rep.RepName,
			RepTitle:    rs.rep.RepTitle,
			Territory:   rs.rep.Territory,
			Revenue:     round2(rs.rep.Revenue),
			Orders:      int(rs.rep.Orders),
			AvgDealSize: round2(rs.rep.AvgDealSize),
			MarginPct:   round2(margin),
			Score:       round2(rs.score * 100),
		}
	}
	return out, nil
}
// ---------------------------------------------------------------------------
// Product demand
// ---------------------------------------------------------------------------
// AWGetProductDemand ranks AdventureWorks products by a weighted demand score
// and returns at most topN rows.
//
// Score = 40% revenue share + 35% order-volume share + 25% margin, where the
// shares are normalized against the maxima across all products and margin
// percent is mapped from [-100, 100] onto [0, 1] before weighting. The final
// DemandScore is scaled to 0-100.
func AWGetProductDemand(ctx context.Context, db *sql.DB, topN int) ([]ProductDemand, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.product_demand",
		trace.WithAttributes(attribute.Int("top_n", topN)))
	defer span.End()
	// Schema-fallback variants; first successful query wins.
	rows, err := mssqldb.QueryFirst(ctx, db, awProductDemandQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// rawProd mirrors one result row before scoring.
	type rawProd struct {
		ProductID   string
		ProductName string
		Category    string
		Revenue     float64
		Cost        float64
		Quantity    float64
		Orders      float64
	}
	var raws []rawProd
	for rows.Next() {
		var r rawProd
		// Aggregates may be NULL for products with no sales; NullFloat64
		// decays to 0 via .Float64.
		var rev, cost, qty, orders sql.NullFloat64
		if err := rows.Scan(&r.ProductID, &r.ProductName, &r.Category, &rev, &cost, &qty, &orders); err != nil {
			return nil, fmt.Errorf("scan product_demand: %w", err)
		}
		r.Revenue = rev.Float64
		r.Cost = cost.Float64
		r.Quantity = qty.Float64
		r.Orders = orders.Float64
		raws = append(raws, r)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Per-metric maxima, clamped to >= 1 so empty/all-zero data cannot
	// divide by zero.
	var maxRevenue, maxOrders float64
	for _, r := range raws {
		maxRevenue = math.Max(maxRevenue, r.Revenue)
		maxOrders = math.Max(maxOrders, r.Orders)
	}
	maxRevenue = maxF(maxRevenue, 1)
	maxOrders = maxF(maxOrders, 1)
	type scored struct {
		raw   rawProd
		score float64
	}
	scoreds := make([]scored, len(raws))
	for i, r := range raws {
		var marginPct float64
		if r.Revenue > 0 {
			marginPct = (r.Revenue - r.Cost) / r.Revenue * 100
		}
		// Map margin % from [-100, 100] onto [0, 1] for the weighted sum.
		marginNorm := clamp01((marginPct + 100) / 200)
		s := 0.40*(r.Revenue/maxRevenue) + 0.35*(r.Orders/maxOrders) + 0.25*marginNorm
		scoreds[i] = scored{r, s}
	}
	// Highest score first, then keep the top N.
	sort.Slice(scoreds, func(i, j int) bool { return scoreds[i].score > scoreds[j].score })
	if topN < len(scoreds) {
		scoreds = scoreds[:topN]
	}
	result := make([]ProductDemand, len(scoreds))
	for i, s := range scoreds {
		// Margin is recomputed here for the response payload.
		var marginPct float64
		if s.raw.Revenue > 0 {
			marginPct = (s.raw.Revenue - s.raw.Cost) / s.raw.Revenue * 100
		}
		result[i] = ProductDemand{
			Rank:        i + 1,
			ProductID:   s.raw.ProductID,
			ProductName: s.raw.ProductName,
			Category:    s.raw.Category,
			Revenue:     round2(s.raw.Revenue),
			Quantity:    round2(s.raw.Quantity),
			Orders:      int(s.raw.Orders),
			MarginPct:   round2(marginPct),
			DemandScore: round2(s.score * 100),
		}
	}
	return result, nil
}
// ---------------------------------------------------------------------------
// Anomaly detection (rolling z-scores)
// ---------------------------------------------------------------------------
const (
	// anomalyWindow is the rolling window length, in days, used for the
	// rolling mean/std.
	anomalyWindow = 30
	// anomalyThreshold is the |z-score| above which a point is flagged.
	anomalyThreshold = 2.0
	// anomalySeriesDays bounds how far back the analyzed series reaches.
	anomalySeriesDays = 365
)
// AWRunAnomalyDetection flags daily revenue outliers using rolling z-scores:
// a day is anomalous when |revenue - rolling mean| exceeds anomalyThreshold
// rolling standard deviations over an anomalyWindow-day trailing window.
//
// Only the last anomalySeriesDays days are analyzed. Returns (nil, nil) when
// there are fewer points than one full window. Band/z-score fields are left
// nil where the rolling stats are undefined (too few samples) or the rolling
// std is zero.
func AWRunAnomalyDetection(ctx context.Context, db *sql.DB) ([]AnomalyPoint, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.anomaly_detection")
	defer span.End()
	series, err := fetchAWDailySales(ctx, db)
	if err != nil {
		return nil, err
	}
	// Keep only the trailing analysis window of the full series.
	cutoff := time.Now().UTC().AddDate(0, 0, -anomalySeriesDays)
	var window []dailySalesRow
	for _, r := range series {
		if !r.Date.Before(cutoff) {
			window = append(window, r)
		}
	}
	if len(window) < anomalyWindow {
		return nil, nil
	}
	revenues := make([]float64, len(window))
	for i, r := range window {
		revenues[i] = r.Revenue
	}
	// Require at least a quarter window (min 7 days) before emitting stats.
	minPeriods := maxInt(7, anomalyWindow/4)
	means, stds := rollingMeanStd(revenues, anomalyWindow, minPeriods)
	result := make([]AnomalyPoint, len(window))
	for i, r := range window {
		pt := AnomalyPoint{
			Date:    r.Date.Format("2006-01-02"),
			Revenue: round2(r.Revenue),
		}
		// NaN mean means "not enough history yet" — leave pointers nil.
		if !math.IsNaN(means[i]) {
			m := round2(means[i])
			std := stds[i]
			lb := round2(means[i] - anomalyThreshold*std)
			ub := round2(means[i] + anomalyThreshold*std)
			pt.RollingMean = &m
			pt.LowerBand = &lb
			pt.UpperBand = &ub
			// A zero std (flat window) cannot yield a meaningful z-score.
			if std > 0 {
				z := round3((r.Revenue - means[i]) / std)
				pt.ZScore = &z
				pt.IsAnomaly = math.Abs(z) > anomalyThreshold
				if r.Revenue > means[i] {
					d := "high"
					pt.Direction = &d
				} else {
					d := "low"
					pt.Direction = &d
				}
			}
		}
		result[i] = pt
	}
	span.SetAttributes(attribute.Int("series_points", len(result)))
	return result, nil
}
// ---------------------------------------------------------------------------
// Data quality
// ---------------------------------------------------------------------------
// awDQChecks lists the AdventureWorks data-quality probes. Each is a single
// scalar query whose result is recorded by AWRunDataQualityCheck; count-style
// checks are additionally required to be non-zero there.
var awDQChecks = []struct {
	name string
	sql  string
}{
	{"fact_internet_sales_rows", "SELECT COUNT_BIG(*) AS cnt FROM dbo.FactInternetSales"},
	{"fact_reseller_sales_rows", "SELECT COUNT_BIG(*) AS cnt FROM dbo.FactResellerSales"},
	{"active_sales_reps", "SELECT COUNT_BIG(*) AS cnt FROM dbo.DimEmployee WHERE SalesPersonFlag = 1"},
	{"product_count", "SELECT COUNT_BIG(*) AS cnt FROM dbo.DimProduct"},
	{"latest_internet_sale", "SELECT MAX(CAST(OrderDate AS date)) AS val FROM dbo.FactInternetSales"},
}
// AWRunDataQualityCheck executes each AdventureWorks data-quality probe and
// reports per-check values plus an overall pass/fail status. A check fails
// when its query errors, or when a non-exempt check yields NULL or zero.
func AWRunDataQualityCheck(ctx context.Context, db *sql.DB) (*DataQualityResult, error) {
	ctx, span := awTracer.Start(ctx, "aw.analytics.data_quality")
	defer span.End()
	out := &DataQualityResult{
		Checks:       make(map[string]string),
		FailedChecks: []string{},
	}
	for _, c := range awDQChecks {
		var raw sql.NullString
		if err := db.QueryRowContext(ctx, c.sql).Scan(&raw); err != nil {
			out.Checks[c.name] = fmt.Sprintf("ERROR: %v", err)
			out.FailedChecks = append(out.FailedChecks, c.name)
			continue
		}
		value := "NULL"
		if raw.Valid {
			value = raw.String
		}
		out.Checks[c.name] = value
		// Empty values are tolerated for the date and rep-count probes;
		// everything else must be non-NULL and non-zero.
		exempt := c.name == "latest_internet_sale" || c.name == "active_sales_reps"
		if (value == "NULL" || value == "0") && !exempt {
			out.FailedChecks = append(out.FailedChecks, c.name)
		}
	}
	out.Status = "pass"
	if len(out.FailedChecks) > 0 {
		out.Status = "fail"
	}
	return out, nil
}

View File

@@ -0,0 +1,97 @@
package analytics
import "math"
// ols fits y = intercept + slope*x by ordinary least squares.
// Empty input yields (0, 0); a (near-)degenerate x — all values equal —
// yields a flat line through mean(y).
func ols(x, y []float64) (slope, intercept float64) {
	n := float64(len(x))
	if n == 0 {
		return 0, 0
	}
	var sx, sy, sxx, sxy float64
	for i, xi := range x {
		yi := y[i]
		sx += xi
		sy += yi
		sxx += xi * xi
		sxy += xi * yi
	}
	det := n*sxx - sx*sx
	if math.Abs(det) < 1e-10 {
		return 0, sy / n
	}
	slope = (n*sxy - sx*sy) / det
	intercept = (sy - slope*sx) / n
	return slope, intercept
}
// meanOf returns the arithmetic mean of values, or 0 for an empty slice.
func meanOf(values []float64) float64 {
	n := len(values)
	if n == 0 {
		return 0
	}
	total := 0.0
	for _, v := range values {
		total += v
	}
	return total / float64(n)
}
// sampleStdDev computes sample standard deviation (Bessel's correction).
func sampleStdDev(values []float64, mean float64) float64 {
if len(values) < 2 {
return 0
}
var s float64
for _, v := range values {
d := v - mean
s += d * d
}
return math.Sqrt(s / float64(len(values)-1))
}
// rollingMeanStd returns, for each index i, the mean and sample std of the
// trailing window values[max(0, i-window+1):i+1]. Positions whose trailing
// window holds fewer than minPeriods values are NaN in both outputs.
func rollingMeanStd(values []float64, window, minPeriods int) (means, stds []float64) {
	n := len(values)
	means = make([]float64, n)
	stds = make([]float64, n)
	for i := 0; i < n; i++ {
		lo := i - window + 1
		if lo < 0 {
			lo = 0
		}
		win := values[lo : i+1]
		if len(win) < minPeriods {
			means[i], stds[i] = math.NaN(), math.NaN()
			continue
		}
		mu := meanOf(win)
		means[i], stds[i] = mu, sampleStdDev(win, mu)
	}
	return means, stds
}
// round2 rounds v to two decimal places (halves away from zero).
func round2(v float64) float64 {
	const scale = 100
	return math.Round(v*scale) / scale
}
// round3 rounds v to three decimal places (halves away from zero).
func round3(v float64) float64 {
	const scale = 1000
	return math.Round(v*scale) / scale
}
// maxF returns the larger of a and b. When a is NaN or the values compare
// equal, b is returned (matching the original `a > b` test; deliberately not
// math.Max, which would propagate NaN from either side).
func maxF(a, b float64) float64 {
	switch {
	case a > b:
		return a
	default:
		return b
	}
}
// clamp01 clamps v into the closed interval [0, 1]. NaN passes through
// unchanged (both comparisons are false).
func clamp01(v float64) float64 {
	switch {
	case v < 0:
		return 0
	case v > 1:
		return 1
	default:
		return v
	}
}
// ceilInt rounds v up to the nearest integer and truncates to int.
// NOTE(review): values outside the int range overflow silently — callers
// presumably pass small magnitudes (reorder quantities); confirm if reused.
func ceilInt(v float64) int {
	return int(math.Ceil(v))
}

View File

@@ -0,0 +1,529 @@
package analytics
import (
"context"
"database/sql"
"fmt"
"math"
"sort"
"time"
mssqldb "otel-bi-analytics/internal/db"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// wwiTracer is the package-level tracer for WideWorldImporters analytics spans.
var wwiTracer = otel.Tracer("otel-bi.analytics.wwi")
// ---------------------------------------------------------------------------
// SQL queries
// ---------------------------------------------------------------------------
// wwiDailySalesQueries aggregates Fact.Sale per day. Two schema-fallback
// variants for mssqldb.QueryFirst: join on Delivery Date Key first, then on
// Invoice Date Key.
var wwiDailySalesQueries = []string{
	`SELECT
d.[Date] AS sale_date,
SUM(s.[Total Excluding Tax]) AS revenue,
SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
COUNT_BIG(*) AS orders
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
GROUP BY d.[Date]
ORDER BY d.[Date]`,
	`SELECT
d.[Date] AS sale_date,
SUM(s.[Total Excluding Tax]) AS revenue,
SUM(s.[Total Excluding Tax] - s.[Profit]) AS cost,
SUM(CAST(s.[Quantity] AS FLOAT)) AS quantity,
COUNT_BIG(*) AS orders
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]
GROUP BY d.[Date]
ORDER BY d.[Date]`,
}
// wwiStockLevelsQueries reads per-item stock. Primary variant sums
// Fact.Movement quantities; fallback returns zero stock when the movement
// fact is unavailable. Key 0 is the "unknown" dimension member and excluded.
var wwiStockLevelsQueries = []string{
	`SELECT
si.[Stock Item Key] AS stock_item_key,
si.[Stock Item] AS stock_item_name,
si.[Unit Price] AS unit_price,
si.[Lead Time Days] AS lead_time_days,
SUM(CAST(m.[Quantity] AS FLOAT)) AS current_stock
FROM [Dimension].[Stock Item] AS si
LEFT JOIN [Fact].[Movement] AS m ON m.[Stock Item Key] = si.[Stock Item Key]
WHERE si.[Stock Item Key] <> 0
GROUP BY si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days]`,
	`SELECT
si.[Stock Item Key] AS stock_item_key,
si.[Stock Item] AS stock_item_name,
si.[Unit Price] AS unit_price,
si.[Lead Time Days] AS lead_time_days,
CAST(0 AS FLOAT) AS current_stock
FROM [Dimension].[Stock Item] AS si
WHERE si.[Stock Item Key] <> 0`,
}
// wwiDemandVelocityQueries sums quantity sold per stock item over the last
// 90 days; fallback variant keys the date join on Invoice Date Key.
var wwiDemandVelocityQueries = []string{
	`SELECT
s.[Stock Item Key] AS stock_item_key,
SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
WHERE d.[Date] >= DATEADD(day, -90, GETDATE()) AND s.[Stock Item Key] <> 0
GROUP BY s.[Stock Item Key]`,
	`SELECT
s.[Stock Item Key] AS stock_item_key,
SUM(CAST(s.[Quantity] AS FLOAT)) AS qty_sold_90d
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]
WHERE d.[Date] >= DATEADD(day, -90, GETDATE()) AND s.[Stock Item Key] <> 0
GROUP BY s.[Stock Item Key]`,
}
// wwiSupplierPerfQueries aggregates purchase-order performance per supplier.
// The fallback variant counts every order as finalized for schemas lacking
// the [Is Order Finalized] flag.
var wwiSupplierPerfQueries = []string{
	`SELECT
sup.[Supplier Key] AS supplier_key,
sup.[Supplier] AS supplier_name,
sup.[Category] AS category,
COUNT_BIG(*) AS total_orders,
SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
SUM(CASE WHEN p.[Is Order Finalized] = 1 THEN 1 ELSE 0 END) AS finalized_orders
FROM [Dimension].[Supplier] AS sup
INNER JOIN [Fact].[Purchase] AS p ON p.[Supplier Key] = sup.[Supplier Key]
WHERE sup.[Supplier Key] <> 0
GROUP BY sup.[Supplier Key], sup.[Supplier], sup.[Category]
ORDER BY total_orders DESC`,
	`SELECT
sup.[Supplier Key] AS supplier_key,
sup.[Supplier] AS supplier_name,
sup.[Category] AS category,
COUNT_BIG(*) AS total_orders,
SUM(CAST(p.[Ordered Outers] AS FLOAT)) AS total_ordered_outers,
SUM(CAST(p.[Received Outers] AS FLOAT)) AS total_received_outers,
COUNT_BIG(*) AS finalized_orders
FROM [Dimension].[Supplier] AS sup
INNER JOIN [Fact].[Purchase] AS p ON p.[Supplier Key] = sup.[Supplier Key]
WHERE sup.[Supplier Key] <> 0
GROUP BY sup.[Supplier Key], sup.[Supplier], sup.[Category]
ORDER BY total_orders DESC`,
}
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
// ReorderRecommendation is one row of WWI stock-replenishment advice: an
// item at (or near) stockout risk with a suggested reorder quantity.
// DaysUntilStockout is nil when the item has no recent demand.
type ReorderRecommendation struct {
	StockItemKey          int      `json:"stock_item_key"`
	StockItemName         string   `json:"stock_item_name"`
	UnitPrice             float64  `json:"unit_price"`
	CurrentStock          float64  `json:"current_stock"`
	AvgDailyDemand        float64  `json:"avg_daily_demand"`
	DaysUntilStockout     *float64 `json:"days_until_stockout"`
	RecommendedReorderQty int      `json:"recommended_reorder_qty"`
	Urgency               string   `json:"urgency"`
}
// SupplierScore ranks one WWI supplier by fill rate and order finalization;
// Score is the weighted blend on a 0-100 scale.
type SupplierScore struct {
	Rank                int     `json:"rank"`
	SupplierKey         int     `json:"supplier_key"`
	SupplierName        string  `json:"supplier_name"`
	Category            string  `json:"category"`
	TotalOrders         int     `json:"total_orders"`
	FillRatePct         float64 `json:"fill_rate_pct"`
	FinalizationRatePct float64 `json:"finalization_rate_pct"`
	Score               float64 `json:"score"`
}
// WhatIfResult is the outcome of a demand-multiplier scenario for one stock
// item. The Projected* fields are nil when adjusted demand is zero.
type WhatIfResult struct {
	StockItemKey               int      `json:"stock_item_key"`
	StockItemName              string   `json:"stock_item_name"`
	DemandMultiplier           float64  `json:"demand_multiplier"`
	CurrentStock               float64  `json:"current_stock"`
	BaseAvgDailyDemand         float64  `json:"base_avg_daily_demand"`
	AdjustedDailyDemand        float64  `json:"adjusted_daily_demand"`
	ProjectedDaysUntilStockout *float64 `json:"projected_days_until_stockout"`
	ProjectedStockoutDate      *string  `json:"projected_stockout_date"`
	RecommendedOrderQty        int      `json:"recommended_order_qty"`
	EstimatedReorderCost       float64  `json:"estimated_reorder_cost"`
}
// ---------------------------------------------------------------------------
// KPIs (same logic as AW)
// ---------------------------------------------------------------------------
// WWIGetSalesKPIs computes topline WWI sales KPIs over the trailing 180 days
// of the daily-sales series (same logic as the AdventureWorks variant).
// AvgOrderValue divides by max(totalOrders, 1) to avoid division by zero.
func WWIGetSalesKPIs(ctx context.Context, db *sql.DB) (*SalesKPIs, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.kpis")
	defer span.End()
	rows, err := mssqldb.QueryFirst(ctx, db, wwiDailySalesQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// Only days within the trailing 180-day window count toward the KPIs.
	cutoff := time.Now().UTC().AddDate(0, 0, -180)
	var totalRevenue, totalCost, totalQuantity, totalOrders float64
	var count int
	for rows.Next() {
		var date time.Time
		// Aggregates may be NULL; NullFloat64 decays to 0 via .Float64.
		var revenue, cost, quantity, orders sql.NullFloat64
		if err := rows.Scan(&date, &revenue, &cost, &quantity, &orders); err != nil {
			return nil, fmt.Errorf("scan wwi_daily_sales: %w", err)
		}
		if date.Before(cutoff) {
			continue
		}
		totalRevenue += revenue.Float64
		totalCost += cost.Float64
		totalQuantity += quantity.Float64
		totalOrders += orders.Float64
		count++
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Guard the average-order-value division below.
	if totalOrders < 1 {
		totalOrders = 1
	}
	var marginPct float64
	if totalRevenue > 0 {
		marginPct = (totalRevenue - totalCost) / totalRevenue * 100
	}
	return &SalesKPIs{
		TotalRevenue:    round2(totalRevenue),
		GrossMarginPct:  round2(marginPct),
		TotalQuantity:   round2(totalQuantity),
		AvgOrderValue:   round2(totalRevenue / totalOrders),
		RecordsInWindow: count,
	}, nil
}
// ---------------------------------------------------------------------------
// Reorder recommendations
// ---------------------------------------------------------------------------
// urgency maps days-until-stockout to a triage bucket: up to 7 days is HIGH,
// up to 14 is MEDIUM, anything longer (including +Inf) is LOW.
func urgency(days float64) string {
	switch {
	case days <= 7:
		return "HIGH"
	case days <= 14:
		return "MEDIUM"
	default:
		return "LOW"
	}
}
// WWIGetReorderRecommendations flags WWI stock items at stockout risk and
// suggests reorder quantities.
//
// Demand is approximated as quantity sold over the last 90 days divided by
// 90. Items with more than 30 days of runway (and non-negative stock) are
// skipped. The reorder quantity covers the lead time plus a 50% safety
// buffer, with a minimum of 1. Results are sorted most-urgent first;
// DaysUntilStockout is nil for items with no recent demand.
func WWIGetReorderRecommendations(ctx context.Context, db *sql.DB) ([]ReorderRecommendation, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.reorder_recommendations")
	defer span.End()
	// Fetch stock levels
	stockRows, err := mssqldb.QueryFirst(ctx, db, wwiStockLevelsQueries)
	if err != nil {
		return nil, err
	}
	defer stockRows.Close()
	type stockItem struct {
		Key          int
		Name         string
		UnitPrice    float64
		LeadTimeDays float64
		CurrentStock float64
	}
	byKey := make(map[int]*stockItem)
	for stockRows.Next() {
		var s stockItem
		var price, lead, stock sql.NullFloat64
		if err := stockRows.Scan(&s.Key, &s.Name, &price, &lead, &stock); err != nil {
			return nil, fmt.Errorf("scan stock_levels: %w", err)
		}
		s.UnitPrice = price.Float64
		s.LeadTimeDays = lead.Float64
		// Unknown or zero lead time defaults to one week.
		if s.LeadTimeDays == 0 {
			s.LeadTimeDays = 7
		}
		s.CurrentStock = stock.Float64
		byKey[s.Key] = &s
	}
	if err := stockRows.Err(); err != nil {
		return nil, err
	}
	// Fetch 90-day demand velocity
	demandRows, err := mssqldb.QueryFirst(ctx, db, wwiDemandVelocityQueries)
	if err != nil {
		return nil, err
	}
	defer demandRows.Close()
	demand := make(map[int]float64)
	for demandRows.Next() {
		var key int
		var qty sql.NullFloat64
		if err := demandRows.Scan(&key, &qty); err != nil {
			return nil, fmt.Errorf("scan demand_velocity: %w", err)
		}
		demand[key] = qty.Float64
	}
	if err := demandRows.Err(); err != nil {
		return nil, err
	}
	// Compute recommendations
	var result []ReorderRecommendation
	for _, s := range byKey {
		avgDailyDemand := demand[s.Key] / 90.0
		var daysUntilStockout float64
		if avgDailyDemand > 0 {
			daysUntilStockout = s.CurrentStock / avgDailyDemand
		} else {
			// No recent demand: infinite runway.
			daysUntilStockout = math.Inf(1)
		}
		// Keep items with <= 30 days runway, plus any with negative stock.
		if daysUntilStockout > 30 && s.CurrentStock >= 0 {
			continue
		}
		// Cover the lead time with a 50% safety buffer; order at least 1.
		reorderQty := math.Max(math.Ceil(avgDailyDemand*s.LeadTimeDays*1.5), 1)
		rec := ReorderRecommendation{
			StockItemKey:          s.Key,
			StockItemName:         s.Name,
			UnitPrice:             round2(s.UnitPrice),
			CurrentStock:          round2(s.CurrentStock),
			AvgDailyDemand:        round3(avgDailyDemand),
			RecommendedReorderQty: int(reorderQty),
			Urgency:               urgency(daysUntilStockout),
		}
		if !math.IsInf(daysUntilStockout, 0) {
			d := round2(daysUntilStockout)
			rec.DaysUntilStockout = &d
		}
		result = append(result, rec)
	}
	// Most-urgent first; nil (infinite) runway sorts last.
	sort.Slice(result, func(i, j int) bool {
		di := math.Inf(1)
		if result[i].DaysUntilStockout != nil {
			di = *result[i].DaysUntilStockout
		}
		dj := math.Inf(1)
		if result[j].DaysUntilStockout != nil {
			dj = *result[j].DaysUntilStockout
		}
		return di < dj
	})
	span.SetAttributes(attribute.Int("item_count", len(result)))
	return result, nil
}
// ---------------------------------------------------------------------------
// Supplier scores
// ---------------------------------------------------------------------------
// WWIGetSupplierScores ranks WWI suppliers by a weighted blend of fill rate
// (received vs ordered outers, capped at 100%) and order-finalization rate:
// 60% fill + 40% finalization, scaled to 0-100. Returns at most topN rows.
func WWIGetSupplierScores(ctx context.Context, db *sql.DB, topN int) ([]SupplierScore, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.supplier_scores",
		trace.WithAttributes(attribute.Int("top_n", topN)))
	defer span.End()
	rows, err := mssqldb.QueryFirst(ctx, db, wwiSupplierPerfQueries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// rawSupplier mirrors one aggregated result row.
	type rawSupplier struct {
		Key             int
		Name            string
		Category        string
		TotalOrders     float64
		OrderedOuters   float64
		ReceivedOuters  float64
		FinalizedOrders float64
	}
	var raws []rawSupplier
	for rows.Next() {
		var r rawSupplier
		var orders, ordered, received, finalized sql.NullFloat64
		if err := rows.Scan(&r.Key, &r.Name, &r.Category, &orders, &ordered, &received, &finalized); err != nil {
			return nil, fmt.Errorf("scan supplier_performance: %w", err)
		}
		r.TotalOrders = orders.Float64
		r.OrderedOuters = ordered.Float64
		r.ReceivedOuters = received.Float64
		r.FinalizedOrders = finalized.Float64
		raws = append(raws, r)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	type scored struct {
		raw   rawSupplier
		score float64
		fill  float64
		final float64
	}
	scoreds := make([]scored, len(raws))
	for i, r := range raws {
		var fillRate, finalRate float64
		// Cap fill rate at 100% so over-delivery cannot inflate the score.
		if r.OrderedOuters > 0 {
			fillRate = math.Min(r.ReceivedOuters/r.OrderedOuters*100, 100)
		}
		if r.TotalOrders > 0 {
			finalRate = r.FinalizedOrders / r.TotalOrders * 100
		}
		s := 0.60*(fillRate/100) + 0.40*(finalRate/100)
		scoreds[i] = scored{r, s, fillRate, finalRate}
	}
	// Highest score first, then keep the top N.
	sort.Slice(scoreds, func(i, j int) bool { return scoreds[i].score > scoreds[j].score })
	if topN < len(scoreds) {
		scoreds = scoreds[:topN]
	}
	result := make([]SupplierScore, len(scoreds))
	for i, s := range scoreds {
		result[i] = SupplierScore{
			Rank:                i + 1,
			SupplierKey:         s.raw.Key,
			SupplierName:        s.raw.Name,
			Category:            s.raw.Category,
			TotalOrders:         int(s.raw.TotalOrders),
			FillRatePct:         round2(s.fill),
			FinalizationRatePct: round2(s.final),
			Score:               round2(s.score * 100),
		}
	}
	return result, nil
}
// ---------------------------------------------------------------------------
// What-if scenario
// ---------------------------------------------------------------------------
// WWICreateWhatIfScenario projects stockout timing for one stock item under a
// scaled-demand assumption: the item's 90-day average daily demand multiplied
// by demandMultiplier.
//
// When adjusted demand is zero the projection fields stay nil and the
// recommended order quantity is 0. Otherwise the recommended quantity covers
// the item's lead time (default 7 days when unset) plus a 50% buffer.
func WWICreateWhatIfScenario(ctx context.Context, db *sql.DB, stockItemKey int, demandMultiplier float64) (*WhatIfResult, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.whatif_scenario",
		trace.WithAttributes(
			attribute.Int("stock_item_key", stockItemKey),
			attribute.Float64("demand_multiplier", demandMultiplier),
		))
	defer span.End()
	// Item detail plus current stock from Fact.Movement (COALESCE keeps the
	// stock aggregate non-NULL even when there are no movements).
	const detailQ = `SELECT
si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days],
COALESCE(SUM(CAST(m.[Quantity] AS FLOAT)), 0) AS current_stock
FROM [Dimension].[Stock Item] AS si
LEFT JOIN [Fact].[Movement] AS m ON m.[Stock Item Key] = si.[Stock Item Key]
WHERE si.[Stock Item Key] = @stock_item_key
GROUP BY si.[Stock Item Key], si.[Stock Item], si.[Unit Price], si.[Lead Time Days]`
	// Average daily demand over the trailing 90 days.
	const demandQ = `SELECT
SUM(CAST(s.[Quantity] AS FLOAT)) / NULLIF(90.0, 0) AS avg_daily_demand
FROM [Fact].[Sale] AS s
INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Delivery Date Key]
WHERE s.[Stock Item Key] = @stock_item_key
AND d.[Date] >= DATEADD(day, -90, GETDATE())`
	var itemKey int
	var itemName string
	var unitPrice, leadTime, currentStock sql.NullFloat64
	row := db.QueryRowContext(ctx, detailQ, sql.Named("stock_item_key", stockItemKey))
	if err := row.Scan(&itemKey, &itemName, &unitPrice, &leadTime, &currentStock); err != nil {
		if err == sql.ErrNoRows {
			return nil, fmt.Errorf("stock item %d not found", stockItemKey)
		}
		return nil, fmt.Errorf("query stock item detail: %w", err)
	}
	// Zero/unknown lead time defaults to one week.
	lead := leadTime.Float64
	if lead == 0 {
		lead = 7
	}
	stock := currentStock.Float64
	price := unitPrice.Float64
	var baseDemand sql.NullFloat64
	demRow := db.QueryRowContext(ctx, demandQ, sql.Named("stock_item_key", stockItemKey))
	// Best-effort: a scan failure (e.g. no sales in the window) leaves the
	// base demand at zero rather than aborting the scenario.
	_ = demRow.Scan(&baseDemand)
	adjustedDemand := baseDemand.Float64 * demandMultiplier
	reorderQty := 0
	var daysPtr *float64
	var stockoutDatePtr *string
	if adjustedDemand > 0 {
		days := stock / adjustedDemand
		d := round2(days)
		daysPtr = &d
		// Projected date truncates fractional days toward today.
		sd := time.Now().UTC().AddDate(0, 0, int(days)).Format("2006-01-02")
		stockoutDatePtr = &sd
		reorderQty = ceilInt(adjustedDemand * lead * 1.5)
	}
	return &WhatIfResult{
		StockItemKey:               stockItemKey,
		StockItemName:              itemName,
		DemandMultiplier:           demandMultiplier,
		CurrentStock:               round2(stock),
		BaseAvgDailyDemand:         round3(baseDemand.Float64),
		AdjustedDailyDemand:        round3(adjustedDemand),
		ProjectedDaysUntilStockout: daysPtr,
		ProjectedStockoutDate:      stockoutDatePtr,
		RecommendedOrderQty:        reorderQty,
		EstimatedReorderCost:       round2(float64(reorderQty) * price),
	}, nil
}
// ---------------------------------------------------------------------------
// Data quality
// ---------------------------------------------------------------------------
// wwiDQChecks lists the WWI data-quality probes; each query returns a single
// scalar that WWIRunDataQualityCheck records verbatim.
var wwiDQChecks = []struct {
	name string
	sql  string
}{
	{"fact_sale_rows", "SELECT COUNT_BIG(*) AS cnt FROM [Fact].[Sale]"},
	{"active_suppliers", "SELECT COUNT_BIG(*) AS cnt FROM [Dimension].[Supplier] WHERE [Supplier Key] <> 0"},
	{"stock_item_count", "SELECT COUNT_BIG(*) AS cnt FROM [Dimension].[Stock Item] WHERE [Stock Item Key] <> 0"},
	{"stock_holdings", "SELECT COUNT(*) AS cnt FROM [Warehouse].[StockItemHoldings]"},
	{"latest_sale_date", "SELECT MAX(d.[Date]) AS val FROM [Fact].[Sale] AS s INNER JOIN [Dimension].[Date] AS d ON d.[Date Key] = s.[Invoice Date Key]"},
}
// WWIRunDataQualityCheck runs each WWI probe and reports per-check values
// plus an overall status. Only an errored probe or an empty fact_sale_rows
// count marks the run as failed; the other probes are informational.
func WWIRunDataQualityCheck(ctx context.Context, db *sql.DB) (*DataQualityResult, error) {
	ctx, span := wwiTracer.Start(ctx, "wwi.analytics.data_quality")
	defer span.End()
	result := &DataQualityResult{
		Checks:       make(map[string]string),
		FailedChecks: []string{},
	}
	for _, check := range wwiDQChecks {
		row := db.QueryRowContext(ctx, check.sql)
		// NullString tolerates NULL scalars (e.g. MAX over an empty table).
		var val sql.NullString
		if err := row.Scan(&val); err != nil {
			result.Checks[check.name] = fmt.Sprintf("ERROR: %v", err)
			result.FailedChecks = append(result.FailedChecks, check.name)
			continue
		}
		v := "NULL"
		if val.Valid {
			v = val.String
		}
		result.Checks[check.name] = v
		// Only an empty core fact table counts as a hard failure.
		if (v == "NULL" || v == "0") && check.name == "fact_sale_rows" {
			result.FailedChecks = append(result.FailedChecks, check.name)
		}
	}
	if len(result.FailedChecks) > 0 {
		result.Status = "fail"
	} else {
		result.Status = "pass"
	}
	return result, nil
}

View File

@@ -0,0 +1,54 @@
package config
import (
"fmt"
"os"
"strconv"
)
// Config carries all service settings resolved from the environment by Load.
type Config struct {
	Port                  int    // HTTP listen port
	AWConnStr             string // AdventureWorks MSSQL DSN (required)
	WWIConnStr            string // WideWorldImporters MSSQL DSN (required)
	PostgresDSN           string // PostgreSQL DSN (required)
	OtelCollectorEndpoint string // OTLP/HTTP collector base endpoint
	OtelServiceName       string // service.name resource attribute
	OtelServiceNamespace  string // service.namespace resource attribute
	DefaultTopN           int // default top-N for ranking endpoints
	ForecastHorizonDays   int // default forecast horizon in days
	DefaultHistoryDays    int // default history window in days
}
// Load builds the service configuration from environment variables.
//
// Required: AW_MSSQL_DSN, WWI_MSSQL_DSN, POSTGRES_DSN (mustEnv panics when
// unset). Integer settings fall back to their defaults when the variable is
// unset, empty, or not a positive integer — previously a malformed value
// (e.g. PORT=abc) was silently parsed to 0, making the server bind an
// arbitrary port. The positivity rule matches the handler's queryInt.
func Load() Config {
	return Config{
		Port:                  envInt("PORT", 8080),
		AWConnStr:             mustEnv("AW_MSSQL_DSN"),
		WWIConnStr:            mustEnv("WWI_MSSQL_DSN"),
		PostgresDSN:           mustEnv("POSTGRES_DSN"),
		OtelCollectorEndpoint: getEnv("OTEL_COLLECTOR_ENDPOINT", "http://localhost:4318"),
		OtelServiceName:       getEnv("OTEL_SERVICE_NAME", "otel-bi-analytics"),
		OtelServiceNamespace:  getEnv("OTEL_SERVICE_NAMESPACE", "final-thesis"),
		DefaultTopN:           envInt("DEFAULT_TOP_N", 10),
		ForecastHorizonDays:   envInt("FORECAST_HORIZON_DAYS", 30),
		DefaultHistoryDays:    envInt("DEFAULT_HISTORY_DAYS", 365),
	}
}

// envInt reads an integer environment variable, returning fallback when the
// variable is unset, empty, or not a positive integer.
func envInt(key string, fallback int) int {
	s, ok := os.LookupEnv(key)
	if !ok || s == "" {
		return fallback
	}
	v, err := strconv.Atoi(s)
	if err != nil || v <= 0 {
		return fallback
	}
	return v
}
func getEnv(key, fallback string) string {
if v, ok := os.LookupEnv(key); ok && v != "" {
return v
}
return fallback
}
func mustEnv(key string) string {
v := os.Getenv(key)
if v == "" {
panic(fmt.Sprintf("required environment variable %s is not set", key))
}
return v
}

View File

@@ -0,0 +1,41 @@
package db
import (
"context"
"database/sql"
"fmt"
"log/slog"
_ "github.com/microsoft/go-mssqldb"
)
// Open creates an MSSQL connection pool and validates connectivity.
func Open(ctx context.Context, dsn, name string) (*sql.DB, error) {
pool, err := sql.Open("sqlserver", dsn)
if err != nil {
return nil, fmt.Errorf("open %s: %w", name, err)
}
pool.SetMaxOpenConns(15)
pool.SetMaxIdleConns(5)
if err := pool.PingContext(ctx); err != nil {
return nil, fmt.Errorf("ping %s: %w", name, err)
}
slog.Info("mssql connected", "db", name)
return pool, nil
}
// QueryFirst runs each SQL query in order, returning rows from the first one
// that succeeds. Used for schema-fallback queries.
func QueryFirst(ctx context.Context, pool *sql.DB, queries []string) (*sql.Rows, error) {
var lastErr error
for _, q := range queries {
rows, err := pool.QueryContext(ctx, q)
if err != nil {
slog.Warn("query variant failed, trying next", "err", err)
lastErr = err
continue
}
return rows, nil
}
return nil, fmt.Errorf("all query variants failed: %w", lastErr)
}

View File

@@ -0,0 +1,28 @@
package db
import (
"context"
"fmt"
"log/slog"
"github.com/jackc/pgx/v5/pgxpool"
)
// OpenPostgres creates a pgx connection pool and validates connectivity.
// The pool is closed again if the validation ping fails, so callers never
// receive a live pool alongside a non-nil error.
func OpenPostgres(ctx context.Context, dsn string) (*pgxpool.Pool, error) {
	cfg, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		return nil, fmt.Errorf("parse postgres DSN: %w", err)
	}
	pool, err := pgxpool.NewWithConfig(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("create postgres pool: %w", err)
	}
	if err := pool.Ping(ctx); err != nil {
		pool.Close()
		return nil, fmt.Errorf("ping postgres: %w", err)
	}
	slog.Info("postgres connected", "max_conns", cfg.MaxConns)
	return pool, nil
}

View File

@@ -0,0 +1,106 @@
package export
import (
"context"
"fmt"
"sort"
"github.com/xuri/excelize/v2"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
// Tracer, meter, and instruments for the export package.
// NOTE(review): instrument-creation errors are discarded with `_` —
// presumably these constructors only fail on invalid instrument names;
// confirm against the OTel SDK in use.
var (
	exportTracer = otel.Tracer("otel-bi.export")
	exportMeter  = otel.Meter("otel-bi.export")
	// exportRowsTotal counts rows written to XLSX, labeled by sheet.
	exportRowsTotal, _ = exportMeter.Int64Counter(
		"export.rows_total",
		metric.WithDescription("Total rows exported to XLSX"),
	)
	// exportSizeBytes records the size of each generated workbook.
	exportSizeBytes, _ = exportMeter.Int64Histogram(
		"export.file_size_bytes",
		metric.WithDescription("XLSX file size in bytes"),
		metric.WithUnit("By"),
	)
)
// Column maps a row key to its spreadsheet header label; the order of a
// []Column defines the column order in the exported sheet.
type Column struct {
	Key   string // lookup key into each row map
	Label string // header text written to row 1
}
// ToXLSXBytes writes rows to a single-sheet Excel workbook using the given
// column spec (controls header labels and order) and returns the raw bytes.
//
// Row 1 holds the column labels; data starts at row 2. Keys missing from a
// row render as empty cells (via fmtCell). Emits a span plus row-count and
// file-size metrics labeled by sheet name.
func ToXLSXBytes(ctx context.Context, sheetName string, cols []Column, rows []map[string]any) ([]byte, error) {
	ctx, span := exportTracer.Start(ctx, "export.xlsx",
		trace.WithAttributes(attribute.String("sheet_name", sheetName)),
	)
	defer span.End()
	f := excelize.NewFile()
	defer f.Close()
	// Rename the workbook's default sheet rather than adding a second one.
	sheet := f.GetSheetName(0)
	if err := f.SetSheetName(sheet, sheetName); err != nil {
		return nil, err
	}
	// Header row
	for col, c := range cols {
		cell, _ := excelize.CoordinatesToCellName(col+1, 1)
		if err := f.SetCellValue(sheetName, cell, c.Label); err != nil {
			return nil, err
		}
	}
	// Data rows. Per-cell write errors are deliberately ignored: a bad cell
	// should not abort the whole export.
	for rowIdx, row := range rows {
		for colIdx, c := range cols {
			cell, _ := excelize.CoordinatesToCellName(colIdx+1, rowIdx+2)
			_ = f.SetCellValue(sheetName, cell, fmtCell(row[c.Key]))
		}
	}
	buf, err := f.WriteToBuffer()
	if err != nil {
		return nil, err
	}
	b := buf.Bytes()
	span.SetAttributes(
		attribute.Int("row_count", len(rows)),
		attribute.Int("file_size_bytes", len(b)),
	)
	exportRowsTotal.Add(ctx, int64(len(rows)), metric.WithAttributes(attribute.String("sheet", sheetName)))
	exportSizeBytes.Record(ctx, int64(len(b)), metric.WithAttributes(attribute.String("sheet", sheetName)))
	return b, nil
}
// GenericXLSX converts a slice of maps to XLSX with alphabetically-sorted
// headers derived from the first row's keys. Use ToXLSXBytes when column
// order matters.
func GenericXLSX(ctx context.Context, sheetName string, rows []map[string]any) ([]byte, error) {
	if len(rows) == 0 {
		return ToXLSXBytes(ctx, sheetName, nil, nil)
	}
	var keys []string
	for k := range rows[0] {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	cols := make([]Column, 0, len(keys))
	for _, k := range keys {
		cols = append(cols, Column{Key: k, Label: k})
	}
	return ToXLSXBytes(ctx, sheetName, cols, rows)
}
// fmtCell renders a cell value for the spreadsheet: nil becomes an empty
// string, everything else is formatted with the default %v representation.
func fmtCell(v any) string {
	if v != nil {
		return fmt.Sprint(v)
	}
	return ""
}

View File

@@ -0,0 +1,447 @@
package handler
import (
"database/sql"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"strconv"
"github.com/jackc/pgx/v5/pgxpool"
"otel-bi-analytics/internal/analytics"
"otel-bi-analytics/internal/export"
"otel-bi-analytics/internal/scheduler"
)
// Handler bundles the HTTP endpoints' shared dependencies: both MSSQL pools,
// the Postgres pool, the background scheduler, and the default values used
// when request query parameters are absent.
type Handler struct {
	awDB                *sql.DB
	wwiDB               *sql.DB
	pgPool              *pgxpool.Pool
	sched               *scheduler.Scheduler
	defaultTopN         int
	defaultForecastDays int
	defaultHistoryDays  int
}
// New constructs a Handler wired to its database pools and scheduler, with
// defaults for the top-N, forecast-horizon, and history-window parameters.
func New(awDB, wwiDB *sql.DB, pgPool *pgxpool.Pool, sched *scheduler.Scheduler, topN, forecastDays, historyDays int) *Handler {
	return &Handler{
		awDB:                awDB,
		wwiDB:               wwiDB,
		pgPool:              pgPool,
		sched:               sched,
		defaultTopN:         topN,
		defaultForecastDays: forecastDays,
		defaultHistoryDays:  historyDays,
	}
}
// RegisterRoutes wires all routes into the given mux (Go 1.22 method+path syntax).
func (h *Handler) RegisterRoutes(mux *http.ServeMux) {
	mux.HandleFunc("GET /health", h.Health)
	// AdventureWorks analytics
	mux.HandleFunc("GET /aw/sales/kpis", h.AWKPIs)
	mux.HandleFunc("GET /aw/sales/history", h.AWHistory)
	mux.HandleFunc("GET /aw/sales/forecast", h.AWForecast)
	mux.HandleFunc("GET /aw/reps/scores", h.AWRepScores)
	mux.HandleFunc("GET /aw/products/demand", h.AWProductDemand)
	mux.HandleFunc("GET /aw/anomalies", h.AWAnomalies)
	mux.HandleFunc("GET /aw/data-quality", h.AWDataQuality)
	// AdventureWorks XLSX exports
	mux.HandleFunc("GET /aw/export/sales-history", h.ExportAWSalesHistory)
	mux.HandleFunc("GET /aw/export/sales-forecast", h.ExportAWSalesForecast)
	mux.HandleFunc("GET /aw/export/rep-scores", h.ExportAWRepScores)
	mux.HandleFunc("GET /aw/export/product-demand", h.ExportAWProductDemand)
	// WideWorldImporters analytics
	mux.HandleFunc("GET /wwi/sales/kpis", h.WWIKPIs)
	mux.HandleFunc("GET /wwi/stock/recommendations", h.WWIReorderRecommendations)
	mux.HandleFunc("GET /wwi/suppliers/scores", h.WWISupplierScores)
	mux.HandleFunc("POST /wwi/scenarios", h.WWIWhatIfScenario)
	mux.HandleFunc("GET /wwi/data-quality", h.WWIDataQuality)
	// WideWorldImporters XLSX exports
	mux.HandleFunc("GET /wwi/export/stock-recommendations", h.ExportWWIStockRecommendations)
	mux.HandleFunc("GET /wwi/export/supplier-scores", h.ExportWWISupplierScores)
	// Manual scheduler triggers ({job_name} is a path wildcard)
	mux.HandleFunc("POST /scheduler/aw/{job_name}/trigger", h.TriggerAWJob)
	mux.HandleFunc("POST /scheduler/wwi/{job_name}/trigger", h.TriggerWWIJob)
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
func writeJSON(w http.ResponseWriter, status int, v any) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
if err := json.NewEncoder(w).Encode(v); err != nil {
slog.Error("json encode failed", "err", err)
}
}
// writeError sends a JSON error payload {"error": msg} with the given status.
func writeError(w http.ResponseWriter, status int, msg string) {
	writeJSON(w, status, map[string]string{"error": msg})
}
func queryInt(r *http.Request, key string, defaultVal int) int {
s := r.URL.Query().Get(key)
if s == "" {
return defaultVal
}
v, err := strconv.Atoi(s)
if err != nil || v <= 0 {
return defaultVal
}
return v
}
// writeXLSX sends pre-built XLSX bytes as a file-download attachment,
// exposing the row count in a custom X-Row-Count header.
func writeXLSX(w http.ResponseWriter, filename string, rowCount int, data []byte) {
	hdr := w.Header()
	hdr.Set("Content-Type", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
	hdr.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
	hdr.Set("X-Row-Count", strconv.Itoa(rowCount))
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(data) // best effort: the client may already be gone
}
// toMaps converts any JSON-serializable slice value into []map[string]any by
// round-tripping through encoding/json; marshalling errors are deliberately
// ignored (best effort). The result is never nil so it encodes as a JSON array.
func toMaps(v any) []map[string]any {
	raw, _ := json.Marshal(v)
	var rows []map[string]any
	_ = json.Unmarshal(raw, &rows)
	if rows != nil {
		return rows
	}
	return []map[string]any{}
}
// ---------------------------------------------------------------------------
// Analytics handlers
// ---------------------------------------------------------------------------
// Health reports liveness with a static {"status":"ok"} JSON body.
func (h *Handler) Health(w http.ResponseWriter, r *http.Request) {
	body := map[string]string{"status": "ok"}
	writeJSON(w, http.StatusOK, body)
}
// AWKPIs serves the AdventureWorks sales KPI summary as JSON.
func (h *Handler) AWKPIs(w http.ResponseWriter, r *http.Request) {
	kpis, err := analytics.AWGetSalesKPIs(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWGetSalesKPIs", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, kpis)
}
// AWHistory serves daily AW sales history for the last ?days_back days as JSON.
func (h *Handler) AWHistory(w http.ResponseWriter, r *http.Request) {
	days := queryInt(r, "days_back", h.defaultHistoryDays)
	points, err := analytics.AWGetSalesHistory(r.Context(), h.awDB, days)
	if err != nil {
		slog.Error("AWGetSalesHistory", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if points == nil {
		points = []analytics.DailySalesPoint{}
	}
	writeJSON(w, http.StatusOK, points)
}
// AWForecast serves the AW revenue forecast for ?horizon_days days as JSON.
func (h *Handler) AWForecast(w http.ResponseWriter, r *http.Request) {
	horizon := queryInt(r, "horizon_days", h.defaultForecastDays)
	points, err := analytics.AWGetSalesForecast(r.Context(), h.awDB, horizon)
	if err != nil {
		slog.Error("AWGetSalesForecast", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if points == nil {
		points = []analytics.ForecastPoint{}
	}
	writeJSON(w, http.StatusOK, points)
}
// AWRepScores serves the top-N AW sales-rep performance scores as JSON.
func (h *Handler) AWRepScores(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	scores, err := analytics.AWGetRepScores(r.Context(), h.awDB, limit)
	if err != nil {
		slog.Error("AWGetRepScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if scores == nil {
		scores = []analytics.RepScore{}
	}
	writeJSON(w, http.StatusOK, scores)
}
// AWProductDemand serves the top-N AW product demand rankings as JSON.
func (h *Handler) AWProductDemand(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	demand, err := analytics.AWGetProductDemand(r.Context(), h.awDB, limit)
	if err != nil {
		slog.Error("AWGetProductDemand", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if demand == nil {
		demand = []analytics.ProductDemand{}
	}
	writeJSON(w, http.StatusOK, demand)
}
// AWAnomalies runs AW anomaly detection on demand and serves the points as JSON.
func (h *Handler) AWAnomalies(w http.ResponseWriter, r *http.Request) {
	points, err := analytics.AWRunAnomalyDetection(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWRunAnomalyDetection", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if points == nil {
		points = []analytics.AnomalyPoint{}
	}
	writeJSON(w, http.StatusOK, points)
}
// AWDataQuality runs the AW data-quality checks and serves the report as JSON.
func (h *Handler) AWDataQuality(w http.ResponseWriter, r *http.Request) {
	report, err := analytics.AWRunDataQualityCheck(r.Context(), h.awDB)
	if err != nil {
		slog.Error("AWRunDataQualityCheck", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, report)
}
// WWIKPIs serves the Wide World Importers sales KPI summary as JSON.
func (h *Handler) WWIKPIs(w http.ResponseWriter, r *http.Request) {
	kpis, err := analytics.WWIGetSalesKPIs(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIGetSalesKPIs", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, kpis)
}
// WWIReorderRecommendations serves WWI stock reorder recommendations as JSON.
func (h *Handler) WWIReorderRecommendations(w http.ResponseWriter, r *http.Request) {
	recs, err := analytics.WWIGetReorderRecommendations(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIGetReorderRecommendations", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if recs == nil {
		recs = []analytics.ReorderRecommendation{}
	}
	writeJSON(w, http.StatusOK, recs)
}
// WWISupplierScores serves the top-N WWI supplier performance scores as JSON.
func (h *Handler) WWISupplierScores(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	scores, err := analytics.WWIGetSupplierScores(r.Context(), h.wwiDB, limit)
	if err != nil {
		slog.Error("WWIGetSupplierScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Normalize nil so clients always receive a JSON array, never null.
	if scores == nil {
		scores = []analytics.SupplierScore{}
	}
	writeJSON(w, http.StatusOK, scores)
}
// WWIWhatIfScenario runs a what-if demand scenario for a single stock item.
// Request body: {"stock_item_key": >0, "demand_multiplier": float}; a
// non-positive multiplier falls back to the neutral 1.0.
func (h *Handler) WWIWhatIfScenario(w http.ResponseWriter, r *http.Request) {
	var req struct {
		StockItemKey     int     `json:"stock_item_key"`
		DemandMultiplier float64 `json:"demand_multiplier"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid request body")
		return
	}
	if req.StockItemKey <= 0 {
		writeError(w, http.StatusBadRequest, "stock_item_key must be > 0")
		return
	}
	if req.DemandMultiplier <= 0 {
		req.DemandMultiplier = 1.0
	}
	scenario, err := analytics.WWICreateWhatIfScenario(r.Context(), h.wwiDB, req.StockItemKey, req.DemandMultiplier)
	if err != nil {
		slog.Error("WWICreateWhatIfScenario", "err", err)
		// NOTE(review): every scenario error is surfaced as 404 — confirm that
		// genuine DB failures should not be 500 instead.
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, scenario)
}
// WWIDataQuality runs the WWI data-quality checks and serves the report as JSON.
func (h *Handler) WWIDataQuality(w http.ResponseWriter, r *http.Request) {
	report, err := analytics.WWIRunDataQualityCheck(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("WWIRunDataQualityCheck", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeJSON(w, http.StatusOK, report)
}
// ---------------------------------------------------------------------------
// Export handlers
// ---------------------------------------------------------------------------
// ExportAWSalesHistory streams the AW daily sales history as an XLSX download.
func (h *Handler) ExportAWSalesHistory(w http.ResponseWriter, r *http.Request) {
	days := queryInt(r, "days_back", h.defaultHistoryDays)
	rows, err := analytics.AWGetSalesHistory(r.Context(), h.awDB, days)
	if err != nil {
		slog.Error("ExportAWSalesHistory", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "date", Label: "Date"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "avg_order_value", Label: "Avg Order Value"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Sales History", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_sales_history.xlsx", len(rows), workbook)
}
// ExportAWSalesForecast streams the AW revenue forecast as an XLSX download.
func (h *Handler) ExportAWSalesForecast(w http.ResponseWriter, r *http.Request) {
	horizon := queryInt(r, "horizon_days", h.defaultForecastDays)
	rows, err := analytics.AWGetSalesForecast(r.Context(), h.awDB, horizon)
	if err != nil {
		slog.Error("ExportAWSalesForecast", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "date", Label: "Date"},
		{Key: "predicted_revenue", Label: "Predicted Revenue"},
		{Key: "lower_bound", Label: "Lower Bound"},
		{Key: "upper_bound", Label: "Upper Bound"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Sales Forecast", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_sales_forecast.xlsx", len(rows), workbook)
}
// ExportAWRepScores streams the top-N AW rep scores as an XLSX download.
func (h *Handler) ExportAWRepScores(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	rows, err := analytics.AWGetRepScores(r.Context(), h.awDB, limit)
	if err != nil {
		slog.Error("ExportAWRepScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "rep_name", Label: "Sales Rep"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "avg_order_value", Label: "Avg Order Value"},
		{Key: "performance_score", Label: "Performance Score"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Rep Scores", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_rep_scores.xlsx", len(rows), workbook)
}
// ExportAWProductDemand streams the top-N AW product demand as an XLSX download.
func (h *Handler) ExportAWProductDemand(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	rows, err := analytics.AWGetProductDemand(r.Context(), h.awDB, limit)
	if err != nil {
		slog.Error("ExportAWProductDemand", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "product_name", Label: "Product"},
		{Key: "category", Label: "Category"},
		{Key: "total_quantity", Label: "Total Quantity"},
		{Key: "total_revenue", Label: "Total Revenue"},
		{Key: "demand_score", Label: "Demand Score"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Product Demand", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "aw_product_demand.xlsx", len(rows), workbook)
}
// ExportWWIStockRecommendations streams WWI reorder recommendations as XLSX.
func (h *Handler) ExportWWIStockRecommendations(w http.ResponseWriter, r *http.Request) {
	rows, err := analytics.WWIGetReorderRecommendations(r.Context(), h.wwiDB)
	if err != nil {
		slog.Error("ExportWWIStockRecommendations", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "stock_item_name", Label: "Stock Item"},
		{Key: "current_stock", Label: "Current Stock"},
		{Key: "avg_daily_demand", Label: "Avg Daily Demand"},
		{Key: "days_until_stockout", Label: "Days Until Stockout"},
		{Key: "recommended_reorder_qty", Label: "Recommended Reorder Qty"},
		{Key: "urgency", Label: "Urgency"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Stock Recommendations", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "wwi_stock_recommendations.xlsx", len(rows), workbook)
}
// ExportWWISupplierScores streams the top-N WWI supplier scores as XLSX.
func (h *Handler) ExportWWISupplierScores(w http.ResponseWriter, r *http.Request) {
	limit := queryInt(r, "top_n", h.defaultTopN)
	rows, err := analytics.WWIGetSupplierScores(r.Context(), h.wwiDB, limit)
	if err != nil {
		slog.Error("ExportWWISupplierScores", "err", err)
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	columns := []export.Column{
		{Key: "supplier_name", Label: "Supplier"},
		{Key: "total_orders", Label: "Total Orders"},
		{Key: "on_time_delivery_rate", Label: "On-Time Delivery Rate"},
		{Key: "avg_lead_time_days", Label: "Avg Lead Time (Days)"},
		{Key: "performance_score", Label: "Performance Score"},
	}
	workbook, err := export.ToXLSXBytes(r.Context(), "Supplier Scores", columns, toMaps(rows))
	if err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	writeXLSX(w, "wwi_supplier_scores.xlsx", len(rows), workbook)
}
// ---------------------------------------------------------------------------
// Scheduler trigger handlers
// ---------------------------------------------------------------------------
// TriggerAWJob fires the AW scheduler job named in the path, asynchronously.
// Unknown job names yield 404; accepted triggers yield 202.
func (h *Handler) TriggerAWJob(w http.ResponseWriter, r *http.Request) {
	job := r.PathValue("job_name")
	if err := h.sched.TriggerAWJob(job); err != nil {
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusAccepted, map[string]string{"status": "triggered", "job": job})
}
// TriggerWWIJob fires the WWI scheduler job named in the path, asynchronously.
// Unknown job names yield 404; accepted triggers yield 202.
func (h *Handler) TriggerWWIJob(w http.ResponseWriter, r *http.Request) {
	job := r.PathValue("job_name")
	if err := h.sched.TriggerWWIJob(job); err != nil {
		writeError(w, http.StatusNotFound, err.Error())
		return
	}
	writeJSON(w, http.StatusAccepted, map[string]string{"status": "triggered", "job": job})
}

View File

@@ -0,0 +1,152 @@
package persistence
import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"log/slog"
"time"
"github.com/jackc/pgx/v5/pgxpool"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
// Package-level OpenTelemetry instruments shared by all persistence helpers.
var (
	// persistTracer creates the spans wrapped around every persistence write.
	persistTracer = otel.Tracer("otel-bi.persistence")
	// persistMeter owns the counters declared below.
	persistMeter = otel.Meter("otel-bi.persistence")
	// persistWritesTotal counts successful persistence write operations.
	// The instrument-creation error is deliberately discarded with the blank
	// identifier; a nil-safe instrument is still returned on failure.
	persistWritesTotal, _ = persistMeter.Int64Counter(
		"persistence.writes_total",
		metric.WithDescription("Total persistence write operations"),
	)
)
// newUUID returns a random RFC 4122 version-4 UUID string.
func newUUID() string {
	var buf [16]byte
	_, _ = rand.Read(buf[:]) // crypto/rand.Read does not fail in practice
	buf[6] = (buf[6] & 0x0f) | 0x40 // version 4 nibble
	buf[8] = (buf[8] & 0x3f) | 0x80 // RFC 4122 variant bits
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
		buf[0:4], buf[4:6], buf[6:8], buf[8:10], buf[10:16])
}
// spanContext returns the span's trace and span IDs as nullable strings,
// or (nil, nil) when the span carries no valid span context.
func spanContext(span trace.Span) (traceID, spanID *string) {
	sc := span.SpanContext()
	if !sc.IsValid() {
		return nil, nil
	}
	t := sc.TraceID().String()
	s := sc.SpanID().String()
	return &t, &s
}
// mustJSON marshals v to JSON, returning nil when marshalling fails
// (best effort — the caller treats the payload as optional).
func mustJSON(v any) []byte {
	out, err := json.Marshal(v)
	if err != nil {
		return nil
	}
	return out
}
// ---------------------------------------------------------------------------
// Job execution tracking
// ---------------------------------------------------------------------------
// RecordJobStart inserts a 'running' row into job_executions and returns the
// generated row id. Persistence failures are logged but never propagated, so
// job execution is not blocked by tracking problems; the id is returned even
// when the insert failed.
func RecordJobStart(ctx context.Context, pool *pgxpool.Pool, jobName, domain string, traceID, spanID *string) string {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_start")
	defer span.End()
	id := newUUID()
	_, err := pool.Exec(ctx,
		`INSERT INTO job_executions (id, started_at, job_name, domain, status, trace_id, span_id)
		 VALUES ($1, NOW(), $2, $3, 'running', $4, $5)`,
		id, jobName, domain, traceID, spanID,
	)
	if err != nil {
		slog.Warn("failed to record job start", "job", jobName, "err", err)
	}
	return id
}
// RecordJobComplete marks the job_executions row as 'success', recording the
// wall-clock duration since startedAt and the number of records processed.
// Update failures are logged and swallowed.
func RecordJobComplete(ctx context.Context, pool *pgxpool.Pool, jobID string, startedAt time.Time, records int) {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_complete")
	defer span.End()
	// NOTE(review): Milliseconds() already returns int64 — the cast is redundant.
	durationMs := int64(time.Since(startedAt).Milliseconds())
	_, err := pool.Exec(ctx,
		`UPDATE job_executions
		 SET status = 'success', completed_at = NOW(), duration_ms = $2, records_processed = $3
		 WHERE id = $1`,
		jobID, durationMs, records,
	)
	if err != nil {
		slog.Warn("failed to record job complete", "id", jobID, "err", err)
	}
}
// RecordJobFailure marks the job_executions row as 'failure', recording the
// wall-clock duration and a truncated error message. Update failures are
// logged and swallowed.
func RecordJobFailure(ctx context.Context, pool *pgxpool.Pool, jobID string, startedAt time.Time, errMsg string) {
	ctx, span := persistTracer.Start(ctx, "persistence.record_job_failure")
	defer span.End()
	durationMs := int64(time.Since(startedAt).Milliseconds())
	// Cap the stored message at 2000 bytes. NOTE(review): this is a byte slice,
	// so it can split a multi-byte UTF-8 character at the boundary.
	if len(errMsg) > 2000 {
		errMsg = errMsg[:2000]
	}
	_, err := pool.Exec(ctx,
		`UPDATE job_executions
		 SET status = 'failure', completed_at = NOW(), duration_ms = $2, error_message = $3
		 WHERE id = $1`,
		jobID, durationMs, errMsg,
	)
	if err != nil {
		slog.Warn("failed to record job failure", "id", jobID, "err", err)
	}
}
// ---------------------------------------------------------------------------
// Audit log
// ---------------------------------------------------------------------------
// AuditEntry describes one audit_log row to be written by AppendAudit.
type AuditEntry struct {
	Action     string // event name, e.g. "forecast.generated" or "job.failed"
	ActorType  string // who acted: "scheduler" or "api" (see actorType)
	ActorID    string // concrete actor identifier, e.g. the trigger source
	Domain     string // data domain: "aw" or "wwi"
	Service    string // originating service name
	EntityType string // entity the action concerns, e.g. "sales_forecast"
	Status     string // outcome; empty defaults to "success" in AppendAudit
	Payload    any    // free-form details, stored as JSONB
}
// AppendAudit writes one row to audit_log, attaching the current span's
// trace/span IDs and serializing e.Payload as JSONB. An empty Status defaults
// to "success". Insert failures are logged and swallowed so auditing never
// blocks the caller.
func AppendAudit(ctx context.Context, pool *pgxpool.Pool, e AuditEntry) {
	ctx, span := persistTracer.Start(ctx, "persistence.append_audit",
		trace.WithAttributes(
			attribute.String("audit.action", e.Action),
			attribute.String("audit.domain", e.Domain),
		),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	status := e.Status
	if status == "" {
		status = "success"
	}
	payloadJSON := mustJSON(e.Payload)
	_, err := pool.Exec(ctx,
		`INSERT INTO audit_log
		 (id, occurred_at, action, status, actor_type, actor_id, domain, service, entity_type, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7, $8, $9, $10, $11::jsonb)`,
		newUUID(), e.Action, status, e.ActorType, e.ActorID,
		e.Domain, e.Service, e.EntityType,
		traceID, spanID, payloadJSON,
	)
	if err != nil {
		slog.Warn("failed to append audit", "action", e.Action, "err", err)
	}
}

View File

@@ -0,0 +1,140 @@
package persistence
import (
	"context"
	"log/slog"
	"strings"

	"github.com/jackc/pgx/v5/pgxpool"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"

	"otel-bi-analytics/internal/analytics"
)
// PersistForecast stores one AW sales-forecast snapshot as a JSONB payload,
// bumps the write counter, and appends a matching audit entry. Write failures
// are logged and recorded on the span, then swallowed — callers are never
// blocked by persistence problems.
func PersistForecast(ctx context.Context, pool *pgxpool.Pool, data []analytics.ForecastPoint, horizonDays int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_forecast",
		trace.WithAttributes(
			attribute.Int("horizon_days", horizonDays),
			attribute.Int("point_count", len(data)),
		),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_sales_forecasts
		 (id, created_at, horizon_days, point_count, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
		newUUID(), horizonDays, len(data), source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW forecast", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_sales_forecast")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "forecast.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "sales_forecast",
		Payload: map[string]any{"horizon_days": horizonDays, "point_count": len(data)},
	})
}
// PersistRepScores stores one AW rep-score snapshot as a JSONB payload, bumps
// the write counter, and appends an audit entry. topN is recorded in the audit
// payload only — the aw_rep_scores insert does not include it. Failures are
// logged, recorded on the span, and swallowed.
func PersistRepScores(ctx context.Context, pool *pgxpool.Pool, data []analytics.RepScore, topN int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_rep_scores",
		trace.WithAttributes(attribute.Int("rep_count", len(data))),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_rep_scores
		 (id, computed_at, rep_count, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6::jsonb)`,
		newUUID(), len(data), source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW rep scores", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_rep_scores")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "scores.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "rep_scores",
		Payload: map[string]any{"rep_count": len(data), "top_n": topN},
	})
}
// PersistProductDemand stores one AW product-demand snapshot (including the
// topN used) as a JSONB payload, bumps the write counter, and appends an audit
// entry. Failures are logged, recorded on the span, and swallowed.
func PersistProductDemand(ctx context.Context, pool *pgxpool.Pool, data []analytics.ProductDemand, topN int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_product_demand",
		trace.WithAttributes(attribute.Int("product_count", len(data))),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_product_demand
		 (id, computed_at, product_count, top_n, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
		newUUID(), len(data), topN, source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW product demand", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_product_demand")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "scores.generated", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "product_demand",
		Payload: map[string]any{"product_count": len(data), "top_n": topN},
	})
}
// PersistAnomalyRun stores the result of one AW anomaly-detection run as a
// JSONB payload along with the count of flagged points. The series/window/
// threshold values (365, 30, 2.0) are hard-coded in the insert and audit
// payload — they mirror the detector's configuration, not values computed
// here. Failures are logged, recorded on the span, and swallowed.
func PersistAnomalyRun(ctx context.Context, pool *pgxpool.Pool, data []analytics.AnomalyPoint, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.aw.persist_anomaly_run")
	defer span.End()
	// Count only the points the detector actually flagged.
	anomalyCount := 0
	for _, p := range data {
		if p.IsAnomaly {
			anomalyCount++
		}
	}
	span.SetAttributes(
		attribute.Int("series_points", len(data)),
		attribute.Int("anomaly_count", anomalyCount),
	)
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO aw_anomaly_runs
		 (id, detected_at, anomaly_count, series_days, window_days, threshold_sigma, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, 365, 30, 2.0, $3, $4, $5, $6::jsonb)`,
		newUUID(), anomalyCount, source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist AW anomaly run", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "aw_anomaly_run")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "anomaly_detection.ran", ActorType: actorType(source), ActorID: source,
		Domain: "aw", Service: "otel-bi-analytics", EntityType: "anomaly_detection",
		Payload: map[string]any{"series_days": 365, "window_days": 30, "anomaly_count": anomalyCount},
	})
}
// actorType classifies an audit actor from its source string: sources emitted
// by scheduler jobs are prefixed "scheduler" (e.g. "scheduler.aw.daily.forecast");
// anything else is treated as an API-initiated action.
func actorType(source string) string {
	// Idiomatic prefix check instead of the manual len/slice comparison.
	if strings.HasPrefix(source, "scheduler") {
		return "scheduler"
	}
	return "api"
}

View File

@@ -0,0 +1,151 @@
package persistence
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"otel-bi-analytics/internal/analytics"
)
var (
	// businessEventsTotal counts business events inserted by
	// GenerateStockEvents, labelled by event_type. The instrument-creation
	// error is deliberately discarded with the blank identifier.
	businessEventsTotal, _ = persistMeter.Int64Counter(
		"wwi.business_events_generated_total",
		metric.WithDescription("Business events generated from reorder data"),
	)
)
// PersistReorderRecommendations stores one WWI reorder-recommendation snapshot
// as a JSONB payload, bumps the write counter, and appends an audit entry.
// Failures are logged, recorded on the span, and swallowed.
func PersistReorderRecommendations(ctx context.Context, pool *pgxpool.Pool, data []analytics.ReorderRecommendation, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.wwi.persist_reorder_recommendations",
		trace.WithAttributes(attribute.Int("item_count", len(data))),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO wwi_reorder_recommendations
		 (id, created_at, item_count, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6::jsonb)`,
		newUUID(), len(data), source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist WWI reorder recommendations", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "wwi_reorder_recommendations")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "recommendations.generated", ActorType: actorType(source), ActorID: source,
		Domain: "wwi", Service: "otel-bi-analytics", EntityType: "reorder_recommendations",
		Payload: map[string]any{"item_count": len(data)},
	})
}
// PersistSupplierScores stores one WWI supplier-score snapshot (including the
// topN used) as a JSONB payload, bumps the write counter, and appends an audit
// entry. Failures are logged, recorded on the span, and swallowed.
func PersistSupplierScores(ctx context.Context, pool *pgxpool.Pool, data []analytics.SupplierScore, topN int, source string) {
	ctx, span := persistTracer.Start(ctx, "persistence.wwi.persist_supplier_scores",
		trace.WithAttributes(attribute.Int("supplier_count", len(data))),
	)
	defer span.End()
	traceID, spanID := spanContext(span)
	_, err := pool.Exec(ctx,
		`INSERT INTO wwi_supplier_scores
		 (id, computed_at, supplier_count, top_n, trigger_source, trace_id, span_id, payload)
		 VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7::jsonb)`,
		newUUID(), len(data), topN, source, traceID, spanID, mustJSON(data),
	)
	if err != nil {
		slog.Warn("failed to persist WWI supplier scores", "err", err)
		span.RecordError(err)
		return
	}
	// Metric and audit fire only after a successful insert.
	persistWritesTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("entity", "wwi_supplier_scores")))
	AppendAudit(ctx, pool, AuditEntry{
		Action: "scores.generated", ActorType: actorType(source), ActorID: source,
		Domain: "wwi", Service: "otel-bi-analytics", EntityType: "supplier_scores",
		Payload: map[string]any{"supplier_count": len(data), "top_n": topN},
	})
}
// GenerateStockEvents writes LOW_STOCK business events for HIGH-urgency items,
// deduplicating within a 24-hour window. All inserts share one transaction;
// per-item failures are logged and skipped rather than aborting the batch.
// Returns an error only when the transaction itself cannot begin or commit.
func GenerateStockEvents(ctx context.Context, pool *pgxpool.Pool, items []analytics.ReorderRecommendation) error {
	ctx, span := persistTracer.Start(ctx, "persistence.wwi.generate_stock_events")
	defer span.End()
	// The span's IDs are loop-invariant: compute them once, not per item.
	traceID, spanID := spanContext(span)
	cutoff := time.Now().UTC().Add(-24 * time.Hour)
	tx, err := pool.Begin(ctx)
	if err != nil {
		return fmt.Errorf("begin transaction: %w", err)
	}
	defer tx.Rollback(ctx) //nolint:errcheck // no-op after a successful commit
	inserted := 0
	for _, item := range items {
		if item.Urgency != "HIGH" {
			continue
		}
		entityKey := fmt.Sprintf("%d", item.StockItemKey)
		// Dedupe: skip if a LOW_STOCK event for this item exists within 24h.
		var existingID string
		err := tx.QueryRow(ctx,
			`SELECT id FROM wwi_business_events
		 WHERE event_type = 'LOW_STOCK' AND entity_key = $1 AND occurred_at >= $2
		 LIMIT 1`,
			entityKey, cutoff,
		).Scan(&existingID)
		if err == nil {
			continue // already exists within 24h
		}
		if !errors.Is(err, pgx.ErrNoRows) {
			slog.Warn("error checking existing business event", "err", err)
			continue
		}
		// No stockout estimate means the item is already depleted.
		daysStr := "immediately"
		if item.DaysUntilStockout != nil {
			daysStr = fmt.Sprintf("%.1f days", *item.DaysUntilStockout)
		}
		message := fmt.Sprintf(
			"Stock for '%s' will be exhausted in %s. Current stock: %.0f units, daily demand: %.1f units.",
			item.StockItemName, daysStr, item.CurrentStock, item.AvgDailyDemand,
		)
		details := mustJSON(map[string]any{
			"current_stock":           item.CurrentStock,
			"avg_daily_demand":        item.AvgDailyDemand,
			"recommended_reorder_qty": item.RecommendedReorderQty,
		})
		_, err = tx.Exec(ctx,
			`INSERT INTO wwi_business_events
		 (id, occurred_at, event_type, severity, entity_key, entity_name, message, trace_id, span_id, details)
		 VALUES ($1, NOW(), 'LOW_STOCK', 'HIGH', $2, $3, $4, $5, $6, $7::jsonb)`,
			newUUID(), entityKey, item.StockItemName, message, traceID, spanID, details,
		)
		if err != nil {
			slog.Warn("failed to insert business event", "item", item.StockItemKey, "err", err)
			continue
		}
		inserted++
		businessEventsTotal.Add(ctx, 1, metric.WithAttributes(attribute.String("event_type", "LOW_STOCK")))
	}
	if err := tx.Commit(ctx); err != nil {
		return fmt.Errorf("commit stock events: %w", err)
	}
	span.SetAttributes(attribute.Int("events_inserted", inserted))
	return nil
}

View File

@@ -0,0 +1,306 @@
package scheduler
import (
"context"
"database/sql"
"fmt"
"log/slog"
"time"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/robfig/cron/v3"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"otel-bi-analytics/internal/analytics"
"otel-bi-analytics/internal/persistence"
)
// Package-level OpenTelemetry instruments for scheduler job execution.
// Instrument-creation errors are deliberately discarded with the blank
// identifier throughout.
var (
	// schedTracer creates a root span per job run (see runJob).
	schedTracer = otel.Tracer("otel-bi.scheduler")
	schedMeter  = otel.Meter("otel-bi.scheduler")
	// jobDurationSeconds records wall-clock runtime per job, success or not.
	jobDurationSeconds, _ = schedMeter.Float64Histogram(
		"scheduler.job.duration_seconds",
		metric.WithDescription("Scheduler job execution duration"),
		metric.WithUnit("s"),
	)
	// jobSuccessTotal counts successful job completions.
	jobSuccessTotal, _ = schedMeter.Int64Counter(
		"scheduler.job.success_total",
		metric.WithDescription("Scheduler jobs completed successfully"),
	)
	// jobFailureTotal counts failed job runs.
	jobFailureTotal, _ = schedMeter.Int64Counter(
		"scheduler.job.failure_total",
		metric.WithDescription("Scheduler jobs that failed"),
	)
	// jobRecordsProcessed accumulates the record counts reported by jobs.
	jobRecordsProcessed, _ = schedMeter.Int64Counter(
		"scheduler.job.records_processed_total",
		metric.WithDescription("Records processed by scheduler jobs"),
	)
)
// Scheduler wraps robfig/cron and owns all job implementations.
type Scheduler struct {
	awDB   *sql.DB       // AdventureWorks source database
	wwiDB  *sql.DB       // Wide World Importers source database
	pgPool *pgxpool.Pool // PostgreSQL pool for persistence and audit writes
	topN   int           // default top-N passed to score/demand analytics
	cron   *cron.Cron    // cron runner (UTC, seconds-resolution specs; see New)
}
// New builds a Scheduler over the given database handles. The cron runner is
// configured for UTC with seconds-resolution specs; no jobs are registered or
// started until Start is called.
func New(awDB, wwiDB *sql.DB, pgPool *pgxpool.Pool, defaultTopN int) *Scheduler {
	runner := cron.New(cron.WithLocation(time.UTC), cron.WithSeconds())
	return &Scheduler{
		awDB:   awDB,
		wwiDB:  wwiDB,
		pgPool: pgPool,
		topN:   defaultTopN,
		cron:   runner,
	}
}
// Start registers all recurring jobs and starts the cron runner. Specs use
// the six-field (seconds-first) format in UTC, as configured in New.
func (s *Scheduler) Start() {
	jobs := []struct {
		spec string
		fn   func()
	}{
		{"0 0 2 * * *", s.jobAWForecast},
		{"0 30 2 * * *", s.jobAWScores},
		{"0 0 3 * * *", s.jobAWDataQuality},
		{"0 30 3 * * *", s.jobAWAnomalyDetection},
		{"0 0 * * * *", s.jobWWIReorder},
		{"0 30 3 * * *", s.jobWWISupplierScores},
		{"0 30 * * * *", s.jobWWIEvents},
		{"0 0 4 * * *", s.jobWWIDataQuality},
	}
	for _, j := range jobs {
		// AddFunc fails only on an invalid spec; log it instead of silently
		// dropping the schedule (previously the error was discarded).
		if _, err := s.cron.AddFunc(j.spec, j.fn); err != nil {
			slog.Error("failed to register cron job", "spec", j.spec, "err", err)
		}
	}
	s.cron.Start()
	slog.Info("scheduler started", "jobs", len(s.cron.Entries()))
}
// Stop halts the cron runner and blocks until any in-flight job invocations
// started by cron have returned.
func (s *Scheduler) Stop() {
	<-s.cron.Stop().Done()
}
// TriggerAWJob runs the named AW job immediately in a goroutine. It returns
// an error for unrecognized job names; otherwise the job runs asynchronously
// and its outcome is reported through the usual runJob instrumentation.
func (s *Scheduler) TriggerAWJob(jobName string) error {
	switch jobName {
	case "forecast":
		go s.jobAWForecast()
	case "scores":
		go s.jobAWScores()
	case "data_quality":
		go s.jobAWDataQuality()
	case "anomaly_detection":
		go s.jobAWAnomalyDetection()
	default:
		return fmt.Errorf("unknown AW job: %s", jobName)
	}
	return nil
}
// TriggerWWIJob runs the named WWI job immediately in a goroutine. It returns
// an error for unrecognized job names; otherwise the job runs asynchronously
// and its outcome is reported through the usual runJob instrumentation.
func (s *Scheduler) TriggerWWIJob(jobName string) error {
	switch jobName {
	case "reorder":
		go s.jobWWIReorder()
	case "supplier_scores":
		go s.jobWWISupplierScores()
	case "events":
		go s.jobWWIEvents()
	case "data_quality":
		go s.jobWWIDataQuality()
	default:
		return fmt.Errorf("unknown WWI job: %s", jobName)
	}
	return nil
}
// ---------------------------------------------------------------------------
// runJob wraps a job function with OTel tracing, metrics, and audit logging.
// ---------------------------------------------------------------------------
// runJob wraps one job execution with OTel tracing, metrics, job-execution
// tracking, and audit logging. fn returns the number of records it processed.
// Each run gets a fresh root span (WithNewRoot) so scheduled runs are not
// attached to any ambient trace. All persistence calls are best-effort: they
// log internally and never abort the job bookkeeping.
func (s *Scheduler) runJob(jobName, domain string, fn func(ctx context.Context) (int, error)) {
	// Background context: jobs run detached from any request lifecycle.
	ctx := context.Background()
	ctx, span := schedTracer.Start(ctx,
		"scheduler."+jobName,
		trace.WithNewRoot(),
		trace.WithSpanKind(trace.SpanKindInternal),
		trace.WithAttributes(
			attribute.String("job.name", jobName),
			attribute.String("job.domain", domain),
		),
	)
	defer span.End()
	traceID, spanID := spanCtx(span)
	// Record the 'running' row first so a crash mid-job leaves evidence.
	jobID := persistence.RecordJobStart(ctx, s.pgPool, jobName, domain, traceID, spanID)
	startedAt := time.Now()
	slog.Info("job started", "job", jobName)
	records, err := fn(ctx)
	duration := time.Since(startedAt).Seconds()
	attrs := metric.WithAttributes(
		attribute.String("job.name", jobName),
		attribute.String("job.domain", domain),
	)
	// Duration is recorded for both success and failure paths.
	jobDurationSeconds.Record(ctx, duration, attrs)
	if err != nil {
		slog.Error("job failed", "job", jobName, "err", err, "duration_s", duration)
		span.RecordError(err)
		span.SetAttributes(attribute.String("job.status", "failure"))
		persistence.RecordJobFailure(ctx, s.pgPool, jobID, startedAt, err.Error())
		persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
			Action: "job.failed", ActorType: "scheduler", ActorID: jobName,
			Domain: domain, Service: "otel-bi-analytics", Status: "failure",
			Payload: map[string]any{"job_name": jobName, "error": err.Error()},
		})
		jobFailureTotal.Add(ctx, 1, attrs)
		return
	}
	slog.Info("job completed", "job", jobName, "records", records, "duration_s", duration)
	span.SetAttributes(
		attribute.String("job.status", "success"),
		attribute.Int("job.records_processed", records),
	)
	persistence.RecordJobComplete(ctx, s.pgPool, jobID, startedAt, records)
	persistence.AppendAudit(ctx, s.pgPool, persistence.AuditEntry{
		Action: "job.completed", ActorType: "scheduler", ActorID: jobName,
		Domain: domain, Service: "otel-bi-analytics",
		Payload: map[string]any{"job_name": jobName, "records_processed": records},
	})
	jobSuccessTotal.Add(ctx, 1, attrs)
	jobRecordsProcessed.Add(ctx, int64(records), attrs)
}
// spanCtx extracts the trace and span IDs from a span as nullable strings,
// returning (nil, nil) when the span context is invalid. It duplicates
// persistence.spanContext because that helper is unexported there.
func spanCtx(span trace.Span) (traceID, spanID *string) {
	sc := span.SpanContext()
	if !sc.IsValid() {
		return nil, nil
	}
	t := sc.TraceID().String()
	s := sc.SpanID().String()
	return &t, &s
}
// ---------------------------------------------------------------------------
// AW jobs
// ---------------------------------------------------------------------------
// jobAWForecast generates and persists a 30-day AW revenue forecast.
func (s *Scheduler) jobAWForecast() {
	s.runJob("aw.daily.forecast", "aw", func(ctx context.Context) (int, error) {
		points, err := analytics.AWGetSalesForecast(ctx, s.awDB, 30)
		if err != nil {
			return 0, err
		}
		persistence.PersistForecast(ctx, s.pgPool, points, 30, "scheduler.aw.daily.forecast")
		return len(points), nil
	})
}
// jobAWScores computes and persists both AW rep scores and product demand.
func (s *Scheduler) jobAWScores() {
	s.runJob("aw.daily.scores", "aw", func(ctx context.Context) (int, error) {
		repScores, err := analytics.AWGetRepScores(ctx, s.awDB, s.topN)
		if err != nil {
			return 0, err
		}
		demand, err := analytics.AWGetProductDemand(ctx, s.awDB, s.topN)
		if err != nil {
			return 0, err
		}
		persistence.PersistRepScores(ctx, s.pgPool, repScores, s.topN, "scheduler.aw.daily.scores")
		persistence.PersistProductDemand(ctx, s.pgPool, demand, s.topN, "scheduler.aw.daily.scores")
		return len(repScores) + len(demand), nil
	})
}
// jobAWDataQuality runs the AdventureWorks data-quality checks and
// records the outcome in the audit log; the report itself carries the
// pass/fail status and the list of failed checks.
func (s *Scheduler) jobAWDataQuality() {
	s.runJob("aw.daily.data_quality", "aw", func(ctx context.Context) (int, error) {
		report, err := analytics.AWRunDataQualityCheck(ctx, s.awDB)
		if err != nil {
			return 0, err
		}
		entry := persistence.AuditEntry{
			Action:     "job.completed",
			ActorType:  "scheduler",
			ActorID:    "aw.daily.data_quality",
			Domain:     "aw",
			Service:    "otel-bi-analytics",
			EntityType: "data_quality",
			Status:     report.Status,
			Payload:    map[string]any{"status": report.Status, "failed_checks": report.FailedChecks},
		}
		persistence.AppendAudit(ctx, s.pgPool, entry)
		return len(report.Checks), nil
	})
}
// jobAWAnomalyDetection runs the daily AdventureWorks anomaly scan and
// persists the detected anomalies as a single run record.
func (s *Scheduler) jobAWAnomalyDetection() {
	s.runJob("aw.daily.anomaly_detection", "aw", func(ctx context.Context) (int, error) {
		anomalies, err := analytics.AWRunAnomalyDetection(ctx, s.awDB)
		if err != nil {
			return 0, err
		}
		persistence.PersistAnomalyRun(ctx, s.pgPool, anomalies, "scheduler.aw.daily.anomaly_detection")
		return len(anomalies), nil
	})
}
// ---------------------------------------------------------------------------
// WWI jobs
// ---------------------------------------------------------------------------
// jobWWIReorder refreshes the hourly Wide World Importers reorder
// recommendations, persists them, and derives stock events from the
// full recommendation set. Event generation is best-effort: a failure
// there is logged but does not fail the job.
func (s *Scheduler) jobWWIReorder() {
	s.runJob("wwi.hourly.reorder", "wwi", func(ctx context.Context) (int, error) {
		recs, err := analytics.WWIGetReorderRecommendations(ctx, s.wwiDB)
		if err != nil {
			return 0, err
		}
		persistence.PersistReorderRecommendations(ctx, s.pgPool, recs, "scheduler.wwi.hourly.reorder")
		if evErr := persistence.GenerateStockEvents(ctx, s.pgPool, recs); evErr != nil {
			slog.Warn("generate_stock_events failed", "err", evErr)
		}
		return len(recs), nil
	})
}
// jobWWISupplierScores computes the daily top-N Wide World Importers
// supplier scores and stores them in PostgreSQL.
func (s *Scheduler) jobWWISupplierScores() {
	s.runJob("wwi.daily.supplier_scores", "wwi", func(ctx context.Context) (int, error) {
		scores, err := analytics.WWIGetSupplierScores(ctx, s.wwiDB, s.topN)
		if err != nil {
			return 0, err
		}
		persistence.PersistSupplierScores(ctx, s.pgPool, scores, s.topN, "scheduler.wwi.daily.supplier_scores")
		return len(scores), nil
	})
}
// jobWWIEvents generates stock events for HIGH-urgency reorder
// recommendations only (unlike the reorder job, which emits events for
// the full set). Event generation is best-effort: a failure is logged
// but does not fail the job.
func (s *Scheduler) jobWWIEvents() {
	s.runJob("wwi.hourly.events", "wwi", func(ctx context.Context) (int, error) {
		recs, err := analytics.WWIGetReorderRecommendations(ctx, s.wwiDB)
		if err != nil {
			return 0, err
		}
		// Keep only HIGH-urgency items; the slice stays nil when nothing
		// qualifies, preserving the original no-match behavior downstream.
		var urgent []analytics.ReorderRecommendation
		for i := range recs {
			if recs[i].Urgency == "HIGH" {
				urgent = append(urgent, recs[i])
			}
		}
		if evErr := persistence.GenerateStockEvents(ctx, s.pgPool, urgent); evErr != nil {
			slog.Warn("generate_stock_events (events job) failed", "err", evErr)
		}
		return len(urgent), nil
	})
}
// jobWWIDataQuality runs the Wide World Importers data-quality checks
// and records the outcome in the audit log; the report carries the
// pass/fail status and the list of failed checks.
func (s *Scheduler) jobWWIDataQuality() {
	s.runJob("wwi.daily.data_quality", "wwi", func(ctx context.Context) (int, error) {
		report, err := analytics.WWIRunDataQualityCheck(ctx, s.wwiDB)
		if err != nil {
			return 0, err
		}
		entry := persistence.AuditEntry{
			Action:     "job.completed",
			ActorType:  "scheduler",
			ActorID:    "wwi.daily.data_quality",
			Domain:     "wwi",
			Service:    "otel-bi-analytics",
			EntityType: "data_quality",
			Status:     report.Status,
			Payload:    map[string]any{"status": report.Status, "failed_checks": report.FailedChecks},
		}
		persistence.AppendAudit(ctx, s.pgPool, entry)
		return len(report.Checks), nil
	})
}