282 lines
10 KiB
Python
282 lines
10 KiB
Python
from __future__ import annotations

import logging
from collections.abc import Callable
from time import perf_counter

from opentelemetry import metrics, trace
from sqlalchemy import desc, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session, sessionmaker

from app.db.postgres_models import AuditLog, ForecastRun, RankingRun, RecommendationRun
|
LOGGER = logging.getLogger(__name__)
|
|
|
|
|
|
class PersistenceService:
    """Persistence layer for audit logs and model-run records.

    Wraps a SQLAlchemy ``sessionmaker`` and instruments every operation with
    an OpenTelemetry span.  Writes additionally emit a counter
    (``postgres_persist_writes_total``, attributes ``entity``/``status``) and
    a latency histogram (``postgres_persist_write_latency_ms``).

    Writes are deliberately best-effort: ``SQLAlchemyError`` is caught,
    logged, and counted with ``status="error"`` instead of being propagated,
    so a storage outage never breaks the request path that triggered the
    write.  Reads (``list_*``) let database errors propagate to the caller.
    """

    def __init__(self, session_factory: sessionmaker[Session]) -> None:
        """Store the session factory and create tracer/meter instruments.

        Args:
            session_factory: factory producing short-lived ``Session``
                objects; one session is opened per operation.
        """
        self.session_factory = session_factory
        self.tracer = trace.get_tracer(__name__)
        self.meter = metrics.get_meter(__name__)
        self.write_counter = self.meter.create_counter(
            name="postgres_persist_writes_total",
            description="Total writes to app persistence PostgreSQL",
        )
        self.write_latency = self.meter.create_histogram(
            name="postgres_persist_write_latency_ms",
            unit="ms",
            description="Latency of app persistence write operations",
        )

    # ------------------------------------------------------------------
    # Row -> dict serializers.  ``created_at`` is rendered as ISO-8601 so
    # the dicts are JSON-friendly; ``payload``/``details`` pass through as
    # stored (JSON columns).
    # ------------------------------------------------------------------

    @staticmethod
    def _to_audit_dict(row: AuditLog) -> dict:
        """Serialize an ``AuditLog`` row to a JSON-friendly dict."""
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "method": row.method,
            "path": row.path,
            "query_string": row.query_string,
            "status_code": row.status_code,
            "duration_ms": row.duration_ms,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "client_ip": row.client_ip,
            "user_agent": row.user_agent,
            "details": row.details,
        }

    @staticmethod
    def _to_forecast_dict(row: ForecastRun) -> dict:
        """Serialize a ``ForecastRun`` row to a JSON-friendly dict."""
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "horizon_days": row.horizon_days,
            "point_count": row.point_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    @staticmethod
    def _to_ranking_dict(row: RankingRun) -> dict:
        """Serialize a ``RankingRun`` row to a JSON-friendly dict."""
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "top_n": row.top_n,
            "item_count": row.item_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    @staticmethod
    def _to_recommendation_dict(row: RecommendationRun) -> dict:
        """Serialize a ``RecommendationRun`` row to a JSON-friendly dict."""
        return {
            "id": row.id,
            "created_at": row.created_at.isoformat(),
            "item_count": row.item_count,
            "trigger_source": row.trigger_source,
            "trace_id": row.trace_id,
            "span_id": row.span_id,
            "payload": row.payload,
        }

    # ------------------------------------------------------------------
    # Shared write/read plumbing (consolidates four copy-pasted variants
    # of each pattern).
    # ------------------------------------------------------------------

    def _persist(
        self,
        *,
        entity: str,
        label: str,
        span_name: str,
        row_factory: Callable[[], object],
    ) -> None:
        """Best-effort write of the single ORM row produced by *row_factory*.

        Args:
            entity: metric attribute value (e.g. ``"audit"``).
            label: human-readable name used in the failure log message
                (e.g. ``"audit log"``), matching the historical messages.
            span_name: tracer span name (e.g. ``"persist.audit_log"``).
            row_factory: zero-argument callable building the ORM instance;
                deferred so row construction happens inside the span/try.

        Never raises on ``SQLAlchemyError``: failures are logged with a
        traceback and counted with ``status="error"``.  The latency
        histogram is recorded on both success and failure paths.
        """
        started = perf_counter()
        with self.tracer.start_as_current_span(span_name):
            try:
                with self.session_factory() as session:
                    session.add(row_factory())
                    session.commit()
                self.write_counter.add(
                    1, attributes={"entity": entity, "status": "ok"}
                )
            except SQLAlchemyError as exc:
                LOGGER.exception("Failed to persist %s: %s", label, exc)
                self.write_counter.add(
                    1, attributes={"entity": entity, "status": "error"}
                )
            finally:
                self.write_latency.record(
                    (perf_counter() - started) * 1000,
                    attributes={"entity": entity},
                )

    def _list_rows(
        self,
        *,
        span_name: str,
        model: type,
        to_dict: Callable[[object], dict],
        limit: int,
    ) -> list[dict]:
        """Return up to *limit* rows of *model*, newest first, serialized.

        Rows are materialized inside the session so no lazy loading happens
        after it closes.  Database errors propagate to the caller.
        """
        with self.tracer.start_as_current_span(span_name):
            with self.session_factory() as session:
                rows = session.execute(
                    select(model).order_by(desc(model.created_at)).limit(limit)
                ).scalars()
                return [to_dict(row) for row in rows]

    # ------------------------------------------------------------------
    # Public write API.
    # ------------------------------------------------------------------

    def record_audit_log(
        self,
        *,
        method: str,
        path: str,
        query_string: str,
        status_code: int,
        duration_ms: float,
        trace_id: str | None,
        span_id: str | None,
        client_ip: str | None,
        user_agent: str | None,
        details: dict | None = None,
    ) -> None:
        """Persist one HTTP request audit record (best-effort, never raises)."""
        self._persist(
            entity="audit",
            label="audit log",
            span_name="persist.audit_log",
            row_factory=lambda: AuditLog(
                method=method,
                path=path,
                # Defensive truncation: cap pathological query strings at
                # 1000 chars before they hit the column.
                query_string=query_string[:1000],
                status_code=status_code,
                duration_ms=duration_ms,
                trace_id=trace_id,
                span_id=span_id,
                client_ip=client_ip,
                user_agent=user_agent,
                details=details or {},
            ),
        )

    def record_forecast_run(
        self,
        *,
        horizon_days: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        """Persist one forecast run and its payload (best-effort, never raises)."""
        self._persist(
            entity="forecast",
            label="forecast run",
            span_name="persist.forecast_run",
            row_factory=lambda: ForecastRun(
                horizon_days=horizon_days,
                point_count=len(payload),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=payload,
            ),
        )

    def record_ranking_run(
        self,
        *,
        top_n: int,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        """Persist one ranking run and its payload (best-effort, never raises)."""
        self._persist(
            entity="ranking",
            label="ranking run",
            span_name="persist.ranking_run",
            row_factory=lambda: RankingRun(
                top_n=top_n,
                item_count=len(payload),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=payload,
            ),
        )

    def record_recommendation_run(
        self,
        *,
        payload: list[dict],
        trigger_source: str,
        trace_id: str | None,
        span_id: str | None,
    ) -> None:
        """Persist one recommendation run and its payload (best-effort, never raises)."""
        self._persist(
            entity="recommendation",
            label="recommendation run",
            span_name="persist.recommendation_run",
            row_factory=lambda: RecommendationRun(
                item_count=len(payload),
                trigger_source=trigger_source,
                trace_id=trace_id,
                span_id=span_id,
                payload=payload,
            ),
        )

    # ------------------------------------------------------------------
    # Public read API (newest-first, limited).
    # ------------------------------------------------------------------

    def list_audit_logs(self, limit: int) -> list[dict]:
        """Return the newest *limit* audit log entries as dicts."""
        return self._list_rows(
            span_name="persist.list_audit_logs",
            model=AuditLog,
            to_dict=self._to_audit_dict,
            limit=limit,
        )

    def list_forecast_runs(self, limit: int) -> list[dict]:
        """Return the newest *limit* forecast runs as dicts."""
        return self._list_rows(
            span_name="persist.list_forecast_runs",
            model=ForecastRun,
            to_dict=self._to_forecast_dict,
            limit=limit,
        )

    def list_ranking_runs(self, limit: int) -> list[dict]:
        """Return the newest *limit* ranking runs as dicts."""
        return self._list_rows(
            span_name="persist.list_ranking_runs",
            model=RankingRun,
            to_dict=self._to_ranking_dict,
            limit=limit,
        )

    def list_recommendation_runs(self, limit: int) -> list[dict]:
        """Return the newest *limit* recommendation runs as dicts."""
        return self._list_rows(
            span_name="persist.list_recommendation_runs",
            model=RecommendationRun,
            to_dict=self._to_recommendation_dict,
            limit=limit,
        )