feat: 累积功能变更 — 聊天集成、租户管理、小程序更新、ETL 增强、迁移脚本

包含多个会话的累积代码变更:
- backend: AI 聊天服务、触发器调度、认证增强、WebSocket、调度器最小间隔
- admin-web: ETL 状态页、任务管理、调度配置、登录优化
- miniprogram: 看板页面、聊天集成、UI 组件、导航更新
- etl: DWS 新任务(finance_area_daily/board_cache)、连接器增强
- tenant-admin: 项目初始化
- db: 19 个迁移脚本(etl_feiqiu 11 + zqyy_app 8)
- packages/shared: 枚举和工具函数更新
- tools: 数据库工具、报表生成、健康检查
- docs: PRD/架构/部署/合约文档更新

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Neo
2026-04-06 00:03:48 +08:00
parent 70324d8542
commit 6f8f12314f
515 changed files with 76604 additions and 7456 deletions

View File

@@ -0,0 +1 @@
# AI 监控后台服务层

View File

@@ -0,0 +1,721 @@
"""AI 监控后台聚合服务层。
提供 Dashboard 总览、调度任务管理、调用记录查询、缓存失效、
Token 预算、批量执行(含成本二次确认)、告警管理等功能。
所有数据库操作使用 psycopg2 同步连接,方法签名为 asyncFastAPI 兼容)。
查询强制 site_id 隔离(当 site_id 参数不为 None 时)。
"""
from __future__ import annotations
import asyncio
import logging
import uuid
from datetime import datetime, timezone, timedelta
from app.ai.budget_tracker import BudgetTracker
from app.database import get_connection
logger = logging.getLogger(__name__)
# Batch-execution estimate: average token consumption per call
AVG_TOKENS_PER_CALL = 2000
# TTL for the in-memory batch-confirmation store
_BATCH_TTL_SECONDS = 600  # 10 minutes
class AdminAIService:
    """Aggregated service layer for the AI monitoring back office."""

    def __init__(self, budget_tracker: BudgetTracker | None = None) -> None:
        # Optional tracker; when None, get_budget() falls back to raw SQL.
        self._budget = budget_tracker
        # batch_id -> {params, expires_at}; entries expire after _BATCH_TTL_SECONDS
        self._batch_store: dict[str, dict] = {}
# ── Dashboard ─────────────────────────────────────────
async def get_dashboard(self, site_id: int | None = None) -> dict:
    """Aggregate all dashboard data into one response dict.

    NOTE(review): the helpers open blocking psycopg2 connections inside
    async methods, so these awaits run strictly sequentially — confirm the
    resulting latency is acceptable for the dashboard endpoint.
    """
    today_stats = await self._get_today_stats(site_id)
    trend_7d = await self._get_7d_trend(site_id)
    app_dist = await self._get_app_distribution(site_id)
    app_health = await self._get_app_health(site_id)
    budget = await self.get_budget()
    recent_alerts = await self._get_recent_alerts(site_id)
    return {
        **today_stats,
        "trend_7d": trend_7d,
        "app_distribution": app_dist,
        "budget": budget,
        "recent_alerts": recent_alerts,
        "app_health": app_health,
    }
async def _get_today_stats(self, site_id: int | None) -> dict:
    """Today's call count, success rate, token usage and mean latency."""
    clause, bind = _site_filter(site_id)
    sql = f"""
        SELECT
            COUNT(*) AS total_calls,
            COUNT(*) FILTER (WHERE status = 'success') AS success_count,
            COALESCE(SUM(tokens_used), 0) AS total_tokens,
            COALESCE(AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0)
                AS avg_latency
        FROM biz.ai_run_logs
        WHERE created_at >= CURRENT_DATE
          AND created_at < CURRENT_DATE + INTERVAL '1 day'
        {clause}
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(sql, bind)
            record = cur.fetchone()
        conn.commit()
    finally:
        conn.close()
    if record is None:
        record = (0, 0, 0, 0)
    calls, ok_count, token_sum, mean_latency = record
    success_rate = round(ok_count / calls, 4) if calls > 0 else 0.0
    return {
        "today_calls": calls,
        "today_success_rate": success_rate,
        "today_tokens": int(token_sum),
        "today_avg_latency_ms": round(float(mean_latency), 2),
    }
async def _get_7d_trend(self, site_id: int | None) -> list[dict]:
    """Per-day call aggregates over the last 7 days."""
    clause, bind = _site_filter(site_id)
    sql = f"""
        SELECT
            created_at::date AS day,
            COUNT(*) AS calls,
            COUNT(*) FILTER (WHERE status = 'success') AS success_count
        FROM biz.ai_run_logs
        WHERE created_at >= CURRENT_DATE - INTERVAL '6 days'
        {clause}
        GROUP BY day
        ORDER BY day
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(sql, bind)
            records = cur.fetchall()
        conn.commit()
    finally:
        conn.close()
    trend = []
    for day, calls, ok_count in records:
        ratio = round(ok_count / calls, 4) if calls > 0 else 0.0
        trend.append({"date": day.isoformat(), "calls": calls, "success_rate": ratio})
    return trend
async def _get_app_distribution(self, site_id: int | None) -> list[dict]:
    """Share of calls per app over the last 7 days."""
    clause, bind = _site_filter(site_id)
    sql = f"""
        SELECT app_type, COUNT(*) AS cnt
        FROM biz.ai_run_logs
        WHERE created_at >= CURRENT_DATE - INTERVAL '6 days'
        {clause}
        GROUP BY app_type
        ORDER BY cnt DESC
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(sql, bind)
            records = cur.fetchall()
        conn.commit()
    finally:
        conn.close()
    # `or 1` guards the division when there are no rows at all.
    grand_total = sum(count for _, count in records) or 1
    return [
        {"app_type": app, "count": count, "percentage": round(count / grand_total, 4)}
        for app, count in records
    ]
async def _get_app_health(self, site_id: int | None) -> list[dict]:
    """Latest call status per app (DISTINCT ON keeps the newest row each)."""
    site_clause, params = _site_filter(site_id)
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                f"""
                SELECT DISTINCT ON (app_type)
                    app_type,
                    status AS last_status,
                    created_at AS last_call_at
                FROM biz.ai_run_logs
                WHERE TRUE {site_clause}
                ORDER BY app_type, created_at DESC
                """,
                params,
            )
            rows = cur.fetchall()
            conn.commit()
    finally:
        conn.close()
    return [
        {
            "app_type": row[0],
            "last_status": row[1],
            "last_call_at": row[2].isoformat() if row[2] else None,
        }
        for row in rows
    ]
async def _get_recent_alerts(self, site_id: int | None, limit: int = 10) -> list[dict]:
    """Most recent alert events (for the dashboard widget)."""
    site_clause, params = _site_filter(site_id)
    params = (*params, limit)  # LIMIT bind value goes last
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                f"""
                SELECT id, app_type, status, alert_status,
                       error_message, created_at
                FROM biz.ai_run_logs
                WHERE status IN ('failed', 'timeout', 'circuit_open')
                {site_clause}
                ORDER BY created_at DESC
                LIMIT %s
                """,
                params,
            )
            cols = [d[0] for d in cur.description]
            rows = cur.fetchall()
            conn.commit()
    finally:
        conn.close()
    return [_row_to_dict(cols, r) for r in rows]
# ── 调度任务 ──────────────────────────────────────────
async def list_trigger_jobs(
    self, filters: dict, page: int = 1, page_size: int = 20,
) -> dict:
    """Paged query over ai_trigger_jobs plus today's dedup-skip count.

    Args:
        filters: optional keys event_type / status / site_id /
            date_from / date_to.
        page: 1-based page number.
        page_size: rows per page.
    """
    where_parts: list[str] = []
    params: list = []
    # Only whitelisted column names reach the SQL text; values are bound.
    for key in ("event_type", "status", "site_id"):
        if filters.get(key) is not None:
            where_parts.append(f"{key} = %s")
            params.append(filters[key])
    if filters.get("date_from"):
        where_parts.append("created_at >= %s")
        params.append(filters["date_from"])
    if filters.get("date_to"):
        where_parts.append("created_at <= %s")
        params.append(filters["date_to"])
    where_sql = ("WHERE " + " AND ".join(where_parts)) if where_parts else ""
    offset = (page - 1) * page_size
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            # Total row count under the same filters
            cur.execute(
                f"SELECT COUNT(*) FROM biz.ai_trigger_jobs {where_sql}",
                params,
            )
            total = cur.fetchone()[0]
            # One page of rows
            cur.execute(
                f"""
                SELECT id, event_type, member_id, status, app_chain,
                       is_forced, site_id, started_at, finished_at, created_at
                FROM biz.ai_trigger_jobs
                {where_sql}
                ORDER BY created_at DESC
                LIMIT %s OFFSET %s
                """,
                (*params, page_size, offset),
            )
            cols = [d[0] for d in cur.description]
            rows = cur.fetchall()
            # Today's duplicate-skip count (global — ignores the filters)
            cur.execute(
                """
                SELECT COUNT(*)
                FROM biz.ai_trigger_jobs
                WHERE status = 'skipped_duplicate'
                  AND created_at >= CURRENT_DATE
                  AND created_at < CURRENT_DATE + INTERVAL '1 day'
                """,
            )
            today_skipped = cur.fetchone()[0]
            conn.commit()
    finally:
        conn.close()
    return {
        "items": [_row_to_dict(cols, r) for r in rows],
        "total": total,
        "page": page,
        "page_size": page_size,
        "today_skipped_duplicates": today_skipped,
    }
async def get_trigger_job(self, job_id: int) -> dict | None:
    """Fetch one trigger-job row as a dict, or None when it does not exist."""
    query = """
        SELECT id, event_type, member_id, status, app_chain,
               is_forced, site_id, started_at, finished_at,
               created_at, payload, error_message, connector_type
        FROM biz.ai_trigger_jobs
        WHERE id = %s
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(query, (job_id,))
            names = [d[0] for d in cur.description]
            record = cur.fetchone()
        conn.commit()
    finally:
        conn.close()
    return None if record is None else _row_to_dict(names, record)
async def retry_trigger_job(self, job_id: int) -> int:
    """Clone an existing trigger job as a forced retry.

    Inserts a new row into biz.ai_trigger_jobs with is_forced=true and
    status='pending', copying the original job's payload.

    Args:
        job_id: id of the job to retry.

    Returns:
        The id of the newly created job.

    Raises:
        ValueError: if the original job does not exist.
    """
    original = await self.get_trigger_job(job_id)
    if original is None:
        raise ValueError(f"trigger_job {job_id} 不存在")
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                """
                INSERT INTO biz.ai_trigger_jobs
                    (event_type, member_id, site_id, connector_type,
                     payload, app_chain, is_forced, status)
                VALUES (%s, %s, %s, %s, %s, %s, true, 'pending')
                RETURNING id
                """,
                (
                    original["event_type"],
                    original.get("member_id"),
                    original["site_id"],
                    # BUGFIX: _row_to_dict always includes the key (NULL -> None),
                    # so dict.get's default could never apply; use `or` so a NULL
                    # connector_type actually falls back to 'feiqiu'.
                    original.get("connector_type") or "feiqiu",
                    original.get("payload"),
                    original.get("app_chain"),
                ),
            )
            new_id = cur.fetchone()[0]
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()
    return new_id
# ── 调用记录 ──────────────────────────────────────────
async def list_run_logs(
    self, filters: dict, page: int = 1, page_size: int = 20,
) -> dict:
    """Paged query over ai_run_logs.

    Args:
        filters: optional keys app_type / status / trigger_type / site_id /
            date_from / date_to.
        page: 1-based page number.
        page_size: rows per page.
    """
    where_parts: list[str] = []
    params: list = []
    # Only whitelisted column names reach the SQL text; values are bound.
    for key in ("app_type", "status", "trigger_type", "site_id"):
        if filters.get(key) is not None:
            where_parts.append(f"{key} = %s")
            params.append(filters[key])
    if filters.get("date_from"):
        where_parts.append("created_at >= %s")
        params.append(filters["date_from"])
    if filters.get("date_to"):
        where_parts.append("created_at <= %s")
        params.append(filters["date_to"])
    where_sql = ("WHERE " + " AND ".join(where_parts)) if where_parts else ""
    offset = (page - 1) * page_size
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                f"SELECT COUNT(*) FROM biz.ai_run_logs {where_sql}",
                params,
            )
            total = cur.fetchone()[0]
            cur.execute(
                f"""
                SELECT id, app_type, trigger_type, member_id,
                       tokens_used, latency_ms, status, site_id, created_at
                FROM biz.ai_run_logs
                {where_sql}
                ORDER BY created_at DESC
                LIMIT %s OFFSET %s
                """,
                (*params, page_size, offset),
            )
            cols = [d[0] for d in cur.description]
            rows = cur.fetchall()
            conn.commit()
    finally:
        conn.close()
    return {
        "items": [_row_to_dict(cols, r) for r in rows],
        "total": total,
        "page": page,
        "page_size": page_size,
    }
async def get_run_log(self, log_id: int) -> dict | None:
    """Full detail for one run log, including raw prompt/response (unredacted)."""
    query = """
        SELECT id, app_type, trigger_type, member_id,
               tokens_used, latency_ms, status, site_id,
               created_at, request_prompt, response_text,
               error_message, session_id, finished_at
        FROM biz.ai_run_logs
        WHERE id = %s
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(query, (log_id,))
            names = [d[0] for d in cur.description]
            record = cur.fetchone()
        conn.commit()
    finally:
        conn.close()
    return None if record is None else _row_to_dict(names, record)
# ── 缓存管理 ──────────────────────────────────────────
async def invalidate_cache(
    self, site_id: int, app_type: str | None = None, member_id: int | None = None,
) -> int:
    """Bulk-invalidate cache rows for a site; returns the affected row count.

    NOTE(review): app_type is matched against column ``cache_type`` and
    member_id against ``target_id`` (stringified) here, while the cleanup
    service filters biz.ai_cache by ``app_type`` — confirm which column
    names are correct for this table.
    """
    where_parts = ["site_id = %s"]
    params: list = [site_id]
    if app_type is not None:
        where_parts.append("cache_type = %s")
        params.append(app_type)
    if member_id is not None:
        where_parts.append("target_id = %s")
        params.append(str(member_id))
    where_sql = " AND ".join(where_parts)
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                f"""
                UPDATE biz.ai_cache
                SET status = 'invalidated'
                WHERE {where_sql}
                  AND status != 'invalidated'
                """,
                params,
            )
            affected = cur.rowcount
            conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()
    return affected
# ── Token 预算 ────────────────────────────────────────
async def get_budget(self) -> dict:
    """Token-budget usage: daily/monthly used amounts, limits and ratios.

    Prefers the injected BudgetTracker; without one, falls back to summing
    successful calls in ai_run_logs against hard-coded default limits.
    """
    if self._budget is not None:
        status = self._budget.check_budget()
        daily_limit = self._budget.daily_limit
        monthly_limit = self._budget.monthly_limit
        return {
            "daily_used": status.daily_used,
            "daily_limit": daily_limit,
            "daily_pct": round(status.daily_used / daily_limit, 4) if daily_limit > 0 else 0.0,
            "monthly_used": status.monthly_used,
            "monthly_limit": monthly_limit,
            "monthly_pct": round(status.monthly_used / monthly_limit, 4) if monthly_limit > 0 else 0.0,
        }
    # No BudgetTracker: query usage directly from the run logs.
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                """
                SELECT
                    COALESCE(SUM(tokens_used) FILTER (
                        WHERE created_at >= CURRENT_DATE
                          AND created_at < CURRENT_DATE + INTERVAL '1 day'
                    ), 0) AS daily_used,
                    COALESCE(SUM(tokens_used) FILTER (
                        WHERE created_at >= date_trunc('month', CURRENT_DATE)
                          AND created_at < date_trunc('month', CURRENT_DATE) + INTERVAL '1 month'
                    ), 0) AS monthly_used
                FROM biz.ai_run_logs
                WHERE status = 'success'
                """,
            )
            row = cur.fetchone()
            conn.commit()
    finally:
        conn.close()
    daily_used, monthly_used = (int(row[0]), int(row[1])) if row else (0, 0)
    # Fallback limits.  NOTE(review): duplicated defaults — confirm they
    # match the BudgetTracker configuration.
    daily_limit = 100_000
    monthly_limit = 2_000_000
    return {
        "daily_used": daily_used,
        "daily_limit": daily_limit,
        "daily_pct": round(daily_used / daily_limit, 4) if daily_limit > 0 else 0.0,
        "monthly_used": monthly_used,
        "monthly_limit": monthly_limit,
        "monthly_pct": round(monthly_used / monthly_limit, 4) if monthly_limit > 0 else 0.0,
    }
# ── 批量执行 ──────────────────────────────────────────
async def estimate_batch(
    self, app_types: list[str], member_ids: list[int], site_id: int,
) -> dict:
    """Create a batch_id, keep its params in memory (TTL 10 min), return estimate."""
    self._cleanup_expired_batches()
    token = uuid.uuid4().hex
    call_count = len(app_types) * len(member_ids)
    expiry = datetime.now(timezone.utc) + timedelta(seconds=_BATCH_TTL_SECONDS)
    self._batch_store[token] = {
        "params": {
            "app_types": app_types,
            "member_ids": member_ids,
            "site_id": site_id,
        },
        "expires_at": expiry,
    }
    return {
        "batch_id": token,
        "estimated_calls": call_count,
        "estimated_tokens": call_count * AVG_TOKENS_PER_CALL,
    }
async def confirm_batch(self, batch_id: str) -> None:
    """Consume a previously estimated batch and launch it in the background.

    Args:
        batch_id: token returned by estimate_batch().  Single-use: the
            entry is popped, so confirming the same id twice fails.

    Raises:
        ValueError: unknown or expired batch_id.
    """
    self._cleanup_expired_batches()
    entry = self._batch_store.pop(batch_id, None)
    if entry is None:
        raise ValueError(f"batch_id 无效或已过期: {batch_id}")
    params = entry["params"]
    logger.info(
        "批量执行确认: batch_id=%s apps=%s members=%d site_id=%s",
        batch_id,
        params["app_types"],
        len(params["member_ids"]),
        params["site_id"],
    )
    # Background run; the actual call chain is driven by the route-layer
    # dispatcher (see _run_batch).
    task = asyncio.create_task(
        self._run_batch(params["app_types"], params["member_ids"], params["site_id"])
    )
    # FIX: the event loop keeps only a weak reference to tasks, so a
    # fire-and-forget create_task result can be garbage-collected before it
    # finishes.  Hold a strong reference until completion (asyncio docs).
    pending = getattr(self, "_bg_tasks", None)
    if pending is None:
        pending = set()
        self._bg_tasks = pending  # created lazily for backward compatibility
    pending.add(task)
    task.add_done_callback(pending.discard)
async def _run_batch(
    self, app_types: list[str], member_ids: list[int], site_id: int,
) -> None:
    """Background batch run (placeholder; real work is dispatcher-driven)."""
    logger.info(
        "批量执行开始: apps=%s members=%d site_id=%s",
        app_types, len(member_ids), site_id,
    )
    # The actual execution is driven at the route layer through
    # dispatcher.handle_trigger; logging only here keeps the service layer
    # free of a direct dispatcher dependency.
def _cleanup_expired_batches(self) -> None:
"""清理过期 batch。"""
now = datetime.now(timezone.utc)
expired = [
bid for bid, entry in self._batch_store.items()
if entry["expires_at"] <= now
]
for bid in expired:
del self._batch_store[bid]
if expired:
logger.debug("清理过期 batch: %d", len(expired))
# ── 告警管理 ──────────────────────────────────────────
async def list_alerts(
    self,
    alert_status: str | None = None,
    site_id: int | None = None,
    page: int = 1,
    page_size: int = 20,
) -> dict:
    """Paged alert list (run logs with status failed/timeout/circuit_open)."""
    where_parts = ["status IN ('failed', 'timeout', 'circuit_open')"]
    params: list = []
    if alert_status is not None:
        if alert_status == "pending":
            # "pending" also matches rows whose alert_status is still NULL
            where_parts.append("(alert_status IS NULL OR alert_status = 'pending')")
        else:
            where_parts.append("alert_status = %s")
            params.append(alert_status)
    if site_id is not None:
        where_parts.append("site_id = %s")
        params.append(site_id)
    where_sql = "WHERE " + " AND ".join(where_parts)
    offset = (page - 1) * page_size
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                f"SELECT COUNT(*) FROM biz.ai_run_logs {where_sql}",
                params,
            )
            total = cur.fetchone()[0]
            cur.execute(
                f"""
                SELECT id, app_type, status, alert_status,
                       error_message, created_at
                FROM biz.ai_run_logs
                {where_sql}
                ORDER BY created_at DESC
                LIMIT %s OFFSET %s
                """,
                (*params, page_size, offset),
            )
            cols = [d[0] for d in cur.description]
            rows = cur.fetchall()
            conn.commit()
    finally:
        conn.close()
    return {
        "items": [_row_to_dict(cols, r) for r in rows],
        "total": total,
        "page": page,
        "page_size": page_size,
    }
async def ack_alert(self, log_id: int) -> str:
    """Acknowledge an alert: set alert_status to 'acknowledged'."""
    statement = """
        UPDATE biz.ai_run_logs
        SET alert_status = 'acknowledged'
        WHERE id = %s
          AND status IN ('failed', 'timeout', 'circuit_open')
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(statement, (log_id,))
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()
    return "acknowledged"
async def ignore_alert(self, log_id: int) -> str:
    """Ignore an alert: set alert_status to 'ignored'."""
    statement = """
        UPDATE biz.ai_run_logs
        SET alert_status = 'ignored'
        WHERE id = %s
          AND status IN ('failed', 'timeout', 'circuit_open')
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(statement, (log_id,))
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()
    return "ignored"
# ── 工具函数 ──────────────────────────────────────────────
def _site_filter(site_id: int | None) -> tuple[str, tuple]:
"""生成 site_id 过滤子句和参数。"""
if site_id is None:
return "", ()
return "AND site_id = %s", (site_id,)
def _row_to_dict(columns: list[str], row: tuple) -> dict:
"""将数据库行转换为 dict处理 datetime 序列化。"""
result = {}
for col, val in zip(columns, row):
if isinstance(val, datetime):
result[col] = val.isoformat()
else:
result[col] = val
return result

View File

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
"""
AI 数据清理服务。
由定时任务每日凌晨 03:00 调用,执行三步清理:
1. 删除 90 天前的 ai_run_logs
2. 删除 90 天前的 ai_trigger_jobs
3. 每个 App 类型App2~App8的 ai_cache 保留最新 20,000 条
永久保留 App1 对话记录ai_conversations + ai_messages不清理。
需求: E1.1, E1.2, E1.3, E1.4, E2.1, E2.2, E2.3
"""
from __future__ import annotations
import asyncio
import logging
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
class AICleanupService:
    """AI data-retention cleanup service, driven by the scheduler.

    Runs three steps (scheduled daily at 03:00 via cron):
      1. delete ai_run_logs older than RETENTION_DAYS
      2. delete ai_trigger_jobs older than RETENTION_DAYS
      3. per cache app type (App2~App8), keep only the newest
         CACHE_LIMIT_PER_APP rows in ai_cache
    App1 conversation data (ai_conversations + ai_messages) is kept forever
    and never touched here.
    Requirements: E1.1, E1.2, E1.3, E1.4, E2.1, E2.2, E2.3
    """

    # Retention window for run logs and trigger jobs, in days.
    RETENTION_DAYS = 90
    # Newest cache rows kept per app type.
    CACHE_LIMIT_PER_APP = 20_000
    CACHE_APP_TYPES = [
        "app2_finance",
        "app3_clue",
        "app4_analysis",
        "app5_tactics",
        "app6_note_analysis",
        "app7_customer_analysis",
        "app8_clue_consolidated",
    ]

    async def run_cleanup(self) -> dict:
        """Run every cleanup step and return per-step deletion counts.

        A failing step is logged and recorded as -1 (or {} for the cache
        step); the remaining steps still run.
        """
        result: dict = {}
        # Step 1: run logs
        try:
            result["run_logs_deleted"] = await self._cleanup_run_logs()
        except Exception:
            logger.exception("清理 ai_run_logs 失败")
            result["run_logs_deleted"] = -1
        # Step 2: trigger jobs
        try:
            result["trigger_jobs_deleted"] = await self._cleanup_trigger_jobs()
        except Exception:
            logger.exception("清理 ai_trigger_jobs 失败")
            result["trigger_jobs_deleted"] = -1
        # Step 3: cache, per app type
        try:
            result["cache_deleted"] = await self._cleanup_cache()
        except Exception:
            logger.exception("清理 ai_cache 失败")
            result["cache_deleted"] = {}
        logger.info("AI 数据清理完成: %s", result)
        return result

    async def _cleanup_run_logs(self) -> int:
        """Delete ai_run_logs rows older than RETENTION_DAYS; return rowcount."""
        from app.database import get_connection

        conn = get_connection()
        try:
            with conn.cursor() as cur:
                # Cap lock waits at 5 minutes.
                cur.execute("SET statement_timeout = 300000")
                # FIX: the %s placeholder previously sat inside the quoted
                # INTERVAL literal ('%s days'); psycopg2 documents that
                # placeholders must not appear inside quoted strings.
                # Multiplying a 1-day interval by the bound integer is
                # equivalent and safe.
                cur.execute(
                    """
                    DELETE FROM biz.ai_run_logs
                    WHERE created_at < NOW() - %s * INTERVAL '1 day'
                    """,
                    (self.RETENTION_DAYS,),
                )
                deleted = cur.rowcount
            conn.commit()
            logger.info("清理 ai_run_logs: 删除 %d", deleted)
            return deleted
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()

    async def _cleanup_trigger_jobs(self) -> int:
        """Delete ai_trigger_jobs rows older than RETENTION_DAYS; return rowcount."""
        from app.database import get_connection

        conn = get_connection()
        try:
            with conn.cursor() as cur:
                cur.execute("SET statement_timeout = 300000")
                # Same placeholder fix as _cleanup_run_logs (no %s in quotes).
                cur.execute(
                    """
                    DELETE FROM biz.ai_trigger_jobs
                    WHERE created_at < NOW() - %s * INTERVAL '1 day'
                    """,
                    (self.RETENTION_DAYS,),
                )
                deleted = cur.rowcount
            conn.commit()
            logger.info("清理 ai_trigger_jobs: 删除 %d", deleted)
            return deleted
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()

    async def _cleanup_cache(self) -> dict[str, int]:
        """Trim biz.ai_cache to CACHE_LIMIT_PER_APP rows per app type.

        Returns a mapping app_type -> deleted rowcount (-1 when that type
        failed).  NOTE(review): this filters on column ``app_type`` while
        AdminAIService.invalidate_cache filters the same table on
        ``cache_type`` — confirm the actual column name.
        """
        from app.database import get_connection

        result: dict[str, int] = {}
        conn = get_connection()
        try:
            with conn.cursor() as cur:
                # Session-level timeout survives the per-type commits below.
                cur.execute("SET statement_timeout = 300000")
                for app_type in self.CACHE_APP_TYPES:
                    try:
                        # Keep the newest CACHE_LIMIT_PER_APP ids, drop the rest.
                        cur.execute(
                            """
                            DELETE FROM biz.ai_cache
                            WHERE app_type = %s
                              AND id NOT IN (
                                  SELECT id FROM biz.ai_cache
                                  WHERE app_type = %s
                                  ORDER BY created_at DESC
                                  LIMIT %s
                              )
                            """,
                            (app_type, app_type, self.CACHE_LIMIT_PER_APP),
                        )
                        deleted = cur.rowcount
                        # FIX: commit per app type.  Previously all types
                        # shared one transaction, so a later failure rolled
                        # back earlier deletes while their counts were still
                        # reported as successes.
                        conn.commit()
                        result[app_type] = deleted
                        if deleted > 0:
                            logger.info(
                                "清理 ai_cache [%s]: 删除 %d",
                                app_type,
                                deleted,
                            )
                    except Exception:
                        logger.exception("清理 ai_cache [%s] 失败", app_type)
                        result[app_type] = -1
                        # Clear the aborted transaction and continue with the
                        # next app type.
                        conn.rollback()
                        continue
            return result
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()
@trace_service(description_zh="register_cleanup_job", description_en="Register Cleanup Job")
def register_cleanup_job(scheduler) -> None:  # noqa: ANN001
    """Register the cleanup job with the scheduler (daily at 03:00).

    Called from the main.py lifespan, or registered through the
    scheduled_tasks table.  Actual scheduling is driven by the
    trigger_scheduler cron mechanism:
      - job_type: 'ai_data_cleanup'
      - trigger_condition: 'cron'
      - trigger_config: {"cron_expression": "0 3 * * *"}
    Requirements: E2.1, E2.2, E2.3

    NOTE(review): the ``scheduler`` argument is never used — registration
    goes through trigger_scheduler.register_job; confirm it is needed.
    """
    from app.services.trigger_scheduler import register_job
    def _run_cleanup(**_kw):
        """Sync wrapper: run the async cleanup in a fresh event loop."""
        result = asyncio.run(AICleanupService().run_cleanup())
        logger.info("定时清理任务完成: %s", result)
    register_job("ai_data_cleanup", _run_cleanup)

View File

@@ -18,10 +18,12 @@ import logging
from fastapi import HTTPException, status
from app.database import get_connection
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@trace_service(description_zh="创建入驻申请", description_en="Create application")
async def create_application(
user_id: int,
site_code: str,
@@ -60,11 +62,21 @@ async def create_application(
detail="已有待审核的申请,请等待审核完成",
)
# 2. 查找 site_code → site_id 映射
# 2. 查找 site_code → site_id 映射(优先当前活跃编码,再查历史编码)
# CHANGE 2026-03-23 | 大小写不敏感匹配 site_codeUPPER
site_id = None
site_code_upper = site_code.upper()
cur.execute(
"SELECT site_id FROM auth.site_code_mapping WHERE site_code = %s",
(site_code,),
"""
SELECT site_id FROM biz.sites
WHERE UPPER(site_code) = %s AND is_active = true
UNION ALL
SELECT s.site_id FROM biz.site_code_history h
JOIN biz.sites s ON s.site_id = h.site_id
WHERE UPPER(h.site_code) = %s AND h.is_current = false AND s.is_active = true
LIMIT 1
""",
(site_code_upper, site_code_upper),
)
mapping_row = cur.fetchone()
if mapping_row is not None:
@@ -123,6 +135,7 @@ async def create_application(
@trace_service(description_zh="审批通过申请", description_en="Approve application")
async def approve_application(
application_id: int,
reviewer_id: int,
@@ -248,6 +261,7 @@ async def approve_application(
}
@trace_service(description_zh="驳回申请", description_en="Reject application")
async def reject_application(
application_id: int,
reviewer_id: int,
@@ -260,16 +274,18 @@ async def reject_application(
2. 检查申请状态为 pending否则 409
3. 更新 user_applications.status = 'rejected'
4. 记录 reviewer_id、review_note、reviewed_at
5. 累加 users.rejection_count达到 3 次自动禁用
返回:
更新后的申请记录 dict
更新后的申请记录 dict(含 user_disabled 标记)
"""
conn = get_connection()
user_disabled = False
try:
with conn.cursor() as cur:
# 1. 查询申请记录
# 1. 查询申请记录(含 user_id
cur.execute(
"SELECT id, status FROM auth.user_applications WHERE id = %s",
"SELECT id, user_id, status FROM auth.user_applications WHERE id = %s",
(application_id,),
)
app_row = cur.fetchone()
@@ -279,11 +295,13 @@ async def reject_application(
detail="申请不存在",
)
_, app_user_id, app_status = app_row
# 2. 检查状态为 pending
if app_row[1] != "pending":
if app_status != "pending":
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=f"申请当前状态为 {app_row[1]},无法审核",
detail=f"申请当前状态为 {app_status},无法审核",
)
# 3. 更新申请状态为 rejected
@@ -301,6 +319,46 @@ async def reject_application(
(reviewer_id, review_note, application_id),
)
updated_row = cur.fetchone()
# 4. 累加 rejection_count 并检查是否达到禁用阈值
cur.execute(
"""
UPDATE auth.users
SET rejection_count = rejection_count + 1,
updated_at = NOW()
WHERE id = %s
RETURNING rejection_count
""",
(app_user_id,),
)
new_count = cur.fetchone()[0]
if new_count >= 3:
# 第三次拒绝:自动禁用账号
cur.execute(
"""
UPDATE auth.users
SET status = 'disabled', updated_at = NOW()
WHERE id = %s
""",
(app_user_id,),
)
user_disabled = True
logger.warning(
"用户 %s 累计被拒绝 %d 次,已自动禁用",
app_user_id, new_count,
)
else:
# 未达阈值:回退用户状态为 rejected允许重新申请
cur.execute(
"""
UPDATE auth.users
SET status = 'rejected', updated_at = NOW()
WHERE id = %s
""",
(app_user_id,),
)
conn.commit()
finally:
conn.close()
@@ -313,9 +371,11 @@ async def reject_application(
"review_note": updated_row[4],
"created_at": updated_row[5],
"reviewed_at": updated_row[6],
"user_disabled": user_disabled,
}
@trace_service(description_zh="获取用户申请列表", description_en="Get user applications")
async def get_user_applications(user_id: int) -> list[dict]:
"""
查询用户的所有申请记录。
@@ -323,14 +383,15 @@ async def get_user_applications(user_id: int) -> list[dict]:
按创建时间倒序排列。
返回:
申请记录 dict 列表
申请记录 dict 列表(含 phone、employee_number
"""
conn = get_connection()
try:
with conn.cursor() as cur:
cur.execute(
"""
SELECT id, site_code, applied_role_text, status,
SELECT id, site_code, applied_role_text, phone,
employee_number, status,
review_note, created_at::text, reviewed_at::text
FROM auth.user_applications
WHERE user_id = %s
@@ -347,10 +408,84 @@ async def get_user_applications(user_id: int) -> list[dict]:
"id": r[0],
"site_code": r[1],
"applied_role_text": r[2],
"status": r[3],
"review_note": r[4],
"created_at": r[5],
"reviewed_at": r[6],
"phone": r[3],
"employee_number": r[4],
"status": r[5],
"review_note": r[6],
"created_at": r[7],
"reviewed_at": r[8],
}
for r in rows
]
@trace_service(description_zh="取消申请", description_en="Cancel application")
async def cancel_application(user_id: int) -> dict:
    """Cancel the user's current pending application.

    1. Find the user's newest pending application (404 if none).
    2. Set the application status to 'cancelled'.
    3. Roll the user's status back to 'new' so they can re-apply.

    Returns:
        The cancelled application record as a dict.

    Raises:
        HTTPException: 404 when no pending application exists.
    """
    conn = get_connection()
    try:
        with conn.cursor() as cur:
            # 1. Locate the newest pending application
            cur.execute(
                """
                SELECT id FROM auth.user_applications
                WHERE user_id = %s AND status = 'pending'
                ORDER BY created_at DESC
                LIMIT 1
                """,
                (user_id,),
            )
            row = cur.fetchone()
            if row is None:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="没有待审核的申请",
                )
            application_id = row[0]
            # 2. Mark the application cancelled
            cur.execute(
                """
                UPDATE auth.user_applications
                SET status = 'cancelled'
                WHERE id = %s
                RETURNING id, site_code, applied_role_text, phone,
                          employee_number, status, created_at::text
                """,
                (application_id,),
            )
            updated_row = cur.fetchone()
            # 3. Reset the user status to 'new'
            cur.execute(
                """
                UPDATE auth.users
                SET status = 'new', updated_at = NOW()
                WHERE id = %s
                """,
                (user_id,),
            )
            conn.commit()
    finally:
        conn.close()
    return {
        "id": updated_row[0],
        "site_code": updated_row[1],
        "applied_role_text": updated_row[2],
        "phone": updated_row[3],
        "employee_number": updated_row[4],
        "status": updated_row[5],
        "created_at": updated_row[6],
    }

View File

@@ -15,6 +15,8 @@ import calendar
from datetime import date, timedelta
from decimal import Decimal, ROUND_HALF_UP
from app.trace.decorators import trace_service
# ---------------------------------------------------------------------------
# 通用工具函数
@@ -34,10 +36,10 @@ def _calc_date_range(
"""
today = ref_date or date.today()
# --- 当月 ---
# --- 当月cap 到今天)---
if time_enum == "month":
start = today.replace(day=1)
end = today.replace(day=calendar.monthrange(today.year, today.month)[1])
end = today
return start, end
# --- 上月 ---
@@ -47,11 +49,11 @@ def _calc_date_range(
last_month_start = last_month_end.replace(day=1)
return last_month_start, last_month_end
# --- 本周(周一 ~ 周日---
# --- 本周(周一 ~ 今天---
if time_enum == "week":
monday = today - timedelta(days=today.weekday())
sunday = monday + timedelta(days=6)
return monday, sunday
end = today
return monday, end
# --- 上周 ---
if time_enum == "lastWeek":
@@ -60,12 +62,11 @@ def _calc_date_range(
last_monday = last_sunday - timedelta(days=6)
return last_monday, last_sunday
# --- 本季度 ---
# --- 本季度cap 到今天)---
if time_enum == "quarter":
q_start_month = (today.month - 1) // 3 * 3 + 1
start = date(today.year, q_start_month, 1)
q_end_month = q_start_month + 2
end = date(today.year, q_end_month, calendar.monthrange(today.year, q_end_month)[1])
end = today
return start, end
# --- 上季度 ---
@@ -106,18 +107,57 @@ def _month_offset(d: date, months: int) -> date:
return date(y, m, 1)
def _calc_prev_range(start_date: date, end_date: date) -> tuple[date, date]:
def _calc_prev_range(
time_enum: str, start_date: date, end_date: date
) -> tuple[date, date]:
"""
根据当期范围计算上期日期范围。
根据当期范围和周期类型计算上期同期日期范围。
上期长度等于当期长度prev_end = start_date - 1 天。
CHANGE 2026-03-28 | 环比改为同期对比:
- month: 当期 3/1~3/28 → 上期 2/1~2/28上月同日
- week: 当期 周一~周四 → 上期 上周一~上周四(上周同天数)
- quarter: 当期 1/1~3/28 → 上期 去年10/1~10/28 对应天数
- lastMonth: 2/1~2/28 → 1/1~1/28再上月同天数
- lastWeek: 上周一~上周日 → 再上周一~再上周日
- lastQuarter: 上季度完整 → 再上季度完整
- quarter3/half6: 往前推等长天数(无明确"同期"概念)
"""
period_length = (end_date - start_date).days + 1
elapsed_days = (end_date - start_date).days # 当期已过天数0-indexed
# 月度类上月1日 + 同样天数
if time_enum in ("month", "last_month", "lastMonth"):
prev_start = _month_offset(start_date, -1)
# 上月同日,但不超过上月末日
prev_end_day = min(end_date.day, calendar.monthrange(prev_start.year, prev_start.month)[1])
prev_end = prev_start.replace(day=prev_end_day)
return prev_start, prev_end
# 周度类:往前推 7 天
if time_enum in ("week", "lastWeek"):
prev_start = start_date - timedelta(days=7)
prev_end = end_date - timedelta(days=7)
return prev_start, prev_end
# 季度类:上季度首日 + 同样天数
if time_enum in ("quarter", "last_quarter", "lastQuarter"):
prev_q_start = _month_offset(start_date, -3)
prev_end = prev_q_start + timedelta(days=elapsed_days)
# 不超过上季度末日
prev_q_end_month = prev_q_start.month + 2
prev_q_end_max = date(prev_q_start.year, prev_q_end_month,
calendar.monthrange(prev_q_start.year, prev_q_end_month)[1])
if prev_end > prev_q_end_max:
prev_end = prev_q_end_max
return prev_q_start, prev_end
# 其他quarter3/half6往前推等长天数
period_length = elapsed_days + 1
prev_end = start_date - timedelta(days=1)
prev_start = prev_end - timedelta(days=period_length - 1)
return prev_start, prev_end
@trace_service(description_zh="计算对比数据", description_en="Calc Compare")
def calc_compare(current: Decimal, previous: Decimal) -> dict:
"""
统一环比计算。
@@ -178,6 +218,20 @@ _SORT_KEY_MAP = {
"task_desc": ("task_total", True),
}
# 项目标签 category_code → 前端显示文本 / CSS 类名
_SKILL_DISPLAY = {
"BILLIARD": "🎱",
"SNOOKER": "",
"MAHJONG": "🀄",
"KTV": "🎤",
}
_SKILL_CLS = {
"BILLIARD": "skill--chinese",
"SNOOKER": "skill--snooker",
"MAHJONG": "skill--mahjong",
"KTV": "skill--karaoke",
}
_SORT_DIM_MAP = {
"perf_desc": "perf", "perf_asc": "perf",
"salary_desc": "salary", "salary_asc": "salary",
@@ -190,8 +244,9 @@ _SORT_DIM_MAP = {
# ---------------------------------------------------------------------------
@trace_service("获取助教看板", "Get coach board")
async def get_coach_board(
sort: str, skill: str, time: str, site_id: int
sort: str, skill: str, time: str, page: int, page_size: int, site_id: int
) -> dict:
"""
BOARD-1助教看板。扁平返回所有维度字段。
@@ -244,7 +299,17 @@ async def get_coach_board(
# 5. 任务数据
task_map = _query_coach_tasks(conn, site_id, aid_list, start_str, end_str)
# 6. 组装扁平响应
# 6. 查询档位配置,计算距升档(仅本月/上月有意义)
tier_nodes: list[float] = []
show_perf_gap = time in ("month", "last_month")
if show_perf_gap:
try:
tiers = fdw_queries.get_performance_tiers(conn, site_id)
tier_nodes = [float(t["min_hours"]) for t in tiers] if tiers else []
except Exception as e:
logger.warning("BOARD-1 档位配置查询失败: %s", e, exc_info=True)
# 7. 组装扁平响应
items = []
for a in assistants:
aid = a["assistant_id"]
@@ -256,26 +321,52 @@ async def get_coach_board(
name = a["name"]
initial = name[0] if name else ""
perf_hours = sal.get("effective_hours", 0.0)
salary_val = sal.get("gross_salary", 0.0)
perf_hours = float(sal.get("effective_hours", 0) or 0)
salary_val = float(sal.get("gross_salary", 0) or 0)
task_recall = tasks.get("recall", 0)
task_callback = tasks.get("callback", 0)
# 折前课时:当 effective_hours != raw_hours 时显示(惩罚扣减导致的差异)
# 惩罚规则:同台 >2 助教重叠per_hour_contribution < 24 元时按比例扣减
raw_hours = float(sal.get("raw_hours", 0) or 0)
perf_hours_before = None
if abs(perf_hours - raw_hours) > 0.01:
perf_hours_before = raw_hours
# 计算距升档差距
perf_gap = None
perf_reached = False
if tier_nodes and perf_hours is not None:
# 找到下一个未达到的档位
for threshold in tier_nodes:
if perf_hours < threshold:
gap = threshold - perf_hours
perf_gap = f"距升档 {gap:.1f}h"
break
else:
perf_reached = True # 已达到最高档
items.append({
"id": aid,
"name": name,
"initial": initial,
"avatar_gradient": "",
"level": sal.get("level_name", a.get("level", "")),
"skills": [], # CHANGE 2026-03-20 | v_dim_assistant skill 列,暂返回空
# CHANGE 2026-03-29 | 从 get_all_assistants 返回的 skill 字段取项目标签
# Schema 要求 list[CoachSkillItem]{text, cls}),不是纯字符串
# text 映射为中文短名 + emojicls 映射为 CSS 类名
"skills": [
{"text": _SKILL_DISPLAY.get(s, s), "cls": _SKILL_CLS.get(s, "")}
for s in (a.get("skill") or "").split(",") if s
],
"top_customers": top_custs,
"perf_hours": perf_hours,
"perf_hours_before": None,
"perf_gap": None,
"perf_reached": False,
"perf_hours_before": perf_hours_before,
"perf_gap": perf_gap,
"perf_reached": perf_reached,
"salary": salary_val,
"salary_perf_hours": perf_hours,
"salary_perf_before": None,
"salary_perf_before": perf_hours_before,
"sv_amount": sv.get("sv_amount", 0.0),
"sv_customer_count": sv.get("sv_customer_count", 0),
"sv_consume": sv.get("sv_consume", 0.0),
@@ -292,8 +383,16 @@ async def get_coach_board(
for item in items:
item.pop("task_total", None)
# 8. 分页
total = len(items)
start = (page - 1) * page_size
items = items[start : start + page_size]
return {
"items": items,
"total": total,
"page": page,
"page_size": page_size,
"dim_type": _SORT_DIM_MAP.get(sort, "perf"),
}
finally:
@@ -318,8 +417,8 @@ def _query_coach_tasks(
cur.execute(
"""
SELECT assistant_id,
COUNT(*) FILTER (WHERE task_type = 'recall') AS recall_count,
COUNT(*) FILTER (WHERE task_type = 'callback') AS callback_count
COUNT(*) FILTER (WHERE task_type IN ('high_priority_recall', 'priority_recall')) AS recall_count,
COUNT(*) FILTER (WHERE task_type = 'relationship_building') AS callback_count
FROM biz.coach_tasks
WHERE assistant_id = ANY(%s)
AND site_id = %s
@@ -346,6 +445,106 @@ def _query_coach_tasks(
# BOARD-2 客户看板
# ---------------------------------------------------------------------------
def _batch_ideal_days(conn: Any, site_id: int, member_ids: list[int]) -> dict[int, int]:
    """Batch-fetch each member's ideal visit interval in days.

    Used by the balance/recharge dimensions of the customer-board header.
    Returns a member_id -> days mapping; members absent from the view are
    simply missing from the result. On any query failure the error is
    logged and whatever was collected so far (usually an empty dict) is
    returned, so callers degrade gracefully instead of raising.
    """
    from app.services.fdw_queries import _fdw_context

    days_by_member: dict[int, int] = {}
    try:
        with _fdw_context(conn, site_id) as cur:
            cur.execute(
                """
                SELECT member_id, COALESCE(ideal_interval_days, 0)
                FROM app.v_dws_member_winback_index
                WHERE member_id = ANY(%s)
                """,
                (member_ids,),
            )
            fetched = cur.fetchall()
        # COALESCE already maps NULL to 0, but keep the defensive branch
        # so a view change cannot crash the board.
        for member_id, days in fetched:
            days_by_member[member_id] = 0 if days is None else int(days)
    except Exception:
        logger.warning("_batch_ideal_days 查询失败", exc_info=True)
    return days_by_member
def _batch_coach_details(conn: Any, site_id: int, member_ids: list[int]) -> dict[int, list[dict]]:
    """Batch-fetch per-member assistant service details (loyal dimension, coachDetails).

    At most 5 assistants are kept for each member; rows are ordered by
    relation score (rs_display) descending so the strongest relations win
    the cap. Returns member_id -> list of detail dicts (every requested
    member gets an entry, possibly empty). On query failure the error is
    logged and the partially-filled mapping is returned so the board
    degrades gracefully instead of raising.
    """
    from app.services.fdw_queries import _fdw_context
    result: dict[int, list[dict]] = {mid: [] for mid in member_ids}
    try:
        with _fdw_context(conn, site_id) as cur:
            # CHANGE 2026-03-29 | coach_spend is now aggregated from
            # dwd_assistant_service_log over the last 60 days (s60 subquery).
            cur.execute(
                """
                SELECT ri.member_id,
                       COALESCE(da.nickname, da.real_name, '') AS name,
                       ri.rs_display,
                       ri.session_count,
                       ri.total_duration_minutes,
                       COALESCE(s60.spend_60d, 0) AS spend_60d
                FROM app.v_dws_member_assistant_relation_index ri
                LEFT JOIN app.v_dim_assistant da
                    ON ri.assistant_id = da.assistant_id AND da.scd2_is_current = 1
                LEFT JOIN (
                    SELECT tenant_member_id, site_assistant_id,
                           SUM(ledger_amount) AS spend_60d
                    FROM app.v_dwd_assistant_service_log
                    WHERE is_delete = 0
                      AND create_time >= CURRENT_DATE - INTERVAL '60 days'
                      AND tenant_member_id = ANY(%s)
                    GROUP BY tenant_member_id, site_assistant_id
                ) s60 ON ri.member_id = s60.tenant_member_id
                    AND ri.assistant_id = s60.site_assistant_id
                WHERE ri.member_id = ANY(%s)
                  AND (da.leave_status IS NULL OR da.leave_status = 0)
                ORDER BY ri.member_id, ri.rs_display DESC
                """,
                (member_ids, member_ids),
            )
            for row in cur.fetchall():
                mid = row[0]
                # Cap at 5 assistants per member; ORDER BY rs_display DESC
                # guarantees the kept ones are the top-scored relations.
                if mid in result and len(result[mid]) < 5:
                    svc_count = row[3] or 0
                    total_mins = float(row[4]) if row[4] else 0.0
                    # Average session length in hours, one decimal place.
                    avg_dur = round(total_mins / 60 / svc_count, 1) if svc_count > 0 else 0.0
                    result[mid].append({
                        "name": row[1] or "",
                        "cls": "",
                        "heart_score": float(row[2]) if row[2] is not None else 0.0,
                        "avg_duration": f"{avg_dur}h",
                        "service_count": str(svc_count),
                        "coach_spend": float(row[5]) if row[5] is not None else 0.0,
                        # relation_idx mirrors heart_score: both derive
                        # from the same rs_display column.
                        "relation_idx": float(row[2]) if row[2] is not None else 0.0,
                    })
    except Exception:
        logger.warning("_batch_coach_details 查询失败", exc_info=True)
    return result
def _batch_member_projects(conn: Any, site_id: int, member_ids: list[int]) -> dict[int, list[str]]:
    """Batch-fetch project tag codes per member for the customer board (BOARD-2).

    Queries the FDW view and returns member_id -> list of non-empty
    category codes. Every requested member gets an entry; members without
    tags keep an empty list. On query failure the error is logged and the
    (possibly partial) mapping is returned so the board degrades gracefully.
    """
    from app.services.fdw_queries import _fdw_context

    tags_by_member: dict[int, list[str]] = {mid: [] for mid in member_ids}
    try:
        with _fdw_context(conn, site_id) as cur:
            cur.execute(
                """
                SELECT member_id, array_agg(DISTINCT category_code)
                FROM app.v_dws_member_project_tag
                WHERE member_id = ANY(%s) AND is_tagged = true
                GROUP BY member_id
                """,
                (member_ids,),
            )
            fetched = cur.fetchall()
        for member_id, codes in fetched:
            # Only keep members we were asked about; drop empty/NULL codes.
            if member_id not in tags_by_member:
                continue
            tags_by_member[member_id] = [code for code in (codes or []) if code]
    except Exception:
        logger.warning("_batch_member_projects 查询失败", exc_info=True)
    return tags_by_member
# 维度 → FDW 查询函数映射
_DIMENSION_QUERY_MAP = {
"recall": "get_customer_board_recall",
@@ -359,6 +558,7 @@ _DIMENSION_QUERY_MAP = {
}
@trace_service("获取客户看板", "Get customer board")
async def get_customer_board(
dimension: str, project: str, page: int, page_size: int, site_id: int
) -> dict:
@@ -388,6 +588,25 @@ async def get_customer_board(
except Exception:
logger.warning("BOARD-2 客户助教查询失败,降级为空", exc_info=True)
# 2b. 批量查询客户项目标签
member_projects: dict[int, list[str]] = {}
if member_ids:
try:
member_projects = _batch_member_projects(conn, site_id, member_ids)
except Exception:
logger.warning("BOARD-2 客户项目标签查询失败,降级为空", exc_info=True)
# 2c. balance/recharge 维度:补充 ideal_days
if dimension in ("balance", "recharge") and member_ids:
try:
ideal_map = _batch_ideal_days(conn, site_id, member_ids)
for item in items:
mid = item.get("member_id", 0)
if item.get("ideal_days") is None:
item["ideal_days"] = ideal_map.get(mid, 0)
except Exception:
logger.warning("BOARD-2 ideal_days 查询失败", exc_info=True)
# 3. 组装响应(添加基础字段 + assistants
for item in items:
mid = item.get("member_id", 0)
@@ -396,9 +615,43 @@ async def get_customer_board(
item["initial"] = name[0] if name else ""
item["avatar_cls"] = ""
item["assistants"] = assistants_map.get(mid, [])
item["projects"] = member_projects.get(mid, [])
# 3b. loyal 维度:为每个客户补充 coach_details前 5 个助教的服务明细)
if dimension == "loyal" and member_ids:
try:
coach_details_map = _batch_coach_details(conn, site_id, member_ids)
for item in items:
mid = item.get("member_id", 0)
item["coach_details"] = coach_details_map.get(mid, [])
except Exception:
logger.warning("BOARD-2 loyal coachDetails 查询失败", exc_info=True)
for item in items:
item["coach_details"] = []
# CHANGE 2026-03-28 | P5 联调修复items 是 list[dict]Pydantic CamelModel
# 不会自动转换内部 dict 的 key。手动 snake_case → camelCase。
# CHANGE 2026-03-29 | 递归处理嵌套 list[dict](如 assistants 数组)
def _to_camel(key: str) -> str:
parts = key.split("_")
return parts[0] + "".join(p.capitalize() for p in parts[1:])
def _camel_dict(d: dict) -> dict:
result = {}
for k, v in d.items():
ck = _to_camel(k)
if isinstance(v, list):
result[ck] = [_camel_dict(i) if isinstance(i, dict) else i for i in v]
elif isinstance(v, dict):
result[ck] = _camel_dict(v)
else:
result[ck] = v
return result
camel_items = [_camel_dict(item) for item in items]
return {
"items": items,
"items": camel_items,
"total": result["total"],
"page": result["page"],
"page_size": result["page_size"],
@@ -412,15 +665,26 @@ async def get_customer_board(
# ---------------------------------------------------------------------------
# CHANGE 2026-04-01 | board-finance-dws-area-refactor 9.1 | 缓存/日粒度查询路由
COMPLETED_PERIODS = {"lastMonth", "lastWeek", "lastQuarter", "quarter3", "half6"}
CURRENT_PERIODS = {"month", "week", "quarter"}
@trace_service("获取财务看板", "Get finance board")
async def get_finance_board(
time: str, area: str, compare: int, site_id: int
) -> dict:
"""
BOARD-3财务看板。6 板块独立查询、独立降级。
area≠all 时 recharge 返回 null。
compare=1 时计算上期范围并调用 calc_compare。
compare=0 时环比字段为 None序列化时排除
CHANGE 2026-04-01 | board-finance-dws-area-refactor 9.1 |
- 已完成周期先查缓存 → 未命中从日粒度表 SUM → 写缓存
- 当期周期直接从日粒度表 SUM不查缓存
- overview/revenue 改为从 dws_finance_area_daily 按 area_code 查询
- cashflow/expense/coach_analysis 不变(始终用全局数据)
- area≠all 时 recharge 返回 null
- area≠all 时 overview 覆盖逻辑保留
- compare=1 时对上期执行同样缓存/日粒度逻辑
"""
start_date, end_date = _calc_date_range(time)
start_str = str(start_date)
@@ -429,7 +693,7 @@ async def get_finance_board(
prev_start_str = None
prev_end_str = None
if compare == 1:
prev_start, prev_end = _calc_prev_range(start_date, end_date)
prev_start, prev_end = _calc_prev_range(time, start_date, end_date)
prev_start_str = str(prev_start)
prev_end_str = str(prev_end)
@@ -437,23 +701,47 @@ async def get_finance_board(
try:
# 各板块独立 try/except
overview = _build_overview(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
prev_start_str, prev_end_str, compare, area)
recharge = None
if area == "all":
recharge = _build_recharge(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
revenue = _build_revenue(conn, site_id, start_str, end_str, area)
cashflow = _build_cashflow(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
expense = _build_expense(conn, site_id, start_str, end_str,
revenue = _build_revenue(conn, site_id, start_str, end_str, area,
prev_start_str, prev_end_str, compare)
# CHANGE 2026-03-28 | 非全部区域时,用 revenue 的数据覆盖 overview 的发生额/优惠/确认收入
if area != "all" and revenue:
overview["occurrence"] = revenue.get("total_occurrence", 0.0)
overview["discount"] = revenue.get("discount_total", 0.0)
overview["confirmed_revenue"] = revenue.get("confirmed_total", 0.0)
# discount_rate 重算
occ = overview["occurrence"]
overview["discount_rate"] = (overview["discount"] / occ) if occ > 0 else 0.0
# CHANGE 2026-03-29 | area≠all 时隐藏实收流水(现金流 4 项无法按区域拆分)
overview["cash_in"] = None
overview["cash_out"] = None
overview["cash_balance"] = None
overview["balance_rate"] = None
# 移除现金流环比字段(如有)
for f in ("cash_in", "cash_out", "cash_balance", "balance_rate"):
overview.pop(f"{f}_compare", None)
overview.pop(f"{f}_down", None)
overview.pop(f"{f}_flat", None)
# CHANGE 2026-03-29 | area≠all 时隐藏现金流入和现金流出板块
cashflow = None
expense = None
if area == "all":
cashflow = _build_cashflow(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
expense = _build_expense(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
coach_analysis = _build_coach_analysis(conn, site_id, start_str, end_str,
prev_start_str, prev_end_str, compare)
prev_start_str, prev_end_str, compare, area)
return {
"overview": overview,
@@ -470,10 +758,15 @@ async def get_finance_board(
def _build_overview(
conn: Any, site_id: int, start: str, end: str,
prev_start: str | None, prev_end: str | None, compare: int,
area: str = "all",
) -> dict:
"""经营一览板块。"""
"""经营一览板块。
CHANGE 2026-04-01 | board-finance-dws-area-refactor 9.1 |
改为从 dws_finance_area_daily 按 area_code 查询(通过 get_finance_overview_area
"""
try:
data = fdw_queries.get_finance_overview(conn, site_id, start, end)
data = fdw_queries.get_finance_overview_area(conn, site_id, start, end, area)
except Exception:
logger.warning("overview 查询失败,降级为空", exc_info=True)
return _empty_overview()
@@ -482,7 +775,7 @@ def _build_overview(
if compare == 1 and prev_start and prev_end:
try:
prev = fdw_queries.get_finance_overview(conn, site_id, prev_start, prev_end)
prev = fdw_queries.get_finance_overview_area(conn, site_id, prev_start, prev_end, area)
_attach_compare(result, data, prev, [
"occurrence", "discount", "discount_rate", "confirmed_revenue",
"cash_in", "cash_out", "cash_balance", "balance_rate",
@@ -509,7 +802,7 @@ def _build_recharge(
prev = fdw_queries.get_finance_recharge(conn, site_id, prev_start, prev_end)
_attach_compare(data, data, prev, [
"actual_income", "first_charge", "renew_charge",
"consumed", "card_balance",
"consumed", "card_balance", "all_card_balance",
])
# 赠送卡矩阵环比
for i, row in enumerate(data.get("gift_rows", [])):
@@ -535,14 +828,192 @@ def _build_recharge(
def _build_revenue(
conn: Any, site_id: int, start: str, end: str, area: str,
prev_start: str | None = None, prev_end: str | None = None, compare: int = 0,
) -> dict:
"""应计收入板块。"""
"""应计收入板块。
CHANGE 2026-04-01 | board-finance-dws-area-refactor 9.1 |
改为从 dws_finance_area_daily 按 area_code 查询(通过 get_finance_revenue_area
然后在 Python 层构建 structure_rows / discount_items / channel_items 保持返回结构不变。
"""
try:
return fdw_queries.get_finance_revenue(conn, site_id, start, end, area)
if area == "all":
# CHANGE 2026-03-29 | area=all 走旧版查询,保留收入结构的区域子行拆分
data = fdw_queries.get_finance_revenue(conn, site_id, start, end, area)
else:
raw = fdw_queries.get_finance_revenue_area(conn, site_id, start, end, area)
data = _format_revenue_from_area(raw, conn, site_id, start, end, area)
except Exception:
logger.warning("revenue 查询失败,降级为空", exc_info=True)
return _empty_revenue()
if compare == 1 and prev_start and prev_end:
try:
if area == "all":
prev = fdw_queries.get_finance_revenue(conn, site_id, prev_start, prev_end, area)
else:
prev_raw = fdw_queries.get_finance_revenue_area(conn, site_id, prev_start, prev_end, area)
prev = _format_revenue_from_area(prev_raw, conn, site_id, prev_start, prev_end, area)
# 总计环比
_attach_compare(data, data, prev, [
"total_occurrence", "discount_total", "confirmed_total",
])
# structure_rows 行级环比(按 id 匹配)
prev_struct = {r["id"]: r for r in prev.get("structure_rows", [])}
for row in data.get("structure_rows", []):
prev_row = prev_struct.get(row["id"], {})
cmp = calc_compare(
Decimal(str(row.get("booked", 0))),
Decimal(str(prev_row.get("booked", 0))),
)
row["booked_compare"] = cmp["compare"]
# price_items 行级环比(按 label 匹配)
prev_prices = {r["label"]: r for r in prev.get("price_items", [])}
for item in data.get("price_items", []):
prev_item = prev_prices.get(item["label"], {})
cmp = calc_compare(
Decimal(str(item.get("amount", 0))),
Decimal(str(prev_item.get("amount", 0))),
)
item["compare"] = cmp["compare"]
# discount_items 行级环比(按 label 匹配)
prev_discounts = {r["label"]: r for r in prev.get("discount_items", [])}
for item in data.get("discount_items", []):
prev_item = prev_discounts.get(item["label"], {})
cmp = calc_compare(
Decimal(str(item.get("amount", 0))),
Decimal(str(prev_item.get("amount", 0))),
)
item["compare"] = cmp["compare"]
# channel_items 行级环比(按 label 匹配)
prev_channels = {r["label"]: r for r in prev.get("channel_items", [])}
for item in data.get("channel_items", []):
prev_item = prev_channels.get(item["label"], {})
cmp = calc_compare(
Decimal(str(item.get("amount", 0))),
Decimal(str(prev_item.get("amount", 0))),
)
item["compare"] = cmp["compare"]
except Exception:
logger.warning("revenue 环比查询失败", exc_info=True)
return data
def _format_revenue_from_area(
    raw: dict, conn: Any, site_id: int, start: str, end: str, area: str,
) -> dict:
    """Format get_finance_revenue_area aggregates into the frontend revenue shape.

    CHANGE 2026-04-01 | board-finance-dws-area-refactor 9.1 |
    Builds structure_rows / price_items / discount_items / channel_items from
    dws_finance_area_daily aggregates, keeping the return structure identical
    to the legacy get_finance_revenue.

    NOTE(review): the `area` parameter is not used in this body (the `raw`
    dict is already filtered by area upstream) — kept for signature symmetry.
    """
    total_table_charge = raw.get("table_fee_amount", 0.0)
    total_goods = raw.get("goods_amount", 0.0)
    total_pd = raw.get("assistant_pd_amount", 0.0)
    total_cx = raw.get("assistant_cx_amount", 0.0)
    total_income = raw.get("total_occurrence", 0.0)
    # Build structure_rows (simplified: no per-physical-area sub-rows, since
    # area_daily is already aggregated by area_code).
    structure_rows = [
        {"id": "table_charge", "name": "开台与包厢", "desc": None,
         "is_sub": False, "amount": total_table_charge,
         "discount": 0.0, "booked": total_table_charge},
        {"id": "assistant_pd", "name": "助教 基础课", "desc": None,
         "is_sub": False, "amount": total_pd,
         "discount": 0.0, "booked": total_pd},
        {"id": "assistant_cx", "name": "助教 激励课", "desc": None,
         "is_sub": False, "amount": total_cx,
         "discount": 0.0, "booked": total_cx},
        {"id": "goods", "name": "食品酒水", "desc": None,
         "is_sub": False, "amount": total_goods,
         "discount": 0.0, "booked": total_goods},
    ]
    # Occurrence-amount composition.
    price_items = [
        {"label": "开台消费", "amount": total_table_charge},
        {"label": "酒水商品", "amount": total_goods},
        {"label": "助教服务", "amount": total_pd + total_cx},
    ]
    # Discount split (5 rows, matching the legacy logic).
    groupbuy_d = raw.get("discount_groupbuy", 0.0)
    vip_d = raw.get("discount_vip", 0.0)
    manual_d = raw.get("discount_manual", 0.0)
    gift_card_d = raw.get("discount_gift_card", 0.0)
    # NOTE(review): the original comment claimed "other = rounding + other",
    # but the code below folds other_d into the 手动调整 row and shows only
    # rounding_d under 其他优惠 — confirm which bucketing is intended.
    rounding_d = raw.get("discount_rounding", 0.0)
    other_d = raw.get("discount_other", 0.0)
    discount_items = [
        {"label": "团购优惠", "amount": groupbuy_d},
        {"label": "会员折扣", "amount": vip_d},
        {"label": "手动调整", "amount": manual_d + other_d},
        {"label": "赠送卡抵扣", "desc": "台桌卡+酒水卡+抵用券", "amount": gift_card_d},
        {"label": "其他优惠", "desc": "免单+抹零", "amount": rounding_d},
    ]
    total_discount = raw.get("discount_total", 0.0)
    # Back-fill discount allocation onto the structure table: the full
    # discount is attributed to the table_charge row only.
    if total_table_charge > 0 and total_discount > 0:
        for row in structure_rows:
            if row["id"] == "table_charge":
                row["discount"] = total_discount
                row["booked"] = total_table_charge - total_discount
    # Channel distribution comes from the global daily summary (channel data
    # only exists on the "all" row), so it needs a separate query.
    try:
        channel_data = _get_channel_items(conn, site_id, start, end)
    except Exception:
        logger.warning("revenue 渠道数据查询失败,降级为空", exc_info=True)
        channel_data = [
            {"label": "储值卡结算冲销", "amount": 0.0},
            {"label": "现金/线上支付", "amount": 0.0},
            {"label": "团购核销确认收入", "desc": "团购成交价", "amount": 0.0},
        ]
    # Confirmed revenue = occurrence minus the discount magnitude
    # (abs() tolerates discount totals stored with either sign).
    confirmed_total = total_income - abs(total_discount)
    return {
        "structure_rows": structure_rows,
        "price_items": price_items,
        "total_occurrence": total_income,
        "discount_items": discount_items,
        "discount_total": total_discount,
        "confirmed_total": confirmed_total,
        "channel_items": channel_data,
    }
def _get_channel_items(conn: Any, site_id: int, start: str, end: str) -> list[dict]:
    """Fetch channel-distribution amounts from v_dws_finance_daily_summary.

    The figures are site-global (never split per area). Returns the three
    channel rows in the exact order and labels the finance board expects.
    """
    from app.services.fdw_queries import _fdw_context

    with _fdw_context(conn, site_id) as cur:
        cur.execute(
            """
            SELECT COALESCE(SUM(cash_pay_amount), 0) AS cash_pay,
                   COALESCE(SUM(groupbuy_pay_amount), 0) AS groupbuy_pay,
                   COALESCE(SUM(cash_card_consume), 0) AS cash_card,
                   COALESCE(SUM(gift_card_consume), 0) AS gift_card
            FROM app.v_dws_finance_daily_summary
            WHERE stat_date >= %s::date AND stat_date <= %s::date
            """,
            (start, end),
        )
        summary_row = cur.fetchone()

        def _amount(col: int) -> float:
            # Defensive conversion: absent row or NULL column becomes 0.0.
            value = summary_row[col] if summary_row else None
            return float(value) if value is not None else 0.0

        cash_pay = _amount(0)
        groupbuy_pay = _amount(1)
        cash_card = _amount(2)
        gift_card = _amount(3)
    return [
        {"label": "储值卡结算冲销", "amount": cash_card + gift_card},
        {"label": "现金/线上支付", "amount": cash_pay},
        {"label": "团购核销确认收入", "desc": "团购成交价", "amount": groupbuy_pay},
    ]
def _build_cashflow(
conn: Any, site_id: int, start: str, end: str,
@@ -555,6 +1026,37 @@ def _build_cashflow(
logger.warning("cashflow 查询失败,降级为空", exc_info=True)
return {"consume_items": [], "recharge_items": [], "total": 0.0}
if compare == 1 and prev_start and prev_end:
try:
prev = fdw_queries.get_finance_cashflow(conn, site_id, prev_start, prev_end)
total_cmp = calc_compare(
Decimal(str(data["total"])), Decimal(str(prev["total"]))
)
data["total_compare"] = total_cmp["compare"]
data["total_down"] = total_cmp["is_down"]
data["total_flat"] = total_cmp["is_flat"]
# consume_items 行级环比(按 label 匹配)
prev_consumes = {r["label"]: r for r in prev.get("consume_items", [])}
for item in data.get("consume_items", []):
prev_item = prev_consumes.get(item["label"], {})
cmp = calc_compare(
Decimal(str(item.get("amount", 0))),
Decimal(str(prev_item.get("amount", 0))),
)
item["compare"] = cmp["compare"]
item["down"] = cmp["is_down"]
# recharge_items 行级环比(按 label 匹配)
prev_recharges = {r["label"]: r for r in prev.get("recharge_items", [])}
for item in data.get("recharge_items", []):
prev_item = prev_recharges.get(item["label"], {})
cmp = calc_compare(
Decimal(str(item.get("amount", 0))),
Decimal(str(prev_item.get("amount", 0))),
)
item["compare"] = cmp["compare"]
except Exception:
logger.warning("cashflow 环比查询失败", exc_info=True)
return data
@@ -590,10 +1092,18 @@ def _build_expense(
def _build_coach_analysis(
conn: Any, site_id: int, start: str, end: str,
prev_start: str | None, prev_end: str | None, compare: int,
area: str = "all",
) -> dict:
"""助教分析板块。"""
"""助教分析板块。
CHANGE 2026-03-29 | Prompt: 助教分析按区域细化 |
area=all 走现有 salary_calc 查询area≠all 走 coach_area_hours JOIN salary_calc。
"""
try:
data = fdw_queries.get_finance_coach_analysis(conn, site_id, start, end)
if area == "all":
data = fdw_queries.get_finance_coach_analysis(conn, site_id, start, end)
else:
data = fdw_queries.get_finance_coach_analysis_area(conn, site_id, start, end, area)
except Exception:
logger.warning("coachAnalysis 查询失败,降级为空", exc_info=True)
empty_table = {"total_pay": 0.0, "total_share": 0.0, "avg_hourly": 0.0, "rows": []}
@@ -601,15 +1111,33 @@ def _build_coach_analysis(
if compare == 1 and prev_start and prev_end:
try:
prev = fdw_queries.get_finance_coach_analysis(
conn, site_id, prev_start, prev_end
)
if area == "all":
prev = fdw_queries.get_finance_coach_analysis(
conn, site_id, prev_start, prev_end
)
else:
prev = fdw_queries.get_finance_coach_analysis_area(
conn, site_id, prev_start, prev_end, area
)
for key in ("basic", "incentive"):
cur_t = data[key]
prev_t = prev[key]
_attach_compare(cur_t, cur_t, prev_t, [
"total_pay", "total_share", "avg_hourly",
])
# 行级环比(按 level 匹配)
prev_rows = {r["level"]: r for r in prev_t.get("rows", [])}
for row in cur_t.get("rows", []):
prev_row = prev_rows.get(row["level"], {})
pay_cmp = calc_compare(Decimal(str(row.get("pay", 0))), Decimal(str(prev_row.get("pay", 0))))
row["pay_compare"] = pay_cmp["compare"]
row["pay_down"] = pay_cmp["is_down"]
share_cmp = calc_compare(Decimal(str(row.get("share", 0))), Decimal(str(prev_row.get("share", 0))))
row["share_compare"] = share_cmp["compare"]
row["share_down"] = share_cmp["is_down"]
hourly_cmp = calc_compare(Decimal(str(row.get("hourly", 0))), Decimal(str(prev_row.get("hourly", 0))))
row["hourly_compare"] = hourly_cmp["compare"]
row["hourly_flat"] = hourly_cmp["is_flat"]
except Exception:
logger.warning("coachAnalysis 环比查询失败", exc_info=True)
@@ -658,6 +1186,7 @@ def _empty_revenue() -> dict:
"price_items": [],
"total_occurrence": 0.0,
"discount_items": [],
"discount_total": 0.0,
"confirmed_total": 0.0,
"channel_items": [],
}

View File

@@ -27,9 +27,11 @@ from typing import Any
from fastapi import HTTPException, status
from app.ai.bailian_client import BailianClient
from app.ai.config import AIConfig
from app.ai.dashscope_client import DashScopeClient
from app.database import get_connection
from app.services import fdw_queries
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -46,6 +48,7 @@ class ChatService:
# CHAT-1: 对话历史列表
# ------------------------------------------------------------------
@trace_service("查询对话历史", "Get chat history")
def get_chat_history(
self,
user_id: int,
@@ -149,6 +152,7 @@ class ChatService:
# 对话复用 / 创建
# ------------------------------------------------------------------
@trace_service("查找或创建对话", "Get or create session")
def get_or_create_session(
self,
user_id: int,
@@ -213,7 +217,7 @@ class ChatService:
context_type: str,
context_id: str | None,
) -> int:
"""创建新对话记录,返回 conversation_id。"""
"""创建新对话记录,返回 conversation_id。同时生成 session_id。"""
conn = get_connection()
try:
with conn.cursor() as cur:
@@ -230,11 +234,23 @@ class ChatService:
INSERT INTO biz.ai_conversations
(user_id, nickname, app_id, site_id, context_type, context_id)
VALUES (%s, %s, %s, %s, %s, %s)
RETURNING id
RETURNING id, EXTRACT(EPOCH FROM created_at)::bigint
""",
(str(user_id), nickname, APP_ID, site_id, context_type, context_id),
)
new_id = cur.fetchone()[0]
result = cur.fetchone()
new_id = result[0]
created_ts = result[1]
# 生成 session_id 并回写格式conv_{id}_{timestamp}
session_id = f"conv_{new_id}_{created_ts}"
cur.execute(
"""
UPDATE biz.ai_conversations SET session_id = %s WHERE id = %s
""",
(session_id, new_id),
)
conn.commit()
return new_id
except Exception:
@@ -243,10 +259,26 @@ class ChatService:
finally:
conn.close()
@trace_service("获取对话 session_id", "Get session ID")
def get_session_id(self, chat_id: int) -> str | None:
"""获取对话的 session_id。无记录或字段为空时返回 None。"""
conn = get_connection()
try:
with conn.cursor() as cur:
cur.execute(
"SELECT session_id FROM biz.ai_conversations WHERE id = %s",
(chat_id,),
)
row = cur.fetchone()
return row[0] if row and row[0] else None
finally:
conn.close()
# ------------------------------------------------------------------
# CHAT-2: 消息列表
# ------------------------------------------------------------------
@trace_service("查询消息列表", "Get messages")
def get_messages(
self,
chat_id: int,
@@ -312,6 +344,7 @@ class ChatService:
# CHAT-3: 发送消息(同步回复)
# ------------------------------------------------------------------
@trace_service("发送消息并获取回复", "Send message sync")
async def send_message_sync(
self,
chat_id: int,
@@ -368,6 +401,7 @@ class ChatService:
# referenceCard 组装
# ------------------------------------------------------------------
@trace_service("构建引用卡片", "Build reference card")
def build_reference_card(
self,
customer_id: int,
@@ -438,6 +472,7 @@ class ChatService:
# 标题生成
# ------------------------------------------------------------------
@trace_service("生成对话标题", "Generate title")
def generate_title(
self,
title: str | None = None,
@@ -582,11 +617,13 @@ class ChatService:
user_id: int,
site_id: int,
) -> tuple[str, int | None]:
"""调用百炼 API 获取非流式回复,返回 (reply_text, tokens_used)。
"""调用 DashScope Application API 获取非流式回复,返回 (reply_text, tokens_used)。
构建历史消息上下文发送给 AI
通过 Application.call() 调用 App1通用对话prompt 为最近历史拼接
"""
bailian = _get_bailian_client()
# CHANGE 2026-03-22 | BailianClient → DashScopeClientP14 迁移收尾)
client = _get_dashscope_client()
ai_config = AIConfig.from_env()
# 获取历史消息作为上下文(最近 20 条)
conn = get_connection()
@@ -604,33 +641,21 @@ class ChatService:
finally:
conn.close()
# 构建消息列表
messages: list[dict] = []
# 取最近 20 条(含刚写入的 user 消息)
# 拼接历史消息为 prompt 文本
recent = history[-20:] if len(history) > 20 else history
prompt_parts: list[str] = []
for role, msg_content in recent:
messages.append({"role": role, "content": msg_content})
prompt_parts.append(f"[{role}]: {msg_content}")
prompt = "\n".join(prompt_parts)
# 如果没有 system 消息,添加默认 system prompt
if not messages or messages[0]["role"] != "system":
system_prompt = {
"role": "system",
"content": json.dumps(
{"task": "你是台球门店的 AI 助手,根据用户的问题和当前页面上下文提供帮助。"},
ensure_ascii=False,
),
}
messages.insert(0, system_prompt)
# 通过 Application API 调用 App1
result, tokens_used, _session_id = await client.call_app(
ai_config.app_id_1_chat, prompt,
)
# 非流式调用chat_stream 用于 SSE这里用 chat_stream 收集完整回复
full_parts: list[str] = []
async for chunk in bailian.chat_stream(messages):
full_parts.append(chunk)
reply = "".join(full_parts)
# 流式模式不返回 tokens_used按字符数估算
estimated_tokens = len(reply)
return reply, estimated_tokens
# 从返回结果提取文本回复
reply = result.get("text", "") if isinstance(result, dict) else str(result)
return reply, tokens_used
@staticmethod
def _get_consumption_30d(conn: Any, site_id: int, member_id: int) -> Decimal | None:
@@ -673,13 +698,8 @@ class ChatService:
# ── 模块级辅助函数 ──────────────────────────────────────────────
def _get_bailian_client() -> BailianClient:
"""从环境变量构建 BailianClient缺失时报错。"""
api_key = os.environ.get("BAILIAN_API_KEY")
base_url = os.environ.get("BAILIAN_BASE_URL")
model = os.environ.get("BAILIAN_MODEL")
if not api_key or not base_url or not model:
raise RuntimeError(
"百炼 API 环境变量缺失,需要 BAILIAN_API_KEY、BAILIAN_BASE_URL、BAILIAN_MODEL"
)
return BailianClient(api_key=api_key, base_url=base_url, model=model)
def _get_dashscope_client() -> DashScopeClient:
    """Build a DashScopeClient from environment-backed configuration.

    CHANGE 2026-03-22 | BailianClient → DashScopeClient (P14 migration wrap-up).
    Raises when required configuration is missing (delegated to
    AIConfig.from_env).
    """
    cfg = AIConfig.from_env()
    return DashScopeClient(api_key=cfg.api_key, workspace_id=cfg.workspace_id)

View File

@@ -25,6 +25,7 @@ from decimal import Decimal
from app.services import fdw_queries
from app.services.task_generator import compute_heart_icon
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -39,9 +40,10 @@ LEVEL_COLOR_MAP = {
}
TASK_TYPE_MAP = {
"follow_up_visit": {"label": "回访", "class": "tag-callback"},
"high_priority_recall": {"label": "紧急召回", "class": "tag-recall"},
"priority_recall": {"label": "优先召回", "class": "tag-recall"},
"follow_up_visit": {"label": "客户回访", "class": "callback"},
"high_priority_recall": {"label": "高优先召回", "class": "high-priority"},
"priority_recall": {"label": "优先召回", "class": "priority"},
"relationship_building": {"label": "关系构建", "class": "relationship"},
}
# 头像渐变色池(循环使用)
@@ -85,6 +87,7 @@ def _format_currency(amount: float) -> str:
# ── 6.1 核心函数 ──────────────────────────────────────────
@trace_service("获取助教详情", "Get coach detail")
async def get_coach_detail(coach_id: int, site_id: int) -> dict:
"""
助教详情COACH-1
@@ -150,7 +153,13 @@ async def get_coach_detail(coach_id: int, site_id: int) -> dict:
performance = {
"monthly_hours": salary_this.get("total_hours", 0.0),
"monthly_salary": salary_this.get("total_income", 0.0),
# CHANGE 2026-03-26 | 到手 = base_income + bonus_income + bonus_money + room_incomeDWS 层已扣抽成)
"monthly_salary": (
salary_this.get("assistant_pd_money_total", 0.0)
+ salary_this.get("assistant_cx_money_total", 0.0)
+ salary_this.get("bonus_money", 0.0)
+ salary_this.get("room_income", 0.0)
),
"customer_balance": customer_balance,
"tasks_completed": tasks_completed,
"perf_current": salary_this.get("total_hours", 0.0),
@@ -287,22 +296,22 @@ def _build_income(
{
"label": "基础课时费",
"amount": f"¥{salary.get('assistant_pd_money_total', 0.0):,.0f}",
"color": "#42A5F5",
"color": "primary",
},
{
"label": "激励课时费",
"amount": f"¥{salary.get('assistant_cx_money_total', 0.0):,.0f}",
"color": "#FFA726",
"color": "success",
},
{
"label": "充值提成",
"amount": f"¥{salary.get('bonus_money', 0.0):,.0f}",
"color": "#66BB6A",
"color": "warning",
},
{
"label": "酒水提成",
"amount": f"¥{salary.get('room_income', 0.0):,.0f}",
"color": "#AB47BC",
"color": "purple",
},
]
@@ -385,17 +394,18 @@ def _build_top_customers(
balance = cust.get("customer_balance", 0.0)
consume = cust.get("total_consume", 0.0)
# CHANGE 2026-03-29 | coach-detail-500 修复 | relation_score → score对齐 TopCustomer.score Schema
result.append({
"id": mid or 0,
"name": name,
"initial": _get_initial(name),
"avatar_gradient": _get_avatar_gradient(i),
"heart_emoji": heart_emoji,
"relation_score": f"{score:.2f}",
"score": f"{score:.2f}",
"score_color": score_color,
"service_count": cust.get("service_count", 0),
"balance": _format_currency(balance),
"consume": _format_currency(consume),
"balance": float(balance) if balance else 0.0,
"consume": float(consume) if consume else 0.0,
})
return result
@@ -440,9 +450,9 @@ def _build_service_records(
"avatar_gradient": _get_avatar_gradient(i),
"type": course_type or "课程",
"type_class": type_class,
"table": str(rec.get("table_id")) if rec.get("table_id") else None,
"table": rec.get("table_name") or None,
"duration": f"{hours:.1f}h",
"income": _format_currency(income),
"income": float(income),
"date": date_str,
"perf_hours": None,
})
@@ -594,11 +604,12 @@ def _build_notes(coach_id: int, site_id: int, conn) -> list[dict]:
result = []
for r in rows:
# CHANGE 2026-03-29 | coach-detail-500 修复 | ai_score → score对齐 CoachNoteItem.score Schema
result.append({
"id": r[0],
"content": r[1] or "",
"timestamp": r[2].isoformat() if r[2] else "",
"ai_score": r[3],
"score": r[3],
"customer_name": member_name_map.get(r[5], ""),
"tag_label": r[4] or "",
"created_at": r[2].isoformat() if r[2] else "",
@@ -698,9 +709,9 @@ def _build_history_months(
result.append({
"month": month_label,
"estimated": month_str == current_month_str,
"customers": f"{customers}",
"hours": f"{hours:.1f}h",
"salary": _format_currency(salary_amount),
"customers": customers,
"hours": float(hours),
"salary": float(salary_amount),
"callback_done": callback_done,
"recall_done": recall_done,
})

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -19,10 +19,12 @@ from __future__ import annotations
import logging
from app.services.fdw_queries import _fdw_context
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@trace_service(description_zh="查找匹配候选人", description_en="Find matching candidates")
async def find_candidates(
site_id: int | None,
phone: str,

View File

@@ -17,6 +17,8 @@
import json
import logging
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -57,6 +59,7 @@ def _insert_history(
)
@trace_service(description_zh="ai_analyze_note", description_en="Ai Analyze Note")
def ai_analyze_note(note_id: int) -> int | None:
"""
AI 应用 6 备注分析接口(占位)。
@@ -67,6 +70,7 @@ def ai_analyze_note(note_id: int) -> int | None:
return None
@trace_service(description_zh="执行笔记重分类", description_en="Run note reclassification")
def run(payload: dict | None = None, job_id: int | None = None) -> dict:
"""
备注回溯主流程。

View File

@@ -10,6 +10,7 @@ import json
import logging
from fastapi import HTTPException
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -52,16 +53,67 @@ def _record_history(
def ai_analyze_note(note_id: int) -> int | None:
@trace_service(description_zh="ai_analyze_note", description_en="Ai Analyze Note")
async def ai_analyze_note(note_id: int, site_id: int, member_id: int, content: str, user_name: str = "") -> int | None:
"""
AI 应用 6 备注分析接口(占位)
AI 应用 6 备注分析:调用百炼 Application API 获取评分
P5 AI 集成层实现后替换此占位函数。
当前返回 None 表示 AI 未就绪,跳过评分逻辑
CHANGE 2026-03-27 | 打通 AI 应用 6 调用链
仅执行 App6 评分,不触发 App8 线索整合(后续统一处理)
返回 score1-10失败返回 None。
"""
return None
try:
from app.ai.config import AIConfig
from app.ai.dashscope_client import DashScopeClient
import json
config = AIConfig.from_env()
client = DashScopeClient(api_key=config.api_key, workspace_id=config.workspace_id)
# 构建 prompt简化版直接传给百炼应用
prompt = json.dumps({
"site_id": site_id,
"member_id": member_id,
"note_content": content,
"noted_by_name": user_name,
}, ensure_ascii=False)
result, tokens_used, _ = await client.call_app(config.app_id_6_note, prompt)
score = result.get("score") if isinstance(result, dict) else None
if score is not None:
score = max(1, min(10, int(score)))
logger.info("App6 备注评分完成: note_id=%d score=%d tokens=%d", note_id, score, tokens_used)
return score
except Exception:
logger.warning("App6 备注评分失败: note_id=%d", note_id, exc_info=True)
return None
async def _async_ai_score(note_id: int, site_id: int, member_id: int, content: str) -> None:
    """Run AI scoring in the background and persist the result.

    Fire-and-forget companion to ``ai_analyze_note``: it is scheduled with
    ``asyncio.create_task`` so the note-creation API can respond without
    waiting for the model call.  On success the score is written back to
    ``biz.notes.ai_score``; any failure is logged and swallowed so the
    background task never propagates into the caller.

    Args:
        note_id: primary key of the note row to update.
        site_id: tenant/site the note belongs to (forwarded to the scorer).
        member_id: member the note refers to (forwarded to the scorer).
        content: raw note text sent to the AI scorer.
    """
    try:
        ai_score_val = await ai_analyze_note(
            note_id=note_id, site_id=site_id, member_id=member_id, content=content,
        )
        # None means the scorer failed or declined; leave ai_score untouched.
        if ai_score_val is not None:
            conn = _get_connection()
            try:
                with conn.cursor() as cur:
                    cur.execute(
                        "UPDATE biz.notes SET ai_score = %s, updated_at = NOW() WHERE id = %s",
                        (ai_score_val, note_id),
                    )
                conn.commit()
                logger.info("AI 评分已写入: note_id=%d ai_score=%d", note_id, ai_score_val)
            finally:
                conn.close()
    except Exception:
        # Best-effort background job: log with traceback, never re-raise.
        logger.warning("后台 AI 评分失败: note_id=%d", note_id, exc_info=True)
@trace_service("创建备注", "Create note")
async def create_note(
site_id: int,
user_id: int,
@@ -71,6 +123,7 @@ async def create_note(
task_id: int | None = None,
rating_service_willingness: int | None = None,
rating_revisit_likelihood: int | None = None,
score: int | None = None,
) -> dict:
"""
创建备注。
@@ -91,6 +144,7 @@ async def create_note(
for label, val in [
("再次服务意愿评分", rating_service_willingness),
("再来店可能性评分", rating_revisit_likelihood),
("备注星星评分", score),
]:
if val is not None and (val < 1 or val > 5):
raise HTTPException(
@@ -139,17 +193,17 @@ async def create_note(
INSERT INTO biz.notes
(site_id, user_id, target_type, target_id, type,
content, rating_service_willingness,
rating_revisit_likelihood, task_id)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
rating_revisit_likelihood, task_id, score)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING id, site_id, user_id, target_type, target_id,
type, content, rating_service_willingness,
rating_revisit_likelihood, task_id,
ai_score, ai_analysis, created_at, updated_at
ai_score, ai_analysis, created_at, updated_at, score
""",
(
site_id, user_id, target_type, target_id, note_type,
content, rating_service_willingness,
rating_revisit_likelihood, task_id,
rating_revisit_likelihood, task_id, score,
),
)
row = cur.fetchone()
@@ -169,26 +223,11 @@ async def create_note(
"ai_analysis": row[11],
"created_at": row[12].isoformat() if row[12] else None,
"updated_at": row[13].isoformat() if row[13] else None,
"score": row[14],
}
# 若 type='follow_up'触发 AI 分析并标记回访任务完成
# 若 type='follow_up',标记回访任务完成(不依赖 AI 评分)
if note_type == "follow_up" and task_id is not None:
# 保留 AI 占位调用P5 接入时调用链不变)
ai_score = ai_analyze_note(note["id"])
if ai_score is not None:
# 更新备注的 ai_score
cur.execute(
"""
UPDATE biz.notes
SET ai_score = %s, updated_at = NOW()
WHERE id = %s
""",
(ai_score, note["id"]),
)
note["ai_score"] = ai_score
# 不论 ai_score 如何有备注即标记回访任务完成T4
if task_info and task_info["status"] == "active":
cur.execute(
"""
@@ -209,13 +248,17 @@ async def create_note(
new_status="completed",
old_task_type=task_info["task_type"],
new_task_type=task_info["task_type"],
detail={
"note_id": note["id"],
"ai_score": ai_score,
},
detail={"note_id": note["id"]},
)
conn.commit()
# CHANGE 2026-03-27 | AI 评分:后台异步执行,不阻塞 API 响应
# 备注先返回给前端aiScore=nullAI 评分完成后写入数据库
# 前端下次加载页面时自动获取最新 aiScore
import asyncio
asyncio.create_task(_async_ai_score(note["id"], site_id, target_id, content))
return note
except HTTPException:
@@ -228,6 +271,7 @@ async def create_note(
conn.close()
@trace_service("查询备注列表", "Get notes")
async def get_notes(
site_id: int, target_type: str, target_id: int
) -> list[dict]:
@@ -280,6 +324,7 @@ async def get_notes(
conn.close()
@trace_service("删除备注", "Delete note")
async def delete_note(note_id: int, user_id: int, site_id: int) -> dict:
"""
删除备注。

View File

@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
"""ETL 输出目录清理服务
遍历 EXPORT_ROOT 下每个任务文件夹,按目录名中的时间戳排序,
只保留最近 N 个运行记录,其余永久删除。
CHANGE 2026-03-27 | 新增:执行前自动清理输出目录,每类任务只保留最近 10 个运行记录
"""
from __future__ import annotations
import logging
import os
import re
import shutil
from pathlib import Path
logger = logging.getLogger(__name__)
# 运行记录目录命名格式:{TASK_CODE}-{run_id}-{YYYYMMDD}-{HHMMSS}
# 按最后两段(日期-时间)排序
_RUN_DIR_PATTERN = re.compile(r"^.+-(\d{8})-(\d{6})$")
def _get_export_root() -> Path:
    """Resolve the export root directory from the EXPORT_ROOT env var.

    Returns:
        Path of the existing export root directory.

    Raises:
        RuntimeError: if EXPORT_ROOT is unset/empty, or points to
            something that is not an existing directory.
    """
    val = os.environ.get("EXPORT_ROOT")
    # An empty string counts as "not configured" as well.
    if not val:
        raise RuntimeError(
            "环境变量 EXPORT_ROOT 未设置,无法执行输出目录清理。"
            "请在 .env 中配置 EXPORT_ROOT。"
        )
    p = Path(val)
    if not p.is_dir():
        raise RuntimeError(f"EXPORT_ROOT 路径不存在或不是目录: {p}")
    return p
def _sort_key(dirname: str) -> tuple[str, str]:
    """Sort key for run directories: ("YYYYMMDD", "HHMMSS") from the name.

    Lexicographic order on the tuple equals chronological order, so a
    greater key means a newer run record.
    """
    m = _RUN_DIR_PATTERN.match(dirname)
    if m:
        return (m.group(1), m.group(2))
    # Names that do not match the expected pattern sort as the oldest
    # possible timestamp, so they are the first candidates for cleanup.
    return ("00000000", "000000")
def cleanup_output_dirs(keep: int = 10) -> dict:
    """Prune old run-record directories under EXPORT_ROOT.

    Each first-level folder below EXPORT_ROOT holds one task's output;
    its sub-directories are individual run records named
    ``{TASK_CODE}-{run_id}-{YYYYMMDD}-{HHMMSS}``.  Only the *keep* most
    recent run records per task folder survive; older ones are removed
    permanently.

    Args:
        keep: number of newest run records to retain per task folder.

    Returns:
        Summary dict with keys ``task_folders_scanned``,
        ``dirs_deleted`` and ``errors`` (list of failure messages).

    Raises:
        RuntimeError: if EXPORT_ROOT is unset or invalid (propagated
            from ``_get_export_root``).
    """
    root = _get_export_root()
    scanned = 0
    deleted = 0
    failures: list[str] = []

    for task_folder in sorted(root.iterdir()):
        if not task_folder.is_dir():
            continue
        scanned += 1
        runs = [entry for entry in task_folder.iterdir() if entry.is_dir()]
        if len(runs) <= keep:
            continue
        # Newest first; everything beyond index `keep` is stale.
        stale_runs = sorted(runs, key=lambda p: _sort_key(p.name), reverse=True)[keep:]
        for stale in stale_runs:
            try:
                shutil.rmtree(stale)
            except Exception as exc:
                message = f"删除失败 {stale}: {exc}"
                logger.warning(message)
                failures.append(message)
            else:
                deleted += 1

    logger.info(
        "输出目录清理完成: 扫描 %d 个任务文件夹, 删除 %d 个运行记录, %d 个错误",
        scanned, deleted, len(failures),
    )
    return {
        "task_folders_scanned": scanned,
        "dirs_deleted": deleted,
        "errors": failures,
    }

View File

@@ -17,11 +17,8 @@ from decimal import Decimal
from fastapi import HTTPException
from app.services import fdw_queries
from app.services.task_manager import (
_get_assistant_id,
compute_income_trend,
map_course_type_class,
)
from app.services.task_manager import _get_assistant_id, compute_income_trend
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -37,22 +34,8 @@ def _get_connection():
# 纯函数:可被属性测试直接调用
# ---------------------------------------------------------------------------
# 头像颜色预定义集合
_AVATAR_COLORS = [
"#0052d9", "#e34d59", "#00a870", "#ed7b2f",
"#0594fa", "#a25eb5", "#f6c244", "#2ba471",
]
def avatar_char_color(name: str) -> tuple[str, str]:
"""从客户姓名计算 avatarChar 和 avatarColor。"""
if not name:
return ("?", _AVATAR_COLORS[0])
char = name[0]
color = _AVATAR_COLORS[ord(char) % len(_AVATAR_COLORS)]
return (char, color)
@trace_service(description_zh="format_income_desc", description_en="Format Income Desc")
def format_income_desc(rate: float, hours: float) -> str:
"""
格式化收入明细描述。
@@ -65,15 +48,17 @@ def format_income_desc(rate: float, hours: float) -> str:
return f"{rate_str}元/h × {hours_str}h"
@trace_service(description_zh="group_records_by_date", description_en="Group Records By Date")
def group_records_by_date(
records: list[dict], *, include_avatar: bool = False
records: list[dict], *, include_avatar: bool = False,
rs_map: dict[int, float] | None = None,
) -> list[dict]:
"""
将服务记录按日期分组为 DateGroup 结构。
参数:
records: 服务记录列表(已按 settle_time DESC 排序)
include_avatar: 是否包含 avatarChar/avatarColorPERF-1 需要PERF-2 不需要
include_avatar: 是否包含 member_idPERF-1 需要前端计算头像颜色
返回按日期倒序排列的 DateGroup 列表。
"""
@@ -95,24 +80,33 @@ def group_records_by_date(
end_time = rec.get("end_time")
time_range = _format_time_range(start_time, end_time)
raw_course_type = rec.get("course_type", "")
type_class = map_course_type_class(raw_course_type)
# CHANGE 2026-03-24 | 课程类型直接用数据库原始值skill_name不做二次映射
raw_course_type = rec.get("course_type", "") or "基础课"
customer_name = rec.get("customer_name") or "未知客户"
record_item: dict = {
"customer_name": customer_name,
"time_range": time_range,
"hours": f"{rec.get('service_hours', 0.0):g}",
"course_type": raw_course_type or "基础课",
"course_type_class": type_class,
"hours": f"{rec.get('service_hours', 0.0):.1f}",
"course_type": raw_course_type,
"location": rec.get("table_name") or "",
"income": f"{rec.get('income', 0.0):.2f}",
}
# CHANGE 2026-03-24 | 头像颜色改为前端根据 member_id 计算,后端只传 member_id 和首字
if include_avatar:
char, color = avatar_char_color(customer_name)
record_item["avatar_char"] = char
record_item["avatar_color"] = color
mid = rec.get("member_id")
record_item["member_id"] = mid
# 散客/未知客户member_id 为空、0、负数→ "?"
if not mid or mid <= 0:
record_item["avatar_char"] = "?"
else:
record_item["avatar_char"] = customer_name[0] if customer_name else "?"
# CHANGE 2026-03-27 | 关系爱心标识:注入 heart_scoreRS 分数)
if rs_map and mid:
record_item["heart_score"] = rs_map.get(mid, 0.0)
else:
record_item["heart_score"] = 0.0
groups[date_key].append(record_item)
@@ -125,7 +119,7 @@ def group_records_by_date(
total_income = sum(float(r["income"]) for r in recs)
result.append({
"date": date_key,
"total_hours": f"{total_hours:g}",
"total_hours": f"{total_hours:.1f}",
"total_income": f"{total_income:.2f}",
"records": recs,
})
@@ -133,6 +127,7 @@ def group_records_by_date(
return result
@trace_service(description_zh="paginate_records", description_en="Paginate Records")
def paginate_records(
records: list[dict], page: int, page_size: int
) -> tuple[list[dict], bool]:
@@ -149,6 +144,7 @@ def paginate_records(
return page_records, has_more
@trace_service(description_zh="compute_summary", description_en="Compute Summary")
def compute_summary(records: list[dict]) -> dict:
"""
计算月度汇总。
@@ -204,6 +200,7 @@ def _format_date_label(dt) -> str:
# ---------------------------------------------------------------------------
@trace_service("获取绩效概览", "Get performance overview")
async def get_overview(
user_id: int, site_id: int, year: int, month: int
) -> dict:
@@ -244,11 +241,30 @@ async def get_overview(
)
# 按日期分组(含 avatar
date_groups = group_records_by_date(all_records, include_avatar=True)
# CHANGE 2026-03-27 | 批量查 RS 分数,注入到服务记录和客户列表
member_ids = list({r.get("member_id") for r in all_records if r.get("member_id")})
rs_map: dict[int, float] = {}
if member_ids:
try:
with fdw_queries._fdw_context(conn, site_id) as cur:
cur.execute(
"""
SELECT member_id, COALESCE(rs_display, 0)
FROM app.v_dws_member_assistant_relation_index
WHERE assistant_id = %s AND member_id = ANY(%s)
""",
(assistant_id, member_ids),
)
for row in cur.fetchall():
rs_map[row[0]] = float(row[1])
except Exception:
logger.warning("查询 RS 分数失败", exc_info=True)
date_groups = group_records_by_date(all_records, include_avatar=True, rs_map=rs_map)
# ── 4. 新客/常客列表 ──
new_customers, regular_customers = _build_customer_lists(
conn, site_id, assistant_id, year, month, all_records
conn, site_id, assistant_id, year, month, all_records, rs_map=rs_map
)
# ── 5. 构建响应 ──
@@ -266,6 +282,7 @@ async def get_overview(
FROM auth.user_assistant_binding uab
JOIN auth.users u ON uab.user_id = u.id
WHERE uab.assistant_id = %s AND uab.site_id = %s
AND uab.is_removed = false
LIMIT 1
""",
(assistant_id, site_id),
@@ -279,27 +296,76 @@ async def get_overview(
logger.warning("查询助教信息失败", exc_info=True)
current_income = salary["total_income"] if salary else 0.0
basic_rate = salary["basic_rate"] if salary else 0.0
incentive_rate = salary["incentive_rate"] if salary else 0.0
# CHANGE 2026-03-24 | basic_rate/incentive_rate 改为助教到手单价(客户价 - 球房提成),
# 不再使用 base_course_price/bonus_course_price客户收费标准
base_course_price = salary["basic_rate"] if salary else 0.0 # 客户收费标准
bonus_course_price = salary["incentive_rate"] if salary else 0.0 # 客户收费标准
base_deduction = salary["base_deduction"] if salary else 0.0
bonus_deduction_ratio = salary["bonus_deduction_ratio"] if salary else 0.0
# 助教到手单价 = 客户价 - 球房提成
basic_rate = base_course_price - base_deduction
incentive_rate = bonus_course_price * (1 - bonus_deduction_ratio)
basic_hours = salary["basic_hours"] if salary else 0.0
bonus_hours = salary["bonus_hours"] if salary else 0.0
pd_money = salary["assistant_pd_money_total"] if salary else 0.0
cx_money = salary["assistant_cx_money_total"] if salary else 0.0
top_rank_bonus = salary["top_rank_bonus"] if salary else 0.0
recharge_commission = salary["recharge_commission"] if salary else 0.0
# 收入明细项
income_items = _build_income_items(
basic_rate, incentive_rate, basic_hours, bonus_hours,
pd_money, cx_money,
top_rank_bonus=top_rank_bonus,
recharge_commission=recharge_commission,
)
# 档位信息
next_basic_rate = salary["next_tier_basic_rate"] if salary else 0.0
next_incentive_rate = salary["next_tier_incentive_rate"] if salary else 0.0
upgrade_hours = salary["next_tier_hours"] if salary else 0.0
# CHANGE 2026-03-24 | 档位信息从 cfg_performance_tier 配置表计算,
# 复用 task_manager._build_performance_summary 的逻辑
total_hours = salary["total_hours"] if salary else 0.0
upgrade_hours_needed = max(0.0, upgrade_hours - total_hours)
tier_completed = salary["tier_completed"] if salary else False
upgrade_bonus = 0.0 if tier_completed else (salary["bonus_money"] if salary else 0.0)
tiers: list[dict] = []
try:
tiers = fdw_queries.get_performance_tiers(conn, site_id)
except Exception:
logger.warning("查询 cfg_performance_tier 失败", exc_info=True)
# 找到当前档位和下一档
tier_completed = False
next_tier_hours = 0.0
current_tier_data = None
next_tier_data = None
if tiers:
for i, t in enumerate(tiers):
if t["min_hours"] > total_hours:
next_tier_data = t
current_tier_data = tiers[i - 1] if i > 0 else tiers[0]
next_tier_hours = t["min_hours"]
break
if next_tier_data is None:
# 已达到或超过最高档
tier_completed = True
current_tier_data = tiers[-1]
upgrade_hours_needed = max(0.0, next_tier_hours - total_hours) if not tier_completed else 0.0
# 下一档到手费率
if next_tier_data:
next_basic_rate = base_course_price - next_tier_data["base_deduction"]
next_incentive_rate = bonus_course_price * (1 - next_tier_data["bonus_deduction_ratio"])
else:
next_basic_rate = 0.0
next_incentive_rate = 0.0
# bonus_money: 升到下一档后因抽成降低能多拿的钱
# 公式同 task_manager._build_performance_summary
upgrade_bonus = 0.0
if not tier_completed and current_tier_data and next_tier_data:
base_ded_diff = current_tier_data["base_deduction"] - next_tier_data["base_deduction"]
base_saving = next_tier_data["min_hours"] * base_ded_diff if base_ded_diff > 0 else 0.0
bonus_ratio_diff = current_tier_data["bonus_deduction_ratio"] - next_tier_data["bonus_deduction_ratio"]
bonus_saving = bonus_hours * bonus_course_price * bonus_ratio_diff if bonus_ratio_diff > 0 else 0.0
upgrade_bonus = round(base_saving + bonus_saving, 2)
return {
"coach_name": coach_name,
@@ -335,27 +401,49 @@ def _build_income_items(
bonus_hours: float,
pd_money: float,
cx_money: float,
*,
top_rank_bonus: float = 0.0,
recharge_commission: float = 0.0,
) -> list[dict]:
"""构建收入明细项列表。"""
"""
构建收入明细项列表。
CHANGE 2026-03-24 | 始终显示所有项基础课、激励课、Top3销冠奖、充值提成即使为 0
Top3销冠奖为 0 时 desc 显示"继续努力"
"""
items = []
# 基础课收入
if basic_hours > 0 or pd_money > 0:
items.append({
"icon": "💰",
"label": "基础课收入",
"desc": format_income_desc(basic_rate, basic_hours),
"value": f"¥{pd_money:,.2f}",
})
# 基础课收入(始终显示)
items.append({
"icon": "💰",
"label": "基础课收入",
"desc": format_income_desc(basic_rate, basic_hours),
"value": f"¥{pd_money:,.2f}",
})
# 激励课收入
if bonus_hours > 0 or cx_money > 0:
items.append({
"icon": "🎯",
"label": "激励课收入",
"desc": format_income_desc(incentive_rate, bonus_hours),
"value": f"¥{cx_money:,.2f}",
})
# 激励课收入(始终显示)
items.append({
"icon": "🎯",
"label": "激励课收入",
"desc": format_income_desc(incentive_rate, bonus_hours),
"value": f"¥{cx_money:,.2f}",
})
# Top3销冠奖始终显示为 0 时 desc 显示"继续努力"
items.append({
"icon": "🏆",
"label": "Top3销冠奖",
"desc": "继续努力" if top_rank_bonus == 0 else "本月销冠奖励",
"value": f"¥{top_rank_bonus:,.2f}",
})
# CHANGE 2026-03-24 | 充值提成(始终显示)
items.append({
"icon": "💳",
"label": "充值提成",
"desc": "充值激励",
"value": f"¥{recharge_commission:,.2f}",
})
return items
@@ -367,12 +455,17 @@ def _build_customer_lists(
year: int,
month: int,
all_records: list[dict],
*,
rs_map: dict[int, float] | None = None,
) -> tuple[list[dict], list[dict]]:
"""
构建新客和常客列表。
新客: 本月有服务记录但本月之前无记录的客户
常客: 本月服务次数 ≥ 2 的客户
常客: 本月服务次数 ≥ 2 的客户统计数据拉近90天
CHANGE 2026-03-24 | 头像颜色改为前端根据 member_id 计算,后端只传 member_id 和首字。
CHANGE 2026-03-24 | 常客展示数据改为近90天聚合判定标准不变本月≥2次
"""
if not all_records:
return [], []
@@ -395,16 +488,12 @@ def _build_customer_lists(
stats["count"] += 1
stats["total_hours"] += rec.get("service_hours", 0.0)
stats["total_income"] += rec.get("income", 0.0)
# 更新最后服务时间(记录已按 settle_time DESC 排序,第一条即最新)
if stats["last_service"] is None:
stats["last_service"] = rec.get("settle_time")
member_ids = list(member_stats.keys())
# 查询历史记录(本月之前是否有服务记录)
# ⚠️ 直连 ETL 库查询 app.v_dwd_assistant_service_log RLS 视图
# 列名映射: assistant_id → site_assistant_id, member_id → tenant_member_id,
# is_trash → is_delete (int, 0=正常), settle_time → create_time
# 查询历史记录(本月之前是否有服务记录)— 用于新客判定
historical_members: set[int] = set()
try:
start_date = f"{year}-{month:02d}-01"
@@ -425,33 +514,64 @@ def _build_customer_lists(
except Exception:
logger.warning("查询历史客户记录失败", exc_info=True)
# 查询近90天聚合数据常客展示用
if month == 12:
next_month_start = f"{year + 1}-01-01"
else:
next_month_start = f"{year}-{month + 1:02d}-01"
stats_90d: dict[int, dict] = {}
try:
rows_90d = fdw_queries.get_service_records_90days(
conn, site_id, assistant_id, next_month_start,
)
for r in rows_90d:
stats_90d[r["member_id"]] = r
except Exception:
logger.warning("查询近90天服务记录失败", exc_info=True)
new_customers = []
regular_customers = []
for mid, stats in member_stats.items():
# CHANGE 2026-03-27 | 过滤散客/未知客户member_id ≤ 0不进入新客和常客列表
if mid <= 0:
continue
name = stats["customer_name"]
char, color = avatar_char_color(name)
char = name[0] if name else "?"
# 新客:历史无记录
if mid not in historical_members:
last_service_dt = stats["last_service"]
new_customers.append({
"name": name,
"member_id": mid,
"avatar_char": char,
"avatar_color": color,
"last_service": _format_date_label(last_service_dt),
"count": stats["count"],
"heart_score": rs_map.get(mid, 0.0) if rs_map else 0.0,
})
# 常客:本月 ≥ 2 次
# 常客:本月 ≥ 2 次展示数据用90天聚合
if stats["count"] >= 2:
s90 = stats_90d.get(mid)
if s90:
reg_count = s90["count"]
reg_hours = round(s90["total_hours"], 2)
reg_income = s90["total_income"]
else:
# 90天查询失败时回退到本月数据
reg_count = stats["count"]
reg_hours = round(stats["total_hours"], 2)
reg_income = stats["total_income"]
regular_customers.append({
"name": name,
"member_id": mid,
"avatar_char": char,
"avatar_color": color,
"hours": round(stats["total_hours"], 2),
"income": f"¥{stats['total_income']:,.2f}",
"count": stats["count"],
"hours": reg_hours,
"income": f"¥{reg_income:,.2f}",
"count": reg_count,
"heart_score": rs_map.get(mid, 0.0) if rs_map else 0.0,
})
# 新客按最后服务时间倒序
@@ -470,6 +590,7 @@ def _build_customer_lists(
# ---------------------------------------------------------------------------
@trace_service("获取绩效明细", "Get performance records")
async def get_records(
user_id: int, site_id: int,
year: int, month: int, page: int, page_size: int,
@@ -506,8 +627,27 @@ async def get_records(
# 判断 hasMore
has_more = len(all_records) > page * page_size
# 按日期分组(不含 avatar
date_groups = group_records_by_date(page_records, include_avatar=False)
# CHANGE 2026-03-27 | 批量查 RS 分数,注入到服务记录
page_member_ids = list({r.get("member_id") for r in page_records if r.get("member_id")})
rs_map: dict[int, float] = {}
if page_member_ids:
try:
with fdw_queries._fdw_context(conn, site_id) as cur:
cur.execute(
"""
SELECT member_id, COALESCE(rs_display, 0)
FROM app.v_dws_member_assistant_relation_index
WHERE assistant_id = %s AND member_id = ANY(%s)
""",
(assistant_id, page_member_ids),
)
for row in cur.fetchall():
rs_map[row[0]] = float(row[1])
except Exception:
logger.warning("PERF-2 查询 RS 分数失败", exc_info=True)
# 按日期分组(含 member_id / avatar_char前端计算头像颜色
date_groups = group_records_by_date(page_records, include_avatar=True, rs_map=rs_map)
return {
"summary": summary,

View File

@@ -16,6 +16,8 @@ ETL 数据更新后,直连 ETL 库读取助教服务记录,
import json
import logging
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -56,6 +58,7 @@ def _insert_history(
)
@trace_service(description_zh="执行维客检测", description_en="Run recall detection")
def run(payload: dict | None = None, job_id: int | None = None) -> dict:
"""
召回完成检测主流程。
@@ -178,6 +181,9 @@ def _process_site(conn, site_id: int, last_run_at) -> int:
# ── 4-7. 逐条服务记录匹配并处理 ──
for assistant_id, member_id, service_time in service_records:
# 散客过滤member_id ≤ 0 不参与任务系统)
if member_id is None or member_id <= 0:
continue
try:
count = _process_service_record(
conn, site_id, assistant_id, member_id, service_time
@@ -203,7 +209,13 @@ def _process_service_record(
service_time,
) -> int:
"""
处理单条服务记录:匹配 active 任务并标记 completed。
处理单条服务记录:匹配 active 任务并标记 completed + 生成回访任务
CHANGE 2026-03-30 | 回访任务直接在此生成(不再依赖 note_reclassifier 事件链)。
规则:
- 有 active 召回任务 → 标记 completed然后生成回访任务
- 有 active 回访任务 → 关闭旧回访,生成新回访(重置 48h 倒计时)
- 无任何 active 召回/回访 → 直接生成回访任务
每条服务记录独立事务,失败不影响其他。
返回本次完成的任务数。
@@ -213,7 +225,7 @@ def _process_service_record(
with conn.cursor() as cur:
cur.execute("BEGIN")
# 查找匹配的 active 召回类任务(仅完成召回任务,回访/关系构建不在此处理)
# ── 1. 查找匹配的 active 召回类任务 ──
cur.execute(
"""
SELECT id, task_type
@@ -226,14 +238,12 @@ def _process_service_record(
""",
(site_id, assistant_id, member_id),
)
active_tasks = cur.fetchall()
active_recall_tasks = cur.fetchall()
if not active_tasks:
conn.commit()
return 0
has_active_recall = len(active_recall_tasks) > 0
# 将所有匹配的 active 任务标记为 completed
for task_id, task_type in active_tasks:
# 将所有匹配的 active 召回任务标记为 completed
for task_id, task_type in active_recall_tasks:
cur.execute(
"""
UPDATE biz.coach_tasks
@@ -260,28 +270,82 @@ def _process_service_record(
)
completed += 1
conn.commit()
# ── 2. 生成回访任务CHANGE 2026-03-30 ──
# 如果还有 active 召回任务(其他助教的),不生成回访
# 注意:上面已经把当前助教的召回任务标记为 completed 了
# 这里检查的是当前助教-客户对是否还有未完成的召回任务(不应该有了)
# ── 7. 触发 recall_completed 事件 ──
# 延迟导入 fire_event 避免循环依赖
try:
from app.services.trigger_scheduler import fire_event
# 关闭已有的 active 回访任务
cur.execute(
"""
SELECT id FROM biz.coach_tasks
WHERE site_id = %s AND assistant_id = %s AND member_id = %s
AND task_type = 'follow_up_visit' AND status = 'active'
""",
(site_id, assistant_id, member_id),
)
old_follow_ups = cur.fetchall()
for (old_id,) in old_follow_ups:
cur.execute(
"""
UPDATE biz.coach_tasks
SET status = 'inactive', updated_at = NOW()
WHERE id = %s
""",
(old_id,),
)
_insert_history(
cur, old_id,
action="superseded_by_new_visit",
old_status="active", new_status="inactive",
old_task_type="follow_up_visit", new_task_type="follow_up_visit",
detail={"reason": "new_service_record", "service_time": str(service_time)},
)
fire_event(
"recall_completed",
{
"site_id": site_id,
"assistant_id": assistant_id,
"member_id": member_id,
# 创建新的回访任务48h 过期)
from datetime import timedelta
expires_at = service_time + timedelta(hours=48) if hasattr(service_time, '__add__') else None
cur.execute(
"""
INSERT INTO biz.coach_tasks
(site_id, assistant_id, member_id, task_type, status, expires_at, created_at, updated_at)
VALUES (%s, %s, %s, 'follow_up_visit', 'active', %s, NOW(), NOW())
RETURNING id
""",
(site_id, assistant_id, member_id, expires_at),
)
new_follow_up_id = cur.fetchone()[0]
_insert_history(
cur, new_follow_up_id,
action="created",
old_status=None, new_status="active",
new_task_type="follow_up_visit",
detail={
"reason": "service_record_detected",
"service_time": str(service_time),
"had_recall": has_active_recall,
},
)
except Exception:
logger.exception(
"触发 recall_completed 事件失败: site_id=%s, assistant_id=%s, member_id=%s",
site_id,
assistant_id,
member_id,
)
conn.commit()
# ── 3. 触发 recall_completed 事件(仅当有召回任务被完成时) ──
if has_active_recall:
try:
from app.services.trigger_scheduler import fire_event
fire_event(
"recall_completed",
{
"site_id": site_id,
"assistant_id": assistant_id,
"member_id": member_id,
"service_time": str(service_time),
},
)
except Exception:
logger.exception(
"触发 recall_completed 事件失败: site_id=%s, assistant_id=%s, member_id=%s",
site_id, assistant_id, member_id,
)
return completed

View File

@@ -15,10 +15,12 @@ from __future__ import annotations
import logging
from app.database import get_connection
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@trace_service(description_zh="获取用户权限列表", description_en="Get user permissions")
async def get_user_permissions(user_id: int, site_id: int) -> list[str]:
"""
获取用户在指定 site_id 下的权限 code 列表。
@@ -43,6 +45,7 @@ async def get_user_permissions(user_id: int, site_id: int) -> list[str]:
JOIN auth.role_permissions rp ON usr.role_id = rp.role_id
JOIN auth.permissions p ON rp.permission_id = p.id
WHERE usr.user_id = %s AND usr.site_id = %s
AND usr.is_removed = false
""",
(user_id, site_id),
)
@@ -53,11 +56,12 @@ async def get_user_permissions(user_id: int, site_id: int) -> list[str]:
return [row[0] for row in rows]
@trace_service(description_zh="获取用户门店列表", description_en="Get user sites")
async def get_user_sites(user_id: int) -> list[dict]:
"""
获取用户关联的所有店铺及对应角色。
查询 user_site_roles JOIN rolesLEFT JOIN site_code_mapping 获取店铺名称,
查询 user_site_roles JOIN rolesLEFT JOIN biz.sites 获取店铺名称,
按 site_id 分组聚合角色列表。
参数:
@@ -77,8 +81,9 @@ async def get_user_sites(user_id: int) -> list[dict]:
r.name
FROM auth.user_site_roles usr
JOIN auth.roles r ON usr.role_id = r.id
LEFT JOIN auth.site_code_mapping scm ON usr.site_id = scm.site_id
LEFT JOIN biz.sites scm ON scm.site_id = usr.site_id
WHERE usr.user_id = %s
AND usr.is_removed = false
ORDER BY usr.site_id, r.code
""",
(user_id,),
@@ -101,6 +106,7 @@ async def get_user_sites(user_id: int) -> list[dict]:
return list(sites_map.values())
@trace_service(description_zh="检查用户门店角色", description_en="Check user site role")
async def check_user_has_site_role(user_id: int, site_id: int) -> bool:
"""
检查用户在指定 site_id 下是否有任何角色绑定。
@@ -120,6 +126,7 @@ async def check_user_has_site_role(user_id: int, site_id: int) -> bool:
SELECT 1
FROM auth.user_site_roles
WHERE user_id = %s AND site_id = %s
AND is_removed = false
LIMIT 1
""",
(user_id, site_id),

View File

@@ -22,6 +22,8 @@ from ..schemas.schedules import ScheduleConfigSchema
from ..schemas.tasks import TaskConfigSchema
from .task_queue import task_queue
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
# 调度器轮询间隔(秒)
@@ -34,6 +36,23 @@ def _parse_time(time_str: str) -> tuple[int, int]:
return int(parts[0]), int(parts[1])
def _convert_interval_to_seconds(value: int, unit: str) -> int:
    """Convert a (value, unit) minimum-run-interval pair to seconds.

    Args:
        value: interval magnitude; 0 or negative means "no limit".
        unit: one of "minutes", "hours", "days"; any unrecognised unit
            falls back to minutes.

    Returns:
        The interval in seconds, or 0 when ``value <= 0`` (no limit).
    """
    if value <= 0:
        return 0
    # Unknown units use the .get() fallback of 60 s/unit (minutes).
    multipliers = {"minutes": 60, "hours": 3600, "days": 86400}
    return value * multipliers.get(unit, 60)
@trace_service(description_zh="calculate_next_run", description_en="Calculate Next Run")
def calculate_next_run(
schedule_config: ScheduleConfigSchema,
now: datetime | None = None,
@@ -188,7 +207,9 @@ class Scheduler:
with conn.cursor() as cur:
cur.execute(
"""
SELECT id, site_id, task_config, schedule_config
SELECT id, site_id, task_config, schedule_config,
min_run_interval_value, min_run_interval_unit,
last_run_at, last_status, min_run_intervals
FROM scheduled_tasks
WHERE enabled = TRUE
AND next_run_at IS NOT NULL
@@ -198,11 +219,32 @@ class Scheduler:
)
rows = cur.fetchall()
now = datetime.now(timezone.utc)
for row in rows:
task_id = str(row[0])
site_id = row[1]
task_config_raw = row[2] if isinstance(row[2], dict) else json.loads(row[2])
schedule_config_raw = row[3] if isinstance(row[3], dict) else json.loads(row[3])
min_interval_value = row[4] or 0
min_interval_unit = row[5] or "minutes"
last_run_at = row[6]
last_status = row[7]
# per-task 间隔:取所有任务中最大的间隔作为有效间隔
min_run_intervals_raw = row[8] if isinstance(row[8], dict) else json.loads(row[8]) if row[8] else {}
# 计算有效间隔per-task 最大值 vs schedule 级别,取较大者
effective_interval_seconds = _convert_interval_to_seconds(
min_interval_value, min_interval_unit
)
for _task_code, interval_cfg in min_run_intervals_raw.items():
if isinstance(interval_cfg, dict):
task_seconds = _convert_interval_to_seconds(
interval_cfg.get("value", 0),
interval_cfg.get("unit", "minutes"),
)
if task_seconds > effective_interval_seconds:
effective_interval_seconds = task_seconds
try:
config = TaskConfigSchema(**task_config_raw)
@@ -211,7 +253,44 @@ class Scheduler:
logger.exception("调度任务 [%s] 配置反序列化失败,跳过", task_id)
continue
# 入队
# 1. 并发检查:上次仍在运行中 → 跳过
if last_status == "running":
logger.warning(
"调度任务 [%s] skipped_concurrent上次执行仍在运行中",
task_id,
)
continue
# 2. 间隔检查:最小运行间隔未到 → 跳过并推进 next_run_at
if effective_interval_seconds > 0 and last_run_at is not None:
elapsed = (now - last_run_at).total_seconds()
if elapsed < effective_interval_seconds:
# 推进 next_run_at = last_run_at + interval
next_run_at_pushed = last_run_at + timedelta(
seconds=effective_interval_seconds
)
with conn.cursor() as cur:
cur.execute(
"""
UPDATE scheduled_tasks
SET next_run_at = %s,
updated_at = NOW()
WHERE id = %s
""",
(next_run_at_pushed, task_id),
)
conn.commit()
logger.info(
"调度任务 [%s] skipped_interval最小间隔未到"
"(已过 %.0fs / 需 %dsnext_run_at 推进至 %s",
task_id,
elapsed,
effective_interval_seconds,
next_run_at_pushed,
)
continue
# 3. 正常入队
try:
queue_id = task_queue.enqueue(config, site_id, schedule_id=task_id)
logger.info(
@@ -224,7 +303,6 @@ class Scheduler:
continue
# 更新调度任务状态
now = datetime.now(timezone.utc)
next_run = calculate_next_run(schedule_cfg, now)
with conn.cursor() as cur:
@@ -269,6 +347,9 @@ class Scheduler:
# 在线程池中执行同步数据库操作,避免阻塞事件循环
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, self.check_and_enqueue)
# CHANGE 2026-03-23 | 同时检查 trigger_jobs 中到期的 cron/interval 任务
from app.services.trigger_scheduler import check_scheduled_jobs
await loop.run_in_executor(None, check_scheduled_jobs)
except Exception:
logger.exception("Scheduler 循环迭代异常")

View File

@@ -26,6 +26,7 @@ from typing import Any
# 禁止 from ..config import ETL_PROJECT_PATH值拷贝reload 后过期)
from .. import config as _config_module
from ..database import get_connection
from psycopg2.extras import Json
from ..schemas.tasks import TaskConfigSchema
from ..services.cli_builder import cli_builder
@@ -184,6 +185,7 @@ class TaskExecutor:
started_at=started_at,
command=command_str_with_host,
schedule_id=schedule_id,
config_json=config.model_dump(mode="json"),
)
exit_code: int | None = None
@@ -249,6 +251,9 @@ class TaskExecutor:
error_log="\n".join(stderr_lines),
)
# CHANGE 2026-03-22 | 释放内存缓冲区,防止长期运行内存泄漏
self.cleanup(execution_id)
def _run_subprocess(
self,
cmd: list[str],
@@ -379,6 +384,7 @@ class TaskExecutor:
started_at: datetime,
command: str,
schedule_id: str | None = None,
config_json: dict | None = None,
) -> None:
"""插入一条执行日志记录running 状态)。"""
try:
@@ -396,12 +402,13 @@ class TaskExecutor:
if row and row[0]:
effective_schedule_id = str(row[0])
# CHANGE 2026-03-22 | 存储完整 TaskConfig JSON供 rerun 还原原始参数
cur.execute(
"""
INSERT INTO task_execution_log
(id, queue_id, site_id, task_codes, status,
started_at, command, schedule_id)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
started_at, command, schedule_id, config)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
(
execution_id,
@@ -412,6 +419,7 @@ class TaskExecutor:
started_at,
command,
effective_schedule_id,
Json(config_json) if config_json else None,
),
)
conn.commit()
@@ -475,6 +483,115 @@ class TaskExecutor:
self._log_buffers.pop(execution_id, None)
self._subscribers.pop(execution_id, None)
# ------------------------------------------------------------------
# 优雅关闭:终止所有子进程并回写状态
# ------------------------------------------------------------------
async def shutdown(self, timeout: float = 3.0) -> int:
"""优雅关闭:终止所有正在运行的子进程,等待回写完成。
Args:
timeout: 等待子进程退出的超时秒数,超时后强制 kill。
Returns:
被终止的进程数量。
"""
running_ids = list(self._processes.keys())
if not running_ids:
return 0
logger.info(
"优雅关闭:终止 %d 个运行中的子进程,超时 %.1fs",
len(running_ids), timeout,
)
# 先发 terminate 信号
for eid, proc in list(self._processes.items()):
if proc.poll() is None:
try:
proc.terminate()
logger.info("已发送 terminate 信号: %s (pid=%s)", eid, proc.pid)
except ProcessLookupError:
pass
# 等待子进程退出(给 finally 块执行的机会)
import time
deadline = time.monotonic() + timeout
for eid, proc in list(self._processes.items()):
remaining = deadline - time.monotonic()
if remaining > 0 and proc.poll() is None:
try:
proc.wait(timeout=remaining)
except Exception:
pass
# 超时后强制 kill 仍存活的进程
for eid, proc in list(self._processes.items()):
if proc.poll() is None:
try:
proc.kill()
logger.warning("强制 kill: %s (pid=%s)", eid, proc.pid)
except ProcessLookupError:
pass
# 注意execute() 的 finally 块会在 run_in_executor 返回后执行,
# 此处不需要手动回写——asyncio 事件循环关闭前会处理。
# 但如果 finally 来不及执行recover_stale() 会在下次启动时兜底。
count = len(running_ids)
logger.info("优雅关闭完成,已终止 %d 个子进程", count)
return count
# ------------------------------------------------------------------
# 启动时僵尸任务清理
# ------------------------------------------------------------------
def recover_stale(self) -> int:
"""启动时清理本机的僵尸任务status=running 但进程已不存在)。
仅清理 command 中包含本机主机名标识 [hostname] 的记录。
Returns:
被标记为 interrupted 的记录数量。
"""
# CHANGE 2026-03-22 | 启动时僵尸清理,仅限本机
host_tag = f"[{_INSTANCE_HOST}]"
try:
conn = get_connection()
try:
with conn.cursor() as cur:
cur.execute(
"""
UPDATE task_execution_log
SET status = 'interrupted',
finished_at = NOW(),
error_log = COALESCE(error_log, '')
|| E'\n[recover_stale] 后端重启,进程已丢失,标记为 interrupted'
WHERE status = 'running'
AND command LIKE %s
RETURNING id
""",
(f"{host_tag}%",),
)
rows = cur.fetchall()
count = len(rows)
conn.commit()
finally:
conn.close()
if count > 0:
ids = [str(r[0]) for r in rows]
logger.warning(
"启动清理:%d 条僵尸任务标记为 interrupted: %s",
count, ", ".join(ids),
)
else:
logger.info("启动清理:无僵尸任务")
return count
except Exception:
logger.exception("启动清理僵尸任务失败")
return 0
# 全局单例
task_executor = TaskExecutor()

View File

@@ -11,6 +11,8 @@
import json
import logging
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -51,6 +53,7 @@ def _insert_history(
)
@trace_service(description_zh="执行任务过期检查", description_en="Run task expiry check")
def run() -> dict:
"""
有效期轮询主流程。

File diff suppressed because it is too large Load Diff

View File

@@ -2,6 +2,23 @@
# - 2026-03-20 | Prompt: H2 FDW→直连ETL统一改造 | get_task_list() 中 2 处、get_task_list_v2() 中 1 处、
# get_task_detail() 中 1 处 fdw_etl.v_dim_member / v_dws_member_assistant_relation_index
# 改为直连 ETL 库查询 app.v_* RLS 视图。使用 fdw_queries._fdw_context()。
# - 2026-03-24 | Prompt: 修复小程序前端没有档位进度 | _build_performance_summary 中 tier_nodes
# 从 cfg_performance_tier 配置表读取(不再依赖 salary_calc 的空列表),
# next_tier_hours/tier_completed 根据 effective_hours 和 tier_nodes 实时计算。
# - 2026-03-24 | Prompt: bonus_money 公式修正 | bonus_money 改为基础课节省 + 打赏课节省:
# 基础课 = next_tier_min_hours × (当前档 base_deduction - 下一档 base_deduction)
# 打赏课 = bonus_hours × incentive_rate × (当前档 bonus_deduction_ratio - 下一档)。
# - 2026-03-25 | Prompt: 保底 relationship_building 任务 | get_task_list_v2() 中新增 SQL 层面
# 排除 RS 范围外的 relationship_building 任务Step 0 预查 ETL RS 排除列表 → COUNT/分页
# 查询加 NOT (task_type='relationship_building' AND member_id=ANY(exclude)) 条件),
# 替代原内存过滤方案,修复跨页 total 不准确问题。
# - 2026-03-25 | Prompt: 绩效页→任务详情页按 member_id 查询 | 新增 get_task_by_member()
# 按 (assistant_id, member_id, site_id, status='active') 查询,多条时取优先级最高的一条,
# 复用 get_task_detail() 返回完整详情。
# - 2026-03-25 | Prompt: 任务详情服务记录6项改进 | get_task_detail() 改造:
# (1) 统计范围改为近60天列表不限(2) 预估规则当月且日期≤5号
# (3) AI 文案从 ai_analysis.summary 传到前端(不再硬编码);
# (4) drinks 字段透传到 service_records。
"""
任务管理服务
@@ -21,6 +38,7 @@ from fastapi import HTTPException
from app.services import fdw_queries
from app.services.task_generator import compute_heart_icon
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -74,6 +92,8 @@ def _get_assistant_id(conn, user_id: int, site_id: int) -> int:
SELECT assistant_id
FROM auth.user_assistant_binding
WHERE user_id = %s AND site_id = %s AND assistant_id IS NOT NULL
AND is_removed = false
ORDER BY id DESC
LIMIT 1
""",
(user_id, site_id),
@@ -128,6 +148,7 @@ def _verify_task_ownership(
return task
@trace_service("获取任务列表", "Get task list")
async def get_task_list(user_id: int, site_id: int) -> list[dict]:
"""
获取助教的任务列表(含有效 + 已放弃)。
@@ -241,6 +262,7 @@ async def get_task_list(user_id: int, site_id: int) -> list[dict]:
conn.close()
@trace_service("置顶任务", "Pin task")
async def pin_task(task_id: int, user_id: int, site_id: int) -> dict:
"""
置顶任务。
@@ -282,6 +304,7 @@ async def pin_task(task_id: int, user_id: int, site_id: int) -> dict:
conn.close()
@trace_service("取消置顶", "Unpin task")
async def unpin_task(task_id: int, user_id: int, site_id: int) -> dict:
"""
取消置顶。
@@ -313,6 +336,7 @@ async def unpin_task(task_id: int, user_id: int, site_id: int) -> dict:
conn.close()
@trace_service("放弃任务", "Abandon task")
async def abandon_task(
task_id: int, user_id: int, site_id: int, reason: str
) -> dict:
@@ -364,6 +388,7 @@ async def abandon_task(
conn.close()
@trace_service("取消放弃", "Cancel abandon")
async def cancel_abandon(task_id: int, user_id: int, site_id: int) -> dict:
"""
取消放弃。
@@ -439,18 +464,21 @@ _COURSE_TYPE_CLASS_MAP: dict[str, str] = {
}
# 维客线索 category → tag_color 映射
# CHANGE 2026-03-24 | 值改为前端 clue-card 组件 CSS 类名后缀primary/success/...
# 不再用十六进制颜色——WXSS 类名 `clue-tag-#0052d9` 无效。
_CATEGORY_COLOR_MAP: dict[str, str] = {
"客户基础": "#0052d9",
"客户基础信息": "#0052d9",
"消费习惯": "#e34d59",
"玩法偏好": "#00a870",
"促销偏好": "#ed7b2f",
"促销接受": "#ed7b2f",
"社交关系": "#0594fa",
"重要反馈": "#a25eb5",
"客户基础": "primary",
"客户基础信息": "primary",
"消费习惯": "error",
"玩法偏好": "success",
"促销偏好": "orange",
"促销接受": "orange",
"社交关系": "purple",
"重要反馈": "error",
}
@trace_service(description_zh="map_course_type_class", description_en="Map Course Type Class")
def map_course_type_class(raw_course_type: str | None) -> str:
"""将原始课程类型映射为统一枚举值(不带 tag- 前缀)。"""
if not raw_course_type:
@@ -458,6 +486,7 @@ def map_course_type_class(raw_course_type: str | None) -> str:
return _COURSE_TYPE_CLASS_MAP.get(raw_course_type.strip(), "basic")
@trace_service(description_zh="compute_income_trend", description_en="Compute Income Trend")
def compute_income_trend(current_income: float, prev_income: float) -> tuple[str, str]:
"""
计算收入趋势。
@@ -472,6 +501,7 @@ def compute_income_trend(current_income: float, prev_income: float) -> tuple[str
return trend, direction
@trace_service(description_zh="sanitize_tag", description_en="Sanitize Tag")
def sanitize_tag(raw_tag: str | None) -> str:
"""去除 tag 中的换行符,多行标签使用空格分隔。"""
if not raw_tag:
@@ -507,6 +537,7 @@ def _format_time(dt: datetime | None) -> str | None:
# ---------------------------------------------------------------------------
@trace_service("获取扩展版任务列表", "Get task list v2")
async def get_task_list_v2(
user_id: int,
site_id: int,
@@ -533,36 +564,85 @@ async def get_task_list_v2(
try:
assistant_id = _get_assistant_id(conn, user_id, site_id)
# ── 0. 预加载 RS 范围参数 + 需排除的 relationship_building member_id ──
# CHANGE 2026-03-25 | 分页准确性修复:在 SQL 层面排除 RS 范围外的保底任务,
# 而非在内存中过滤(内存过滤会导致 total 跨页不准确)。
# 先查 ETL 获取该助教所有关系对的 RS 值,筛出不满足范围的 member_id
# 然后在 SQL COUNT + 分页查询中排除这些 (task_type, member_id) 组合。
from app.services.task_generator import load_params as _load_tg_params
try:
tg_params = _load_tg_params(conn, site_id)
except Exception:
logger.warning("加载任务生成器参数失败,使用默认值", exc_info=True)
tg_params = {"rs_min_for_relationship": 1.0, "rs_max_for_relationship": 6.0}
rb_rs_min = Decimal(str(tg_params.get("rs_min_for_relationship", 1.0)))
rb_rs_max = Decimal(str(tg_params.get("rs_max_for_relationship", 6.0)))
# 查询该助教所有 RS 值,筛出不满足展示范围的 member_id
rb_exclude_member_ids: list[int] = []
try:
from app.services.fdw_queries import _fdw_context
with _fdw_context(conn, site_id) as cur:
cur.execute(
"""
SELECT member_id, COALESCE(rs_display, 0) AS rs
FROM app.v_dws_member_assistant_relation_index
WHERE assistant_id = %s
""",
(assistant_id,),
)
for row in cur.fetchall():
rs_val = Decimal(str(row[1]))
if not (rb_rs_min < rs_val < rb_rs_max):
rb_exclude_member_ids.append(row[0])
except Exception:
logger.warning("ETL 查询 RS 排除列表失败,降级为不排除", exc_info=True)
# ── 1. 查询任务列表(带分页 + 总数) ──
# 状态映射:前端 pending → active
db_status = "active" if status == "pending" else status
# 构建排除条件relationship_building + member_id 不在 RS 范围内
# 当排除列表为空时不加额外条件
exclude_clause = ""
query_params_count: list = [site_id, assistant_id, db_status]
query_params_page: list = [site_id, assistant_id, db_status]
if rb_exclude_member_ids:
exclude_clause = (
" AND NOT (task_type = 'relationship_building' AND member_id = ANY(%s))"
)
query_params_count.append(rb_exclude_member_ids)
query_params_page.append(rb_exclude_member_ids)
with conn.cursor() as cur:
# 总数
# 总数(已排除 RS 范围外的保底任务)
cur.execute(
"""
f"""
SELECT COUNT(*)
FROM biz.coach_tasks
WHERE site_id = %s AND assistant_id = %s AND status = %s
{exclude_clause}
""",
(site_id, assistant_id, db_status),
query_params_count,
)
total = cur.fetchone()[0]
# 分页查询
# 分页查询(同样排除)
offset = (page - 1) * page_size
query_params_page.extend([page_size, offset])
cur.execute(
"""
f"""
SELECT id, task_type, status, priority_score, is_pinned,
expires_at, created_at, member_id, abandon_reason
FROM biz.coach_tasks
WHERE site_id = %s AND assistant_id = %s AND status = %s
{exclude_clause}
ORDER BY is_pinned DESC,
priority_score DESC NULLS LAST,
created_at ASC
LIMIT %s OFFSET %s
""",
(site_id, assistant_id, db_status, page_size, offset),
query_params_page,
)
tasks = cur.fetchall()
conn.commit()
@@ -580,50 +660,27 @@ async def get_task_list_v2(
member_ids = list({t[7] for t in tasks})
# ── 2. FDW 批量查询会员信息 ──
# ── 2-5+8. 单连接批量查询所有 ETL 数据 ──
# CHANGE 2026-03-23 | 性能优化:合并 7 次独立 ETL 连接为 1 次
member_info_map: dict[int, dict] = {}
try:
member_info_map = fdw_queries.get_member_info(conn, site_id, member_ids)
except Exception:
logger.warning("FDW 查询会员信息失败", exc_info=True)
# ── 3. FDW 批量查询余额(优雅降级) ──
balance_map: dict[int, Decimal] = {}
try:
balance_map = fdw_queries.get_member_balance(conn, site_id, member_ids)
except Exception:
logger.warning("FDW 查询余额失败", exc_info=True)
# ── 4. FDW 批量查询 lastVisitDays优雅降级 ──
last_visit_map: dict[int, int | None] = {}
try:
last_visit_map = fdw_queries.get_last_visit_days(conn, site_id, member_ids)
except Exception:
logger.warning("FDW 查询 lastVisitDays 失败", exc_info=True)
# ── 5. RS 指数(用于 heart_score ──
# CHANGE 2026-03-20 | H2 FDW→直连ETL | fdw_etl → app直连 ETL 库)
rs_map: dict[int, Decimal] = {}
recent60d_map: dict[int, dict] = {}
batch_data: dict | None = None
try:
from app.services.fdw_queries import _fdw_context
with _fdw_context(conn, site_id) as cur:
cur.execute(
"""
SELECT member_id, COALESCE(rs_display, 0)
FROM app.v_dws_member_assistant_relation_index
WHERE assistant_id = %s AND member_id = ANY(%s)
""",
(assistant_id, member_ids),
)
for row in cur.fetchall():
rs_map[row[0]] = Decimal(str(row[1]))
batch_data = fdw_queries.batch_query_for_task_list(
conn, site_id, assistant_id, member_ids,
datetime.now().year, datetime.now().month,
)
member_info_map = batch_data["member_info"]
balance_map = batch_data["balance"]
last_visit_map = batch_data["last_visit"]
rs_map = batch_data["rs"]
wbi_map = batch_data.get("wbi", {})
recent60d_map = batch_data.get("recent60d", {})
except Exception:
logger.warning("ETL 查询 RS 指数失败", exc_info=True)
try:
conn.rollback()
except Exception:
pass
logger.warning("ETL 批量查询失败,降级为空数据", exc_info=True)
# ── 6. 查询 ai_cache 获取 aiSuggestion优雅降级 ──
ai_suggestion_map: dict[int, str] = {}
@@ -673,8 +730,11 @@ async def get_task_list_v2(
except Exception:
logger.warning("查询备注存在性失败", exc_info=True)
# ── 8. 绩效概览 ──
performance = _build_performance_summary(conn, site_id, assistant_id)
# ── 8. 绩效概览(使用批量查询的预取数据) ──
# CHANGE 2026-03-23 | 复用 batch_data 避免额外 3 次 ETL 连接
performance = _build_performance_summary(
conn, site_id, assistant_id, batch_data=batch_data,
)
# ── 9. 组装 items ──
items = []
@@ -685,7 +745,17 @@ async def get_task_list_v2(
info = member_info_map.get(member_id, {})
customer_name = info.get("nickname") or info.get("member_name") or "未知客户"
rs_score = rs_map.get(member_id, Decimal("0"))
balance = balance_map.get(member_id)
wbi = wbi_map.get(member_id, {})
last_visit_days_val = last_visit_map.get(member_id)
ideal_interval = wbi.get("ideal_interval_days")
recent60d = recent60d_map.get(member_id, {})
# CHANGE 2026-03-24 | 预期天数ideal_interval_days - last_visit_days
# 正数=距预期到店还有余量,负数=已逾期
expected_days: int | None = None
if ideal_interval is not None and last_visit_days_val is not None:
expected_days = round(ideal_interval - last_visit_days_val)
items.append({
"id": task_id,
@@ -699,9 +769,15 @@ async def get_task_list_v2(
"is_pinned": bool(is_pinned),
"has_note": task_id in has_note_set,
"status": task_status,
"last_visit_days": last_visit_map.get(member_id),
"last_visit_days": last_visit_days_val,
"balance": float(balance) if balance is not None else None,
"ai_suggestion": ai_suggestion_map.get(member_id),
"expected_days": expected_days,
"ideal_interval_days": round(ideal_interval) if ideal_interval is not None else None,
# CHANGE 2026-03-27 | 近60天服务汇总口径同 task-detail serviceSummary
# 无记录时返回 0.0 而非 None确保前端始终能显示数值
"recent60d_hours": recent60d.get("hours", 0.0),
"recent60d_income": recent60d.get("income", 0.0),
})
return {
@@ -716,67 +792,208 @@ async def get_task_list_v2(
conn.close()
def _build_performance_summary(conn, site_id: int, assistant_id: int) -> dict:
def _build_performance_summary(
conn, site_id: int, assistant_id: int, *, batch_data: dict | None = None,
) -> dict:
"""
构建绩效概览PerformanceSummary
从 fdw_queries.get_salary_calc 获取当月和上月数据,
计算收入趋势
CHANGE 2026-03-23: 支持 batch_data 参数复用预查询数据,避免额外 ETL 连接。
当 batch_data 为 None 时(如无任务的空列表场景),回退到独立查询
课时/档位/客户数从 monthly_summary每日更新取实时数据
不再依赖月初结算的 salary_calc。收入仍从 salary_calc 取(如有)。
"""
now = datetime.now()
year, month = now.year, now.month
# 当月绩效
salary = None
try:
salary = fdw_queries.get_salary_calc(conn, site_id, assistant_id, year, month)
except Exception:
logger.warning("FDW 查询当月绩效失败", exc_info=True)
if batch_data:
# 复用批量查询的预取数据
summary = batch_data.get("monthly_summary")
salary = batch_data.get("salary_cur")
prev_salary = batch_data.get("salary_prev")
prev_month = month - 1 if month > 1 else 12
else:
# 回退:独立查询(无任务时的空列表场景)
summary = None
try:
summary = fdw_queries.get_monthly_summary(conn, site_id, assistant_id, year, month)
except Exception:
logger.warning("FDW 查询当月 monthly_summary 失败", exc_info=True)
# 上月绩效(用于收入趋势)
prev_year, prev_month = (year, month - 1) if month > 1 else (year - 1, 12)
prev_salary = None
try:
prev_salary = fdw_queries.get_salary_calc(conn, site_id, assistant_id, prev_year, prev_month)
except Exception:
logger.warning("FDW 查询上月绩效失败", exc_info=True)
salary = None
try:
salary = fdw_queries.get_salary_calc(conn, site_id, assistant_id, year, month)
except Exception:
logger.warning("FDW 查询当月 salary_calc 失败", exc_info=True)
prev_year, prev_month = (year, month - 1) if month > 1 else (year - 1, 12)
prev_salary = None
try:
prev_salary = fdw_queries.get_salary_calc(conn, site_id, assistant_id, prev_year, prev_month)
except Exception:
logger.warning("FDW 查询上月绩效失败", exc_info=True)
# 收入:优先 salary_calc无则为 0月中尚未结算属正常
current_income = salary["total_income"] if salary else 0.0
prev_income = prev_salary["total_income"] if prev_salary else 0.0
income_trend, income_trend_dir = compute_income_trend(current_income, prev_income)
tier_nodes = salary["tier_nodes"] if salary and salary.get("tier_nodes") else [0]
# tier_nodes 可能是 JSON 字符串或列表
if isinstance(tier_nodes, str):
# CHANGE 2026-03-24 | 档位节点从 cfg_performance_tier 配置表构建,不再依赖 salary_calc
# feiqiu-data-rules 规则 6: 绩效档位必须从配置表读取,禁止硬编码
# intent: 修复前端 tier_nodes=[0] 导致进度条无刻度的 bug
tiers: list[dict] = []
if batch_data and batch_data.get("performance_tiers"):
tiers = batch_data["performance_tiers"]
else:
try:
tier_nodes = json.loads(tier_nodes)
except (json.JSONDecodeError, TypeError):
tier_nodes = [0]
tiers = fdw_queries.get_performance_tiers(conn, site_id)
except Exception:
logger.warning("查询 cfg_performance_tier 失败", exc_info=True)
# 构建 tier_nodes: 各档位的 min_hours如 [0, 120, 150, 180, 210]
tier_nodes = [t["min_hours"] for t in tiers] if tiers else [0]
# 课时/档位/客户数:从 monthly_summary 取实时值
total_hours = summary["effective_hours"] if summary else 0.0
basic_hours = summary["base_hours"] if summary else 0.0
bonus_hours = summary["bonus_hours"] if summary else 0.0
total_customers = summary["unique_customers"] if summary else 0
current_tier = summary["tier_id"] if summary else (salary["tier_index"] if salary else 0)
coach_level = summary["coach_level"] if summary else (salary["coach_level"] if salary else "")
# next_tier_hours / tier_completed: 根据 effective_hours 和 tier_nodes 计算
tier_completed = False
next_tier_hours = 0.0
if tiers:
# 找到当前所在档位的下一档 min_hours
matched_next = None
for t in tiers:
if t["min_hours"] > total_hours:
matched_next = t["min_hours"]
break
if matched_next is not None:
next_tier_hours = matched_next
else:
# 已达到或超过最高档
tier_completed = True
next_tier_hours = tiers[-1]["min_hours"]
# bonus_money: 达到下一档后因抽成降低能多拿的钱(基础课 + 打赏课)
# CHANGE 2026-03-24 | 公式:
# 基础课节省 = next_tier_min_hours × (当前档 base_deduction - 下一档 base_deduction)
# 打赏课节省 = 当前打赏课时 × bonus_course_price × (当前档 bonus_ratio - 下一档 bonus_ratio)
# bonus_money = 基础课节省 + 打赏课节省
# intent: 展示升档的实际收益激励(替代已过期的 sprint_bonus
# assumptions: base_deduction/bonus_deduction_ratio 从 cfg_performance_tier 读取;
# bonus_course_price 从 salary_calc.incentive_rate 读取(禁止硬编码 190
bonus_money = 0.0
if not tier_completed and tiers and len(tiers) >= 2:
# 找到当前所在档位和下一档
current_tier_data = None
next_tier_data = None
for i, t in enumerate(tiers):
if t["min_hours"] > total_hours:
next_tier_data = t
current_tier_data = tiers[i - 1] if i > 0 else tiers[0]
break
if current_tier_data and next_tier_data:
# 基础课节省:用下一档的 min_hours升档后整月课时都按新抽成算
base_ded_diff = current_tier_data.get("base_deduction", 0) - next_tier_data.get("base_deduction", 0)
base_saving = next_tier_data["min_hours"] * base_ded_diff if base_ded_diff > 0 else 0.0
# 打赏课节省:当前打赏课时 × 单价 × 抽成比例差
bonus_ratio_diff = (
current_tier_data.get("bonus_deduction_ratio", 0)
- next_tier_data.get("bonus_deduction_ratio", 0)
)
bonus_course_price = salary.get("incentive_rate", 0.0) if salary else 0.0
bonus_saving = bonus_hours * bonus_course_price * bonus_ratio_diff if bonus_ratio_diff > 0 else 0.0
bonus_money = round(base_saving + bonus_saving, 2)
return {
"total_hours": salary["total_hours"] if salary else 0.0,
"total_hours": total_hours,
"total_income": current_income,
"total_customers": salary["total_customers"] if salary else 0,
"total_customers": total_customers,
"month_label": f"{month}",
"tier_nodes": [float(n) for n in tier_nodes] if tier_nodes else [0],
"basic_hours": salary["basic_hours"] if salary else 0.0,
"bonus_hours": salary["bonus_hours"] if salary else 0.0,
"current_tier": salary["tier_index"] if salary else 0,
"next_tier_hours": salary["next_tier_hours"] if salary else 0.0,
"tier_completed": salary["tier_completed"] if salary else False,
"bonus_money": 0.0 if (salary and salary.get("tier_completed")) else (salary["bonus_money"] if salary else 0.0),
"basic_hours": basic_hours,
"bonus_hours": bonus_hours,
"current_tier": current_tier,
"next_tier_hours": next_tier_hours,
"tier_completed": tier_completed,
"bonus_money": bonus_money,
"income_trend": income_trend,
"income_trend_dir": income_trend_dir,
"prev_month": f"{prev_month}",
"current_tier_label": salary["coach_level"] if salary else "",
"current_tier_label": coach_level,
}
# ---------------------------------------------------------------------------
# 按 member_id 查询最高优先级 active 任务
# ---------------------------------------------------------------------------
# 任务类型优先级排序(数值越小越优先)
_TASK_TYPE_SORT_ORDER: dict[str, int] = {
"high_priority_recall": 0,
"priority_recall": 1,
"follow_up_visit": 2,
"relationship_building": 3,
}
@trace_service("按会员查询任务详情", "Get task detail by member")
async def get_task_by_member(
member_id: int,
user_id: int,
site_id: int,
) -> dict:
"""
按 member_id 查询当前助教的最高优先级 active 任务,返回完整详情。
逻辑:
1. 查询 coach_tasks WHERE assistant_id + member_id + site_id + status='active'
2. 多条时按 _TASK_TYPE_SORT_ORDER 取优先级最高的一条
3. 复用 get_task_detail() 返回完整详情
权限校验:无 active 任务 → 404。
"""
conn = _get_connection()
try:
assistant_id = _get_assistant_id(conn, user_id, site_id)
with conn.cursor() as cur:
cur.execute(
"""
SELECT id, task_type
FROM biz.coach_tasks
WHERE site_id = %s AND assistant_id = %s AND member_id = %s
AND status = 'active'
""",
(site_id, assistant_id, member_id),
)
rows = cur.fetchall()
if not rows:
raise HTTPException(status_code=404, detail="该会员无活跃任务")
# 按优先级排序,取最高的一条
best = min(rows, key=lambda r: _TASK_TYPE_SORT_ORDER.get(r[1], 99))
task_id = best[0]
finally:
conn.close()
# 复用完整详情逻辑
return await get_task_detail(task_id, user_id, site_id)
# ---------------------------------------------------------------------------
# RNS1.1get_task_detailTASK-2 任务详情完整版)
# ---------------------------------------------------------------------------
@trace_service("获取任务详情", "Get task detail")
async def get_task_detail(
task_id: int,
user_id: int,
@@ -831,6 +1048,15 @@ async def get_task_detail(
info = member_info_map.get(member_id, {})
customer_name = info.get("nickname") or "未知客户"
customer_phone = info.get("mobile") or ""
# 余额(用于前端储值等级展示)
balance = Decimal("0")
try:
balance_map = fdw_queries.get_member_balance(conn, site_id, [member_id])
balance = balance_map.get(member_id, Decimal("0"))
except Exception:
logger.warning("FDW 查询会员余额失败", exc_info=True)
# RS 指数
# CHANGE 2026-03-20 | H2 FDW→直连ETL | fdw_etl → app直连 ETL 库)
@@ -862,6 +1088,7 @@ async def get_task_detail(
SELECT id, category, summary, detail, source
FROM public.member_retention_clue
WHERE member_id = %s AND site_id = %s
AND is_hidden = false
ORDER BY recorded_at DESC
""",
(member_id, site_id),
@@ -874,7 +1101,7 @@ async def get_task_detail(
emoji, text = _extract_emoji_and_text(summary_raw)
tag = sanitize_tag(category)
tag_color = _CATEGORY_COLOR_MAP.get(tag, "#999999")
tag_color = _CATEGORY_COLOR_MAP.get(tag, "primary")
retention_clues.append({
"tag": tag,
@@ -936,17 +1163,33 @@ async def get_task_detail(
except Exception:
logger.warning("FDW 查询服务记录失败", exc_info=True)
# CHANGE 2026-03-25 | 统计范围近60天列表不限
# 预估规则:当月且日期 ≤ 5号
from datetime import date, timedelta
today = date.today()
cutoff_60d = today - timedelta(days=60)
is_estimate_month = today.day <= 5
service_records = []
total_hours = 0.0
total_income = 0.0
total_hours_60d = 0.0
total_income_60d = 0.0
count_60d = 0
for rec in service_records_raw:
hours = rec.get("service_hours", 0.0)
income = rec.get("income", 0.0)
total_hours += hours
total_income += income
# 判断是否在60天窗口内用于统计
settle_time = rec.get("settle_time")
in_60d = False
if settle_time:
rec_date = settle_time.date() if hasattr(settle_time, "date") else None
if rec_date and rec_date >= cutoff_60d:
in_60d = True
total_hours_60d += hours
total_income_60d += income
count_60d += 1
# 时间格式化
settle_time = rec.get("settle_time")
date_str = ""
if settle_time:
if hasattr(settle_time, "strftime"):
@@ -957,6 +1200,13 @@ async def get_task_detail(
raw_course_type = rec.get("course_type", "")
type_class = map_course_type_class(raw_course_type)
# CHANGE 2026-03-25 | 预估规则:当月且日期 ≤ 5号
rec_is_estimate = False
if settle_time and is_estimate_month:
rec_date_val = settle_time.date() if hasattr(settle_time, "date") else None
if rec_date_val and rec_date_val.year == today.year and rec_date_val.month == today.month:
rec_is_estimate = True
service_records.append({
"table": rec.get("table_name"),
"type": raw_course_type or "基础课",
@@ -965,15 +1215,15 @@ async def get_task_detail(
"duration": hours,
"duration_raw": rec.get("service_hours_raw"),
"income": income,
"is_estimate": rec.get("is_estimate"),
"drinks": None,
"is_estimate": rec_is_estimate,
"drinks": rec.get("drinks"),
"date": date_str,
})
avg_income = total_income / len(service_records) if service_records else 0.0
avg_income = total_income_60d / count_60d if count_60d else 0.0
service_summary = {
"total_hours": round(total_hours, 2),
"total_income": round(total_income, 2),
"total_hours": round(total_hours_60d, 2),
"total_income": round(total_income_60d, 2),
"avg_income": round(avg_income, 2),
}
@@ -984,7 +1234,8 @@ async def get_task_detail(
with conn.cursor() as cur:
cur.execute(
"""
SELECT id, content, type, ai_score, created_at
SELECT id, content, type, ai_score, created_at, score,
rating_service_willingness, rating_revisit_likelihood
FROM biz.notes
WHERE task_id = %s
ORDER BY created_at DESC
@@ -997,6 +1248,10 @@ async def get_task_detail(
note_type = note_row[2] or "normal"
# type → tag_type/tag_label 映射
tag_label = "回访" if note_type == "follow_up" else "普通"
# CHANGE 2026-03-27 | 备注联调:补充 score用户星星评分和 ai_score
# ai_score 是 AI 应用 6 评分1-10score 是用户手动星星评分1-5
user_score = note_row[5]
ai_score_val = note_row[3]
notes.append({
"id": note_row[0],
@@ -1004,7 +1259,8 @@ async def get_task_detail(
"tag_type": note_type,
"tag_label": tag_label,
"created_at": _format_time(note_row[4]) or "",
"score": note_row[3],
"score": user_score,
"ai_score": ai_score_val,
})
conn.commit()
except Exception:
@@ -1014,6 +1270,7 @@ async def get_task_detail(
return {
"id": task_id,
"customer_name": customer_name,
"customer_phone": customer_phone,
"customer_avatar": "/assets/images/avatar-default.png",
"task_type": task_type,
"task_type_label": _TASK_TYPE_LABEL_MAP.get(task_type, task_type),
@@ -1024,6 +1281,7 @@ async def get_task_detail(
"has_note": has_note,
"status": task_status,
"customer_id": member_id,
"balance": float(balance),
"retention_clues": retention_clues,
"talking_points": talking_points,
"service_summary": service_summary,

View File

@@ -424,6 +424,10 @@ class TaskQueue:
# 被吞掉后_update_queue_status_from_log 读到的 execution_log 仍是
# running导致 task_queue 永远卡住,后续任务全部排队。
self._ensure_not_stuck_running(queue_id)
# CHANGE 2026-03-22 | P16: 回写 scheduled_tasks.last_status
# 成功时同时更新 last_success_at失败时不更新
if schedule_id:
self._update_schedule_status(queue_id, schedule_id)
def _get_pending_site_ids(self) -> list[int]:
@@ -533,6 +537,73 @@ class TaskQueue:
finally:
conn.close()
def _update_schedule_status(self, queue_id: str, schedule_id: str) -> None:
"""回写 scheduled_tasks.last_status成功时同时更新 last_success_at。
CHANGE 2026-03-22 | P16: 任务完成回调区分成功/失败
- 成功completed→ last_status='completed', last_success_at=NOW()
- 失败failed 等)→ last_status='failed'last_success_at 不变)
"""
conn = get_connection()
try:
with conn.cursor() as cur:
# 从 task_queue 读取最终状态
cur.execute(
"SELECT status FROM task_queue WHERE id = %s",
(queue_id,),
)
row = cur.fetchone()
if not row:
return
final_status = row[0]
if final_status == "completed":
cur.execute(
"""
UPDATE scheduled_tasks
SET last_status = 'completed',
last_success_at = NOW(),
updated_at = NOW()
WHERE id = %s
""",
(schedule_id,),
)
elif final_status in ("failed", "error"):
cur.execute(
"""
UPDATE scheduled_tasks
SET last_status = 'failed',
updated_at = NOW()
WHERE id = %s
""",
(schedule_id,),
)
else:
# 其他状态(如 running 兜底后仍未变)不更新
logger.warning(
"调度任务 [%s] 回写跳过task_queue [%s] 最终状态=%s",
schedule_id, queue_id, final_status,
)
return
conn.commit()
logger.info(
"调度任务 [%s] 状态回写:%squeue_id=%s",
schedule_id, final_status, queue_id,
)
except Exception:
logger.exception(
"_update_schedule_status 异常 schedule_id=%s queue_id=%s",
schedule_id, queue_id,
)
try:
conn.rollback()
except Exception:
pass
finally:
conn.close()
def _recover_zombie_tasks(self, max_running_minutes: int = 180) -> None:
"""恢复僵尸 running 任务:超过阈值时间仍为 running 的任务强制标记 failed。

View File

@@ -11,6 +11,8 @@ from __future__ import annotations
from dataclasses import dataclass, field
from app.trace.decorators import trace_service
@dataclass(frozen=True)
class TaskDefinition:
@@ -95,7 +97,7 @@ DWS_TASKS: list[TaskDefinition] = [
TaskDefinition("DWS_FINANCE_DISCOUNT_DETAIL", "折扣明细", "汇总折扣明细", "财务", "DWS"),
# CHANGE [2026-02-19] intent: 同步 ETL 侧合并——原 DWS_RETENTION_CLEANUP / DWS_MV_REFRESH_* 已合并为 DWS_MAINTENANCE
TaskDefinition("DWS_MAINTENANCE", "DWS 维护", "刷新物化视图 + 清理过期留存数据", "通用", "DWS", requires_window=False, is_common=False),
# CHANGE [2026-07-20] intent: 注册 DWS 库存汇总任务(日/周/月),依赖 DWD goods_stock_summary 加载完成(需求 12.9
# CHANGE [2026-03-27] intent: 注册 DWS 库存汇总任务(日/周/月)(需求 12.9
TaskDefinition("DWS_GOODS_STOCK_DAILY", "库存日报", "按日粒度汇总商品库存数据", "库存", "DWS"),
TaskDefinition("DWS_GOODS_STOCK_WEEKLY", "库存周报", "按周粒度汇总商品库存数据", "库存", "DWS"),
TaskDefinition("DWS_GOODS_STOCK_MONTHLY", "库存月报", "按月粒度汇总商品库存数据", "库存", "DWS"),
@@ -110,6 +112,8 @@ INDEX_TASKS: list[TaskDefinition] = [
# CHANGE [2026-02-19] intent: 补充说明 RelationIndexTask 产出 RS/OS/MS/ML 四个子指数
TaskDefinition("DWS_RELATION_INDEX", "关系指数 (RS)", "产出 RS/OS/MS/ML 四个子指数", "指数", "INDEX"),
TaskDefinition("DWS_SPENDING_POWER_INDEX", "消费力指数 (SPI)", "计算会员消费力指数", "指数", "INDEX"),
# CHANGE 2026-03-29 | DWS_TASK_ENGINE编排后端任务引擎完成检查→过期检查→任务生成
TaskDefinition("DWS_TASK_ENGINE", "任务引擎", "编排后端任务引擎:完成检查→过期检查→任务生成", "指数", "INDEX", requires_window=False, is_common=True),
]
# ── 工具类任务定义 ────────────────────────────────────────────
@@ -131,14 +135,17 @@ ALL_TASKS: list[TaskDefinition] = ODS_TASKS + DWD_TASKS + DWS_TASKS + INDEX_TASK
_TASK_BY_CODE: dict[str, TaskDefinition] = {t.code: t for t in ALL_TASKS}
@trace_service(description_zh="获取所有任务定义", description_en="Get all task definitions")
def get_all_tasks() -> list[TaskDefinition]:
return ALL_TASKS
@trace_service(description_zh="按代码获取任务", description_en="Get task by code")
def get_task_by_code(code: str) -> TaskDefinition | None:
return _TASK_BY_CODE.get(code.upper())
@trace_service(description_zh="按领域分组获取任务", description_en="Get tasks grouped by domain")
def get_tasks_grouped_by_domain() -> dict[str, list[TaskDefinition]]:
"""按业务域分组返回任务列表"""
groups: dict[str, list[TaskDefinition]] = {}
@@ -147,6 +154,7 @@ def get_tasks_grouped_by_domain() -> dict[str, list[TaskDefinition]]:
return groups
@trace_service(description_zh="按层级获取任务", description_en="Get tasks by layer")
def get_tasks_by_layer(layer: str) -> list[TaskDefinition]:
"""获取指定层的所有任务"""
layer_upper = layer.upper()
@@ -167,6 +175,7 @@ FLOW_LAYER_MAP: dict[str, list[str]] = {
}
@trace_service(description_zh="获取兼容任务列表", description_en="Get compatible tasks")
def get_compatible_tasks(flow_id: str) -> list[TaskDefinition]:
"""根据 Flow 包含的层,返回兼容的任务列表"""
layers = FLOW_LAYER_MAP.get(flow_id, [])
@@ -225,6 +234,7 @@ DWD_TABLES: list[DwdTableDefinition] = [
]
@trace_service(description_zh="按领域分组获取DWD表", description_en="Get DWD tables grouped by domain")
def get_dwd_tables_grouped_by_domain() -> dict[str, list[DwdTableDefinition]]:
"""按业务域分组返回 DWD 表定义"""
groups: dict[str, list[DwdTableDefinition]] = {}

View File

@@ -14,6 +14,8 @@ import logging
from datetime import datetime, timedelta, timezone
from typing import Any, Callable
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -26,11 +28,13 @@ def _get_connection():
_JOB_REGISTRY: dict[str, Callable] = {}
@trace_service(description_zh="register_job", description_en="Register Job")
def register_job(job_type: str, handler: Callable) -> None:
"""注册 job_type 对应的执行函数。"""
_JOB_REGISTRY[job_type] = handler
@trace_service(description_zh="update_job_last_run_at", description_en="Update Job Last Run At")
def update_job_last_run_at(cur, job_id: int) -> None:
"""
在 handler 的事务内更新 last_run_at。
@@ -45,6 +49,7 @@ def update_job_last_run_at(cur, job_id: int) -> None:
)
@trace_service(description_zh="触发调度事件", description_en="Fire scheduler event")
def fire_event(event_name: str, payload: dict[str, Any] | None = None) -> int:
"""
触发事件驱动型任务。
@@ -94,6 +99,7 @@ def fire_event(event_name: str, payload: dict[str, Any] | None = None) -> int:
return executed
@trace_service(description_zh="检查定时任务", description_en="Check scheduled jobs")
def check_scheduled_jobs() -> int:
"""
检查 cron/interval 类型的到期 job 并执行。
@@ -138,16 +144,30 @@ def check_scheduled_jobs() -> int:
cur.execute(
"""
UPDATE biz.trigger_jobs
SET last_run_at = NOW(), next_run_at = %s
SET last_run_at = NOW(), next_run_at = %s, last_error = NULL
WHERE id = %s
""",
(next_run, job_id),
)
conn.commit()
executed += 1
except Exception:
except Exception as exc:
logger.exception("触发器 %s 执行失败", job_name)
conn.rollback()
# 记录错误到 last_error 字段
try:
with conn.cursor() as cur:
cur.execute(
"UPDATE biz.trigger_jobs SET last_error = %s WHERE id = %s",
(str(exc)[:500], job_id),
)
conn.commit()
except Exception:
logger.debug("记录 last_error 失败", exc_info=True)
try:
conn.rollback()
except Exception:
pass
finally:
conn.close()
@@ -179,3 +199,151 @@ def _calculate_next_run(
trigger_config.get("cron_expression", "0 7 * * *"), now
)
return None # event 类型无 next_run_at
def check_startup_jobs() -> list[dict]:
    """
    At process startup, check whether each enabled cron/interval job has
    already run today and return the ones that have not (for a startup
    banner). Nothing is executed automatically here — the user confirms
    execution through the admin page.
    """
    from datetime import date

    conn = _get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                """
                SELECT id, job_name, trigger_condition, trigger_config,
                       last_run_at, description
                FROM biz.trigger_jobs
                WHERE status = 'enabled'
                AND trigger_condition IN ('cron', 'interval')
                ORDER BY id
                """
            )
            rows = cur.fetchall()
            conn.commit()

        today = date.today()
        not_run_today: list[dict] = []
        for job_id, job_name, condition, _config, last_run_at, description in rows:
            # last_run_at may be tz-aware; compare only the calendar date.
            executed_today = (
                last_run_at is not None
                and hasattr(last_run_at, "date")
                and last_run_at.date() == today
            )
            if executed_today:
                continue
            not_run_today.append({
                "id": job_id,
                "job_name": job_name,
                "trigger_condition": condition,
                "description": description or job_name,
                "last_run_at": str(last_run_at) if last_run_at else "从未执行",
            })
        return not_run_today
    finally:
        conn.close()
def run_job_by_id(job_id: int) -> dict:
    """
    Manually trigger the job with the given id (called from the admin page).

    Returns {"success": bool, "message": str}; handler failures are caught,
    logged, and reported in the message rather than raised.
    """
    conn = _get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                """
                SELECT id, job_type, job_name, trigger_condition, trigger_config
                FROM biz.trigger_jobs
                WHERE id = %s
                """,
                (job_id,),
            )
            row = cur.fetchone()
            conn.commit()
        if not row:
            return {"success": False, "message": f"任务 {job_id} 不存在"}
        _, job_type, job_name, trigger_condition, trigger_config = row
        # Handlers are registered at startup via register_job(); an unknown
        # job_type means this process never registered a handler for it.
        handler = _JOB_REGISTRY.get(job_type)
        if not handler:
            return {"success": False, "message": f"任务 {job_name} 未注册处理器"}
        try:
            handler()
            # Success: stamp last_run_at, recompute next_run_at, clear last_error.
            # NOTE: a failure in this UPDATE is also caught by the except below
            # and reported as a job failure.
            next_run = _calculate_next_run(trigger_condition, trigger_config)
            with conn.cursor() as cur:
                cur.execute(
                    """
                    UPDATE biz.trigger_jobs
                    SET last_run_at = NOW(), next_run_at = %s, last_error = NULL
                    WHERE id = %s
                    """,
                    (next_run, job_id),
                )
                conn.commit()
            return {"success": True, "message": f"任务 {job_name} 执行成功"}
        except Exception as exc:
            logger.exception("手动触发 %s 失败", job_name)
            conn.rollback()
            # Best-effort: persist the (truncated) error text; swallow any
            # failure while doing so and roll back to keep the connection clean.
            try:
                with conn.cursor() as cur:
                    cur.execute(
                        "UPDATE biz.trigger_jobs SET last_error = %s WHERE id = %s",
                        (str(exc)[:500], job_id),
                    )
                    conn.commit()
            except Exception:
                try:
                    conn.rollback()
                except Exception:
                    pass
            return {"success": False, "message": f"任务 {job_name} 执行失败: {str(exc)[:200]}"}
    finally:
        conn.close()
def list_trigger_jobs() -> list[dict]:
    """Return every row of biz.trigger_jobs as a dict (admin page listing)."""
    columns = (
        "id", "job_type", "job_name", "trigger_condition", "trigger_config",
        "last_run_at", "next_run_at", "status", "description", "last_error",
        "created_at",
    )
    conn = _get_connection()
    try:
        with conn.cursor() as cur:
            cur.execute(
                """
                SELECT id, job_type, job_name, trigger_condition, trigger_config,
                       last_run_at, next_run_at, status, description, last_error,
                       created_at
                FROM biz.trigger_jobs
                ORDER BY id
                """
            )
            rows = cur.fetchall()
            conn.commit()
        jobs: list[dict] = []
        for row in rows:
            record = dict(zip(columns, row))
            # Timestamp columns are serialized to ISO-8601 (None stays None).
            for ts_field in ("last_run_at", "next_run_at", "created_at"):
                value = record[ts_field]
                record[ts_field] = value.isoformat() if value else None
            jobs.append(record)
        return jobs
    finally:
        conn.close()

View File

@@ -13,6 +13,7 @@ import logging
import httpx
from app.config import WX_APPID, WX_SECRET
from app.trace.decorators import trace_service
logger = logging.getLogger(__name__)
@@ -45,6 +46,7 @@ class WeChatAuthError(Exception):
return _WX_ERROR_MAP.get(self.errcode, (401, "微信登录失败"))[1]
@trace_service(description_zh="微信登录code换session", description_en="WeChat code to session")
async def code2session(code: str) -> dict:
"""
调用微信 code2Session 接口。