feat: 累积功能变更 — 聊天集成、租户管理、小程序更新、ETL 增强、迁移脚本

包含多个会话的累积代码变更:
- backend: AI 聊天服务、触发器调度、认证增强、WebSocket、调度器最小间隔
- admin-web: ETL 状态页、任务管理、调度配置、登录优化
- miniprogram: 看板页面、聊天集成、UI 组件、导航更新
- etl: DWS 新任务(finance_area_daily/board_cache)、连接器增强
- tenant-admin: 项目初始化
- db: 19 个迁移脚本(etl_feiqiu 11 + zqyy_app 8)
- packages/shared: 枚举和工具函数更新
- tools: 数据库工具、报表生成、健康检查
- docs: PRD/架构/部署/合约文档更新

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Neo
2026-04-06 00:03:48 +08:00
parent 70324d8542
commit 6f8f12314f
515 changed files with 76604 additions and 7456 deletions

File diff suppressed because it is too large.

View File

@@ -0,0 +1,280 @@
"""
从测试数据库导出完整 DDL按 schema 分文件写入 docs/database/ddl/。
以数据库现状为准,整合所有 schema/表/约束/索引/视图/物化视图/序列/FDW 配置。
输出文件:
docs/database/ddl/etl_feiqiu__meta.sql
docs/database/ddl/etl_feiqiu__ods.sql
docs/database/ddl/etl_feiqiu__dwd.sql
docs/database/ddl/etl_feiqiu__core.sql
docs/database/ddl/etl_feiqiu__dws.sql
docs/database/ddl/etl_feiqiu__app.sql
docs/database/ddl/zqyy_app__public.sql
docs/database/ddl/zqyy_app__auth.sql
docs/database/ddl/zqyy_app__biz.sql
docs/database/ddl/fdw.sql
用法cd C:\\NeoZQYY && python scripts/ops/gen_consolidated_ddl.py
"""
import os, sys
from pathlib import Path
from datetime import date
import psycopg2
# ── 环境 ──────────────────────────────────────────────────────────────────
from dotenv import load_dotenv
ROOT = Path(__file__).resolve().parent.parent.parent
load_dotenv(ROOT / ".env")
ETL_DSN = os.environ.get("TEST_DB_DSN") or os.environ.get("PG_DSN")
APP_DSN = os.environ.get("TEST_APP_DB_DSN") or os.environ.get("APP_DB_DSN")
if not ETL_DSN:
sys.exit("ERROR: TEST_DB_DSN / PG_DSN 未配置")
if not APP_DSN:
sys.exit("ERROR: TEST_APP_DB_DSN / APP_DB_DSN 未配置")
OUTPUT_DIR = ROOT / "docs" / "database" / "ddl"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
FDW_FILE = ROOT / "db" / "fdw" / "setup_fdw.sql"
TODAY = date.today().isoformat()
# ── SQL 模板 ──────────────────────────────────────────────────────────────
SQL_TABLES = """
WITH cols AS (
SELECT table_schema, table_name,
string_agg(
format(E' %%I %%s%%s%%s',
column_name,
CASE WHEN data_type = 'USER-DEFINED' THEN udt_name
WHEN data_type = 'ARRAY' THEN udt_name
WHEN character_maximum_length IS NOT NULL THEN data_type || '(' || character_maximum_length || ')'
WHEN numeric_precision IS NOT NULL AND data_type IN ('numeric','decimal') THEN data_type || '(' || numeric_precision || ',' || numeric_scale || ')'
ELSE data_type END,
CASE WHEN column_default IS NOT NULL THEN ' DEFAULT ' || column_default ELSE '' END,
CASE WHEN is_nullable = 'NO' THEN ' NOT NULL' ELSE '' END
), E',\\n' ORDER BY ordinal_position
) as col_defs
FROM information_schema.columns
WHERE table_schema = %s
AND table_name IN (SELECT table_name FROM information_schema.tables WHERE table_schema = %s AND table_type = 'BASE TABLE')
GROUP BY table_schema, table_name
)
SELECT format(E'CREATE TABLE %%I.%%I (\\n%%s\\n);', table_schema, table_name, col_defs) as ddl
FROM cols ORDER BY table_name;
"""
SQL_CONSTRAINTS = """
SELECT n.nspname as schema, conrelid::regclass as tbl, conname,
pg_get_constraintdef(c.oid) as def, contype
FROM pg_constraint c
JOIN pg_namespace n ON n.oid = c.connamespace
WHERE n.nspname = %s AND contype IN ('p','u','f')
ORDER BY conrelid::regclass::text, contype, conname;
"""
SQL_INDEXES = """
SELECT indexname, indexdef
FROM pg_indexes
WHERE schemaname = %s
AND indexname NOT IN (SELECT conname FROM pg_constraint WHERE contype IN ('p','u'))
ORDER BY tablename, indexname;
"""
SQL_SEQUENCES = """
SELECT sequence_name, data_type
FROM information_schema.sequences
WHERE sequence_schema = %s
ORDER BY sequence_name;
"""
SQL_VIEWS = """
SELECT viewname, definition
FROM pg_views
WHERE schemaname = %s
ORDER BY viewname;
"""
SQL_MATVIEWS = """
SELECT matviewname, definition
FROM pg_matviews
WHERE schemaname = %s
ORDER BY matviewname;
"""
SQL_MV_INDEXES = """
SELECT indexname, indexdef
FROM pg_indexes
WHERE schemaname = %s
AND tablename LIKE 'mv_%%'
ORDER BY tablename, indexname;
"""
SQL_TABLE_COUNT = """
SELECT count(*) FROM information_schema.tables
WHERE table_schema = %s AND table_type = 'BASE TABLE';
"""
# ── 辅助函数 ──────────────────────────────────────────────────────────────
def query(conn, sql, params=None):
    """Execute *sql* (with optional bind *params*) on *conn*; return all rows."""
    with conn.cursor() as cursor:
        cursor.execute(sql, params)
        rows = cursor.fetchall()
    return rows
def section(f, title, level=1):
    """Write a boxed SQL comment header for *title* to file-like *f*.

    Level 1 draws the rule with '='; any other level uses '-'.
    """
    rule = ("=" if level == 1 else "-") * 77
    f.write(f"\n-- {rule}\n-- {title}\n-- {rule}\n\n")
def write_sequences(f, conn, schema):
    """Emit CREATE SEQUENCE statements for every sequence in *schema*."""
    seqs = query(conn, SQL_SEQUENCES, (schema,))
    if not seqs:
        return
    f.write("-- 序列\n")
    f.writelines(
        f"CREATE SEQUENCE IF NOT EXISTS {schema}.{seq_name} AS {seq_type};\n"
        for seq_name, seq_type in seqs
    )
    f.write("\n")
def write_tables(f, conn, schema):
    """Emit reconstructed CREATE TABLE statements (columns only) for *schema*."""
    table_ddls = query(conn, SQL_TABLES, (schema, schema))
    if not table_ddls:
        return
    f.write("-- 表\n")
    for (table_ddl,) in table_ddls:
        f.write(f"{table_ddl}\n\n")
def write_constraints(f, conn, schema):
    """Emit ALTER TABLE ... ADD CONSTRAINT for PK / unique / FK constraints."""
    cons = query(conn, SQL_CONSTRAINTS, (schema,))
    if not cons:
        return
    f.write("-- 约束(主键 / 唯一 / 外键)\n")
    for _schema, table, con_name, con_def, _type in cons:
        f.write(f"ALTER TABLE {table} ADD CONSTRAINT {con_name} {con_def};\n")
    f.write("\n")
def write_indexes(f, conn, schema):
    """Emit standalone index definitions for *schema*.

    Constraint-backed (PK/unique) indexes are already filtered out by
    SQL_INDEXES, since the constraint statements re-create them.
    """
    idx_rows = query(conn, SQL_INDEXES, (schema,))
    if not idx_rows:
        return
    f.write("-- 索引\n")
    f.writelines(f"{idx_def};\n" for _name, idx_def in idx_rows)
    f.write("\n")
def write_views(f, conn, schema):
    """Emit CREATE OR REPLACE VIEW statements for every view in *schema*."""
    view_rows = query(conn, SQL_VIEWS, (schema,))
    if not view_rows:
        return
    f.write("-- 视图\n")
    for view_name, view_def in view_rows:
        body = view_def.strip()
        f.write(f"CREATE OR REPLACE VIEW {schema}.{view_name} AS\n{body}\n;\n\n")
def write_matviews(f, conn, schema):
    """Emit CREATE MATERIALIZED VIEW statements plus their indexes."""
    mv_rows = query(conn, SQL_MATVIEWS, (schema,))
    if not mv_rows:
        return
    f.write("-- 物化视图\n")
    for mv_name, mv_def in mv_rows:
        body = mv_def.strip()
        f.write(f"CREATE MATERIALIZED VIEW {schema}.{mv_name} AS\n{body}\n;\n\n")
    # Indexes on the materialized views themselves.
    # NOTE(review): SQL_MV_INDEXES matches by name (tablename LIKE 'mv_%'),
    # which may overlap with indexes already emitted by write_indexes for the
    # same schema — confirm whether duplicate emission is intended.
    mv_idx_rows = query(conn, SQL_MV_INDEXES, (schema,))
    if mv_idx_rows:
        f.write("-- 物化视图索引\n")
        f.writelines(f"{idx_def};\n" for _name, idx_def in mv_idx_rows)
        f.write("\n")
def write_schema_file(conn, db_name, schema, label, views_only=False):
    """Generate a standalone DDL file for one schema.

    Args:
        conn: open psycopg2 connection to the database owning *schema*.
        db_name: logical database name, used in the filename and header.
        schema: schema to export.
        label: human-readable description placed in the header line.
        views_only: when True, export plain views only (used for the RLS
            view layer, which has no tables of its own).

    Returns:
        Path of the file that was written.
    """
    out_path = OUTPUT_DIR / f"{db_name}__{schema}.sql"
    # The table count only feeds the console summary line.
    n_tables = query(conn, SQL_TABLE_COUNT, (schema,))[0][0]
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(f"""\
-- =============================================================================
-- {db_name} / {schema}{label}
-- 生成日期:{TODAY}
-- 来源:测试库(通过脚本自动导出)
-- =============================================================================
CREATE SCHEMA IF NOT EXISTS {schema};
""")
        if views_only:
            write_views(f, conn, schema)
        else:
            write_sequences(f, conn, schema)
            write_tables(f, conn, schema)
            write_constraints(f, conn, schema)
            write_indexes(f, conn, schema)
            write_views(f, conn, schema)
            write_matviews(f, conn, schema)
    kb = out_path.stat().st_size / 1024
    summary = "仅视图" if views_only else f"{n_tables}"
    print(f"{out_path.name:<35s} {kb:>6.1f} KB ({summary})")
    return out_path
def write_fdw_file():
    """Copy the FDW setup SQL into the output directory as fdw.sql.

    Falls back to a placeholder comment when the source file is missing,
    so the generated file set is always complete.

    Returns:
        Path of the file that was written.
    """
    out_path = OUTPUT_DIR / "fdw.sql"
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(f"""\
-- =============================================================================
-- FDW 跨库映射(在 zqyy_app 中执行)
-- 生成日期:{TODAY}
-- 来源db/fdw/setup_fdw.sql
-- =============================================================================
""")
        if FDW_FILE.exists():
            f.write(FDW_FILE.read_text(encoding="utf-8"))
            f.write("\n")
        else:
            f.write("-- FDW 配置文件未找到db/fdw/setup_fdw.sql\n")
    kb = out_path.stat().st_size / 1024
    print(f"{'fdw.sql':<35s} {kb:>6.1f} KB")
    return out_path
# ── Main flow ─────────────────────────────────────────────────────────────
def main():
    """Export per-schema DDL files for both databases plus the FDW config.

    Opens one connection per database, writes the ten output files, then
    removes the obsolete single-file export if it is still around.
    """
    etl_conn = psycopg2.connect(ETL_DSN)
    app_conn = psycopg2.connect(APP_DSN)
    # FIX: guarantee both connections are closed even if an export step
    # raises (previously a failure mid-way leaked both connections).
    try:
        print(f"输出目录:{OUTPUT_DIR}\n")
        # etl_feiqiu: six warehouse-layer schemas
        write_schema_file(etl_conn, "etl_feiqiu", "meta", "ETL 调度元数据")
        write_schema_file(etl_conn, "etl_feiqiu", "ods", "原始数据层")
        write_schema_file(etl_conn, "etl_feiqiu", "dwd", "明细数据层")
        write_schema_file(etl_conn, "etl_feiqiu", "core", "跨门店标准化维度/事实")
        write_schema_file(etl_conn, "etl_feiqiu", "dws", "汇总数据层")
        write_schema_file(etl_conn, "etl_feiqiu", "app", "RLS 视图层", views_only=True)
        # zqyy_app: application schemas
        write_schema_file(app_conn, "zqyy_app", "public", "小程序业务表")
        write_schema_file(app_conn, "zqyy_app", "auth", "用户认证与权限")
        write_schema_file(app_conn, "zqyy_app", "biz", "核心业务表(任务/备注/触发器)")
        # FDW config (copied, not queried)
        write_fdw_file()
    finally:
        etl_conn.close()
        app_conn.close()
    # Remove the superseded single-file export, if present.
    old_file = ROOT / "docs" / "database" / "consolidated_ddl.sql"
    if old_file.exists():
        old_file.unlink()
        print(f"\n🗑️ 已删除旧文件:{old_file.name}")
    print(f"\n✅ 完成,共 10 个文件")

if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
"""
Apply the FDW configuration to zqyy_app and test_zqyy_app.
- zqyy_app      -> setup_fdw.sql      (points at etl_feiqiu)
- test_zqyy_app -> setup_fdw_test.sql (points at test_etl_feiqiu)
"""
import os
import psycopg2

# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables / .env before this script leaves a trusted machine.
CONN = dict(host="100.64.0.4", port=5432, user="local-Python", password="Neo-local-1991125")
BASE = r"C:\NeoZQYY"
# Real password substituted for the '***' placeholder inside the SQL files.
APP_READER_PWD = "AppR3ad_2026!"
TARGETS = [
    ("zqyy_app", os.path.join(BASE, "db", "fdw", "setup_fdw.sql")),
    ("test_zqyy_app", os.path.join(BASE, "db", "fdw", "setup_fdw_test.sql")),
]
for dbname, sql_path in TARGETS:
    print(f"\n{'='*60}")
    print(f"执行 FDW 配置: {dbname} <- {os.path.basename(sql_path)}")
    print(f"{'='*60}")
    # FIX: use a context manager so the file handle is always closed
    # (was a bare open() with no close()).
    with open(sql_path, encoding="utf-8") as fh:
        sql = fh.read()
    # Substitute the password placeholder.
    sql = sql.replace("password '***'", f"password '{APP_READER_PWD}'")
    conn = psycopg2.connect(**CONN, dbname=dbname)
    conn.autocommit = True
    cur = conn.cursor()
    # Split into statements on trailing semicolons, skipping comment and
    # blank lines.
    # NOTE(review): this splitter does not understand dollar-quoted bodies or
    # semicolons inside string literals — verify the FDW files stay simple.
    statements = []
    current = []
    for line in sql.split("\n"):
        stripped = line.strip()
        if stripped.startswith("--") or not stripped:
            continue
        current.append(line)
        if stripped.endswith(";"):
            statements.append("\n".join(current))
            current = []
    # FIX: a final statement without a trailing ';' was silently dropped.
    if current:
        statements.append("\n".join(current))
    success = 0
    skip = 0
    fail = 0
    for stmt in statements:
        try:
            cur.execute(stmt)
            first_line = stmt.strip().split("\n")[0][:80]
            print(f" [OK] {first_line}")
            success += 1
        except psycopg2.errors.DuplicateObject as e:
            # Object already exists: count as a skip, keep going.
            conn.rollback()
            print(f" [SKIP] 已存在: {str(e).strip().split(chr(10))[0]}")
            skip += 1
        except Exception as e:
            conn.rollback()
            print(f" [FAIL] {str(e).strip().split(chr(10))[0]}")
            print(f" SQL: {stmt[:100]}")
            fail += 1
    # Post-run verification: extension present, servers defined, mapped tables.
    cur.execute("SELECT 1 FROM pg_extension WHERE extname = 'postgres_fdw'")
    fdw_ext = cur.fetchone() is not None
    cur.execute("SELECT srvname FROM pg_foreign_server")
    servers = [r[0] for r in cur.fetchall()]
    cur.execute(
        "SELECT count(*) FROM information_schema.tables "
        "WHERE table_schema = 'fdw_etl'"
    )
    fdw_tables = cur.fetchone()[0]
    print(f"\n 结果: {success} OK, {skip} SKIP, {fail} FAIL")
    print(f" 验证: fdw扩展={fdw_ext}, servers={servers}, fdw_etl表数={fdw_tables}")
    # FIX: close the cursor explicitly before closing the connection.
    cur.close()
    conn.close()
print("\n完成!")

View File

@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
"""Verify the state of the four databases: table counts and schema distribution."""
import psycopg2

# SECURITY(review): hard-coded credentials — move to environment/.env.
CONN = dict(host="100.64.0.4", port=5432, user="local-Python", password="Neo-local-1991125")
DBS = ["etl_feiqiu", "test_etl_feiqiu", "zqyy_app", "test_zqyy_app"]
for db in DBS:
    c = None
    try:
        c = psycopg2.connect(**CONN, dbname=db)
        cur = c.cursor()
        # Per-schema table counts, excluding system schemas.
        cur.execute(
            "SELECT schemaname, count(*) FROM pg_tables "
            "WHERE schemaname NOT IN ('pg_catalog','information_schema') "
            "GROUP BY schemaname ORDER BY schemaname"
        )
        rows = cur.fetchall()
        total = sum(r[1] for r in rows)
        schemas = ", ".join(f"{r[0]}({r[1]})" for r in rows)
        print(f"[OK] {db}: {total} tables | {schemas}")
        # Materialized view count (only printed when non-zero).
        cur.execute(
            "SELECT count(*) FROM pg_matviews "
            "WHERE schemaname NOT IN ('pg_catalog','information_schema')"
        )
        mv_count = cur.fetchone()[0]
        if mv_count:
            print(f" matviews: {mv_count}")
    except Exception as e:
        print(f"[FAIL] {db}: {e}")
    finally:
        # FIX: close the connection even when a query fails mid-way
        # (close() used to sit inside the try and was skipped on error).
        if c is not None:
            c.close()
print("\n--- 配置文件指向 ---")
print("ETL .env PG_DSN -> test_etl_feiqiu (已确认)")
print("根 .env -> PG_DSN=test_etl_feiqiu, APP_DB_DSN=test_zqyy_app")
print("后端 .env.local -> APP_DB_NAME=test_zqyy_app, ETL_DB_NAME=test_etl_feiqiu")
print("后端 config.py 默认值 -> test_zqyy_app / test_etl_feiqiu")
print("FDW 生产 -> setup_fdw.sql (etl_feiqiu)")
print("FDW 测试 -> setup_fdw_test.sql (test_etl_feiqiu)")

View File

@@ -0,0 +1,160 @@
"""
DDL 迁移验证脚本 — admin-web-enhancement spec (Task 17.2)
验证 NS4.1 + P16 迁移在 test_zqyy_app 上的正确性:
1. biz.connectors / biz.tenants / biz.sites / biz.site_code_history 四张表存在且字段正确
2. scheduled_tasks 新增 3 个字段
3. auth._archived_site_code_mapping 存在
"""
import os
import sys
from dotenv import load_dotenv
load_dotenv()
dsn = os.environ.get("APP_DB_DSN", "")
if not dsn:
print("ERROR: APP_DB_DSN 未设置")
sys.exit(1)
if "test_" not in dsn:
print(f"ERROR: APP_DB_DSN 不包含 'test_',拒绝连接非测试库: {dsn}")
sys.exit(1)
import psycopg2 # noqa: E402
conn = psycopg2.connect(dsn)
cur = conn.cursor()
# Accumulated (check name, passed?, detail) triples, reported at the end.
results: list[tuple[str, bool, str]] = []

def check(name: str, sql: str, expected_fn):
    """Run *sql*, feed the rows to *expected_fn*, and record pass/fail.

    *expected_fn* takes the fetched rows and returns an (ok, detail) pair.
    Any exception — from the query or the predicate — is recorded as a
    failure instead of aborting the whole script.
    """
    try:
        cur.execute(sql)
        verdict, detail = expected_fn(cur.fetchall())
    except Exception as e:
        results.append((name, False, f"异常: {e}"))
    else:
        results.append((name, verdict, detail))
# ── 1. biz.connectors 表字段 ──
check(
"biz.connectors 表存在且字段正确",
"""
SELECT column_name FROM information_schema.columns
WHERE table_schema = 'biz' AND table_name = 'connectors'
ORDER BY ordinal_position
""",
lambda rows: (
(cols := [r[0] for r in rows])
and set(cols) == {"id", "connector_key", "display_name", "is_active", "created_at"},
f"字段: {cols}",
),
)
# ── 2. biz.tenants 表字段 ──
check(
"biz.tenants 表存在且字段正确",
"""
SELECT column_name FROM information_schema.columns
WHERE table_schema = 'biz' AND table_name = 'tenants'
ORDER BY ordinal_position
""",
lambda rows: (
(cols := [r[0] for r in rows])
and set(cols)
== {"id", "connector_id", "tenant_id", "tenant_name", "is_active", "created_at", "updated_at"},
f"字段: {cols}",
),
)
# ── 3. biz.sites 表字段 ──
check(
"biz.sites 表存在且字段正确",
"""
SELECT column_name FROM information_schema.columns
WHERE table_schema = 'biz' AND table_name = 'sites'
ORDER BY ordinal_position
""",
lambda rows: (
(cols := [r[0] for r in rows])
and set(cols)
== {
"id", "tenant_id", "site_id", "site_name", "site_code",
"site_label", "is_active", "created_at", "updated_at",
},
f"字段: {cols}",
),
)
# ── 4. biz.site_code_history 表字段 ──
check(
"biz.site_code_history 表存在且字段正确",
"""
SELECT column_name FROM information_schema.columns
WHERE table_schema = 'biz' AND table_name = 'site_code_history'
ORDER BY ordinal_position
""",
lambda rows: (
(cols := [r[0] for r in rows])
and set(cols) == {"id", "site_id", "site_code", "is_current", "created_at", "retired_at"},
f"字段: {cols}",
),
)
# ── 5. scheduled_tasks 新增 3 个字段 ──
check(
"scheduled_tasks 新增字段存在",
"""
SELECT column_name, data_type, column_default
FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = 'scheduled_tasks'
AND column_name IN ('min_run_interval_value', 'min_run_interval_unit', 'last_success_at')
ORDER BY ordinal_position
""",
lambda rows: (
len(rows) == 3,
f"找到 {len(rows)} 个字段: {[r[0] for r in rows]}",
),
)
# ── 6. auth._archived_site_code_mapping 存在 ──
check(
"auth._archived_site_code_mapping 存在",
"""
SELECT table_name FROM information_schema.tables
WHERE table_schema = 'auth' AND table_name = '_archived_site_code_mapping'
""",
lambda rows: (
len(rows) == 1,
f"找到 {len(rows)} 张表",
),
)
cur.close()
conn.close()
# ── Report: print every check, then the totals; non-zero exit on failure ──
banner = "=" * 60
print("\n" + banner)
print("DDL 迁移验证结果 — admin-web-enhancement")
print(banner)
passed = sum(1 for _, ok, _ in results if ok)
failed = len(results) - passed
for name, ok, detail in results:
    print(f" {'✅ PASS' if ok else '❌ FAIL'} {name}")
    print(f" {detail}")
print(f"\n总计: {passed} 通过, {failed} 失败")
if failed > 0:
    sys.exit(1)
print("全部通过 ✅")