init: 项目初始提交 - NeoZQYY Monorepo 完整代码
This commit is contained in:
0
tests/.gitkeep
Normal file
0
tests/.gitkeep
Normal file
16
tests/README.md
Normal file
16
tests/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# tests/
|
||||
|
||||
## 作用说明
|
||||
|
||||
跨项目集成测试目录,存放需要多个子项目协同验证的端到端测试。各子项目的单元测试放在各自目录内。
|
||||
|
||||
## 内部结构
|
||||
|
||||
- 端到端集成测试(ETL + 后端联调)
|
||||
- FDW 跨库访问验证
|
||||
- 配置加载集成测试
|
||||
|
||||
## Roadmap
|
||||
|
||||
- 补充 ETL→后端→小程序全链路冒烟测试
|
||||
- 补充数据库 schema 一致性自动化检查
|
||||
104
tests/test_property_config_missing.py
Normal file
104
tests/test_property_config_missing.py
Normal file
@@ -0,0 +1,104 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
必需配置缺失检测属性测试
|
||||
|
||||
**Validates: Requirements 4.4**
|
||||
|
||||
Property 4: 必需配置缺失检测
|
||||
对于任意必需配置项,当所有配置层级(.env、.env.local、环境变量、CLI)
|
||||
均未提供该项时,配置加载器应抛出错误,且错误信息中包含该缺失配置项的名称。
|
||||
"""
|
||||
import pytest
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import lists, from_regex
|
||||
|
||||
|
||||
def validate_required_config(required_keys: list[str], config: dict) -> None:
    """Check that every required configuration key is present and non-empty.

    Args:
        required_keys: names of mandatory configuration entries.
        config: the configuration mapping actually loaded.

    Raises:
        ValueError: if any required key is absent or maps to a falsy value;
            the message names every offending key.
    """
    # dict.get returns None for absent keys, so a single truthiness test
    # covers both "missing" and "present but empty".
    absent = [key for key in required_keys if not config.get(key)]
    if absent:
        raise ValueError(f"缺失必需配置项: {', '.join(absent)}")
|
||||
|
||||
|
||||
# Legal configuration key names: an uppercase letter followed by up to 19
# uppercase letters / digits / underscores.
_key_strategy = from_regex(r"[A-Z][A-Z0-9_]{0,19}", fullmatch=True)
|
||||
|
||||
|
||||
@given(
    required_keys=lists(
        _key_strategy,
        min_size=1,
        max_size=5,
        unique=True,
    )
)
@settings(max_examples=100)
def test_missing_required_config_raises_error(required_keys: list[str]):
    """
    Property 4: an empty config dict raises ValueError naming every missing key.

    **Validates: Requirements 4.4**
    """
    empty_config: dict = {}

    # Single invocation: pytest.raises checks the exception type / message
    # prefix AND captures the exception, so the validator no longer has to
    # be called a second time inside a redundant try/except.
    with pytest.raises(ValueError, match="缺失必需配置项") as excinfo:
        validate_required_config(required_keys, empty_config)

    # The error message must mention each individual missing key.
    msg = str(excinfo.value)
    for key in required_keys:
        assert key in msg, (
            f"错误信息应包含缺失配置项 '{key}',但实际信息为: {msg}"
        )
|
||||
|
||||
|
||||
@given(
    required_keys=lists(
        _key_strategy,
        min_size=1,
        max_size=5,
        unique=True,
    )
)
@settings(max_examples=100)
def test_empty_value_treated_as_missing(required_keys: list[str]):
    """
    Property 4: empty-string values count as missing and raise ValueError.

    **Validates: Requirements 4.4**
    """
    # Every required key is present, but each maps to the empty string.
    blank_config = dict.fromkeys(required_keys, "")

    with pytest.raises(ValueError, match="缺失必需配置项"):
        validate_required_config(required_keys, blank_config)
|
||||
|
||||
|
||||
@given(
    required_keys=lists(
        _key_strategy,
        min_size=1,
        max_size=5,
        unique=True,
    )
)
@settings(max_examples=100)
def test_all_required_present_no_error(required_keys: list[str]):
    """
    Property 4, negative direction: when every required key carries a
    non-empty value, the validator must return without raising.

    **Validates: Requirements 4.4**
    """
    fully_populated = {key: f"value_for_{key}" for key in required_keys}
    # Must not raise.
    validate_required_config(required_keys, fully_populated)
|
||||
72
tests/test_property_config_priority.py
Normal file
72
tests/test_property_config_priority.py
Normal file
@@ -0,0 +1,72 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
配置优先级属性测试
|
||||
|
||||
**Validates: Requirements 4.3**
|
||||
|
||||
Property 3: 配置优先级 - .env.local 覆盖
|
||||
对于任意配置项名称和两个不同的值,当根 .env 和应用 .env.local
|
||||
都定义了该配置项时,配置加载器返回的值应等于 .env.local 中的值。
|
||||
|
||||
测试逻辑:
|
||||
1. 使用 hypothesis 生成随机配置项名称(字母数字下划线)和两个不同的值
|
||||
2. 创建临时 .env 文件,写入 KEY=value1
|
||||
3. 创建临时 .env.local 文件,写入 KEY=value2
|
||||
4. 使用 python-dotenv 模拟分层加载(先加载 .env,再加载 .env.local,override=True)
|
||||
5. 验证最终值等于 value2(.env.local 的值)
|
||||
|
||||
不依赖 ETL 的 AppConfig,直接测试 python-dotenv 的分层加载行为,
|
||||
因为这是设计文档中描述的配置隔离机制的基础。
|
||||
"""
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from hypothesis import given, settings, assume
|
||||
from hypothesis.strategies import from_regex
|
||||
|
||||
from dotenv import dotenv_values
|
||||
|
||||
# Strategies: generate legal .env key names and values.
# Key names: an uppercase first letter, then uppercase letters / digits /
# underscores (max length 30).
_key_strategy = from_regex(r"[A-Z][A-Z0-9_]{0,29}", fullmatch=True)

# Values: a printable-ASCII subset excluding newlines, '#' (comment marker)
# and quotes, to avoid dotenv parsing ambiguity.
_value_strategy = from_regex(r"[A-Za-z0-9_./:@\-]{1,50}", fullmatch=True)
|
||||
|
||||
|
||||
@given(key=_key_strategy, val_root=_value_strategy, val_local=_value_strategy)
@settings(max_examples=100)
def test_env_local_overrides_root_env(key: str, val_root: str, val_local: str):
    """
    Property 3: configuration priority — .env.local wins.

    When the root .env and the app .env.local both define the same key,
    layered loading (.env first, then .env.local on top) must yield the
    .env.local value.

    **Validates: Requirements 4.3**
    """
    # Identical values cannot demonstrate override semantics.
    assume(val_root != val_local)

    with tempfile.TemporaryDirectory() as workdir:
        base_path = os.path.join(workdir, ".env")
        override_path = os.path.join(workdir, ".env.local")

        # Root .env holds the shared value.
        with open(base_path, "w", encoding="utf-8") as fh:
            fh.write(f"{key}={val_root}\n")

        # App-local .env.local holds the private override.
        with open(override_path, "w", encoding="utf-8") as fh:
            fh.write(f"{key}={val_local}\n")

        # Layered load: later mapping entries win, mirroring override=True.
        config = {**dotenv_values(base_path), **dotenv_values(override_path)}

        assert config[key] == val_local, (
            f"配置项 '{key}' 应被 .env.local 覆盖为 '{val_local}',"
            f"但实际值为 '{config[key]}'(根 .env 值: '{val_root}')"
        )
|
||||
110
tests/test_property_core_minimal_fields.py
Normal file
110
tests/test_property_core_minimal_fields.py
Normal file
@@ -0,0 +1,110 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Property 7: Core schema 最小字段集
|
||||
Validates: Requirements 7.5
|
||||
|
||||
对于任意 core schema 中的表,其字段数量应严格少于对应 dwd schema 中同名(或对应)表的字段数量。
|
||||
使用 hypothesis 从 core 表列表中随机选取,验证 core 表字段数 < 对应 dwd 表字段数。
|
||||
"""
|
||||
import re
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from hypothesis import given, settings, assume
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SQL 解析工具
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _parse_tables(sql_text: str) -> dict[str, int]:
    """Extract each CREATE TABLE's name and declared column count.

    Only explicitly declared columns are counted; constraint clauses
    (PRIMARY KEY / UNIQUE / CHECK / CONSTRAINT / EXCLUDE / FOREIGN KEY /
    INDEX) are skipped.

    Args:
        sql_text: full DDL text to scan.

    Returns:
        Mapping of lower-cased table name -> column count.
    """
    tables: dict[str, int] = {}
    # Match CREATE TABLE ... ( ... );  (IF NOT EXISTS allowed).
    pattern = re.compile(
        r"CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?(\w+)\s*\((.*?)\);",
        re.DOTALL | re.IGNORECASE,
    )
    constraint_prefixes = ("PRIMARY KEY", "UNIQUE", "CHECK", "CONSTRAINT",
                           "EXCLUDE", "FOREIGN KEY", "INDEX")
    for match in pattern.finditer(sql_text):
        table_name = match.group(1).lower()
        body = match.group(2)
        # Split the body on commas at parenthesis depth 0 only, so type
        # modifiers such as NUMERIC(10,2) or CHECK (a IN (1,2)) do not
        # inflate the column count (the old naive split(",") did).
        parts: list[str] = []
        depth = 0
        start = 0
        for i, ch in enumerate(body):
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
            elif ch == "," and depth == 0:
                parts.append(body[start:i])
                start = i + 1
        parts.append(body[start:])

        col_count = 0
        for part in parts:
            line = part.strip()
            if not line:
                continue
            # Skip constraint / index clauses; everything else is a column.
            if line.upper().startswith(constraint_prefixes):
                continue
            col_count += 1
        tables[table_name] = col_count
    return tables
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# 加载 SQL 文件并建立映射
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# DDL files for the core and dwd schemas.
# NOTE(review): absolute Windows paths — assumes the monorepo lives at
# C:\NeoZQYY; confirm, or derive from an environment variable.
_CORE_SQL = Path(r"C:\NeoZQYY\db\etl_feiqiu\schemas\core.sql")
_DWD_SQL = Path(r"C:\NeoZQYY\db\etl_feiqiu\schemas\dwd.sql")

# Parsed once at import time: table name -> declared column count.
_core_tables = _parse_tables(_CORE_SQL.read_text(encoding="utf-8"))
_dwd_tables = _parse_tables(_DWD_SQL.read_text(encoding="utf-8"))

# core -> dwd mapping (hand-written, because the naming rules differ):
#   dimension tables: same name in core and dwd
#   fact tables:      core.fact_settlement -> dwd.dwd_settlement_head
#                     core.fact_payment    -> dwd.dwd_payment
_CORE_TO_DWD_MAP: dict[str, str] = {
    "dim_site": "dim_site",
    "dim_member": "dim_member",
    "dim_assistant": "dim_assistant",
    "dim_table": "dim_table",
    "dim_goods_category": "dim_goods_category",
    "fact_settlement": "dwd_settlement_head",
    "fact_payment": "dwd_payment",
}

# Pre-check: keep only pairs whose tables exist on both sides.
_valid_pairs: list[tuple[str, str, int, int]] = []
for core_name, dwd_name in _CORE_TO_DWD_MAP.items():
    if core_name in _core_tables and dwd_name in _dwd_tables:
        _valid_pairs.append(
            (core_name, dwd_name, _core_tables[core_name], _dwd_tables[dwd_name])
        )

# Fail fast at import time if nothing is testable.
# NOTE(review): module-level assert is stripped under -O; consider raising.
assert len(_valid_pairs) > 0, (
    f"未找到有效的 core→dwd 映射对。"
    f" core 表: {list(_core_tables.keys())},"
    f" dwd 表: {list(_dwd_tables.keys())}"
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# 属性测试
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@settings(max_examples=100)
@given(pair=sampled_from(_valid_pairs))
def test_core_table_has_fewer_fields_than_dwd(pair):
    """**Validates: Requirements 7.5**

    For any table in the core schema, its column count must be strictly
    smaller than that of the corresponding dwd-schema table.
    """
    # pair = (core table name, dwd table name, core columns, dwd columns)
    core_name, dwd_name, core_count, dwd_count = pair
    assert core_count < dwd_count, (
        f"core.{core_name} 有 {core_count} 个字段,"
        f"但 dwd.{dwd_name} 只有 {dwd_count} 个字段。"
        f" 期望 core 字段数严格少于 dwd。"
    )
|
||||
122
tests/test_property_file_migration.py
Normal file
122
tests/test_property_file_migration.py
Normal file
@@ -0,0 +1,122 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Property 5: 文件迁移完整性
|
||||
|
||||
对于任意源-目标目录映射关系(ETL 业务代码、database 文件、tests 目录),
|
||||
源目录中的每个文件在目标目录的对应位置都应存在且内容一致。
|
||||
|
||||
**Validates: Requirements 5.1, 5.2, 5.3**
|
||||
"""
|
||||
import hashlib
|
||||
import os
|
||||
from typing import List, Tuple
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# Source -> target directory mappings (Req 5.1: ETL business code,
# Req 5.2: database files, Req 5.3: tests directory).
MIGRATION_MAPPINGS: List[Tuple[str, str]] = [
    # ETL business-code directories (Req 5.1)
    (r"C:\ZQYY\FQ-ETL\api", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\api"),
    (r"C:\ZQYY\FQ-ETL\cli", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\cli"),
    (r"C:\ZQYY\FQ-ETL\config", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\config"),
    (r"C:\ZQYY\FQ-ETL\loaders", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\loaders"),
    (r"C:\ZQYY\FQ-ETL\models", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\models"),
    (r"C:\ZQYY\FQ-ETL\orchestration", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\orchestration"),
    (r"C:\ZQYY\FQ-ETL\scd", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\scd"),
    (r"C:\ZQYY\FQ-ETL\tasks", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\tasks"),
    (r"C:\ZQYY\FQ-ETL\utils", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\utils"),
    (r"C:\ZQYY\FQ-ETL\quality", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\quality"),
    # tests subdirectories (Req 5.3) — only the ETL's own unit/integration
    # tests are mapped; monorepo-level property tests (test_property_*.py)
    # live in C:\NeoZQYY\tests\ by design.
    (r"C:\ZQYY\FQ-ETL\tests\unit", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\tests\unit"),
    (r"C:\ZQYY\FQ-ETL\tests\integration", r"C:\NeoZQYY\apps\etl\pipelines\feiqiu\tests\integration"),
]

# Directories excluded from comparison (__pycache__ and friends).
EXCLUDE_DIRS = {"__pycache__", ".pytest_cache", ".hypothesis"}
|
||||
|
||||
|
||||
def _file_hash(filepath: str) -> str:
    """Return the SHA-256 hex digest of the file at *filepath*."""
    digest = hashlib.sha256()
    with open(filepath, "rb") as fh:
        # Stream in 8 KiB chunks so large files never load fully into memory.
        while chunk := fh.read(8192):
            digest.update(chunk)
    return digest.hexdigest()
|
||||
|
||||
|
||||
def _collect_py_files(root_dir: str) -> List[str]:
    """Recursively collect relative paths of all .py files under *root_dir*,
    pruning excluded directories (__pycache__ etc.) from the walk."""
    found: List[str] = []
    for dirpath, subdirs, filenames in os.walk(root_dir):
        # In-place slice assignment prunes the walk itself.
        subdirs[:] = [d for d in subdirs if d not in EXCLUDE_DIRS]
        found.extend(
            os.path.relpath(os.path.join(dirpath, name), root_dir)
            for name in filenames
            if name.endswith(".py")
        )
    return sorted(found)
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(mapping=sampled_from(MIGRATION_MAPPINGS))
def test_all_source_files_exist_in_target(mapping: Tuple[str, str]) -> None:
    """
    Property 5 (existence): every .py file under the source directory must
    exist at the matching relative path under the target directory.

    **Validates: Requirements 5.1, 5.2, 5.3**
    """
    src_dir, dst_dir = mapping

    assert os.path.isdir(src_dir), f"源目录不存在: {src_dir}"
    assert os.path.isdir(dst_dir), f"目标目录不存在: {dst_dir}"

    src_files = _collect_py_files(src_dir)
    assert len(src_files) > 0, f"源目录无 .py 文件: {src_dir}"

    # Collect every relative path with no counterpart in the target tree.
    missing = [
        rel_path
        for rel_path in src_files
        if not os.path.isfile(os.path.join(dst_dir, rel_path))
    ]

    assert not missing, (
        f"目标目录 {dst_dir} 缺少 {len(missing)} 个文件:\n"
        + "\n".join(f" - {f}" for f in missing[:10])
        + (f"\n ... 及其他 {len(missing) - 10} 个" if len(missing) > 10 else "")
    )
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(mapping=sampled_from(MIGRATION_MAPPINGS))
def test_source_and_target_file_content_identical(mapping: Tuple[str, str]) -> None:
    """
    Property 5 (content): corresponding files in the source and target
    directories must be byte-identical (compared via SHA-256).

    **Validates: Requirements 5.1, 5.2, 5.3**
    """
    src_dir, dst_dir = mapping

    assert os.path.isdir(src_dir), f"源目录不存在: {src_dir}"
    assert os.path.isdir(dst_dir), f"目标目录不存在: {dst_dir}"

    # Files absent from the target are deliberately skipped here — the
    # existence property is asserted by the companion test above.
    mismatched = [
        rel_path
        for rel_path in _collect_py_files(src_dir)
        if os.path.isfile(os.path.join(dst_dir, rel_path))
        and _file_hash(os.path.join(src_dir, rel_path))
        != _file_hash(os.path.join(dst_dir, rel_path))
    ]

    assert not mismatched, (
        f"源目录 {src_dir} 与目标目录 {dst_dir} 中 {len(mismatched)} 个文件内容不一致:\n"
        + "\n".join(f" - {f}" for f in mismatched[:10])
        + (f"\n ... 及其他 {len(mismatched) - 10} 个" if len(mismatched) > 10 else "")
    )
|
||||
65
tests/test_property_pyproject_completeness.py
Normal file
65
tests/test_property_pyproject_completeness.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Property 2: Python 子项目配置完整性
|
||||
|
||||
对于任意 uv workspace 声明的 Python 子项目成员,该子项目目录下应存在
|
||||
独立的 pyproject.toml 文件,且文件中包含 [project] 段落。
|
||||
|
||||
Validates: Requirements 3.2
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# Python sub-project members declared by the uv workspace.
WORKSPACE_MEMBERS = [
    "apps/etl/pipelines/feiqiu",
    "apps/backend",
    "packages/shared",
    "gui",
]

# NOTE(review): hard-coded absolute Windows monorepo root — confirm it
# matches the deployment, or derive it from an environment variable.
MONOREPO_ROOT = r"C:\NeoZQYY"
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(member=sampled_from(WORKSPACE_MEMBERS))
def test_pyproject_toml_exists(member: str) -> None:
    """Each workspace member directory must contain its own pyproject.toml."""
    manifest = os.path.join(MONOREPO_ROOT, member, "pyproject.toml")
    assert os.path.isfile(manifest), f"{member}/pyproject.toml 不存在"
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(member=sampled_from(WORKSPACE_MEMBERS))
def test_pyproject_contains_project_section(member: str) -> None:
    """pyproject.toml must contain a [project] section."""
    path = os.path.join(MONOREPO_ROOT, member, "pyproject.toml")
    # Context manager closes the handle deterministically; the previous
    # open(path).read() leaked the file object until GC.
    with open(path, encoding="utf-8") as f:
        content = f.read()
    assert re.search(r"^\[project\]", content, re.MULTILINE), (
        f"{member}/pyproject.toml 缺少 [project] 段落"
    )
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(member=sampled_from(WORKSPACE_MEMBERS))
def test_pyproject_contains_name(member: str) -> None:
    """The [project] section of pyproject.toml must define a name field."""
    path = os.path.join(MONOREPO_ROOT, member, "pyproject.toml")
    # Context manager closes the handle deterministically; the previous
    # open(path).read() leaked the file object until GC.
    with open(path, encoding="utf-8") as f:
        content = f.read()
    assert re.search(r'^name\s*=\s*".+"', content, re.MULTILINE), (
        f"{member}/pyproject.toml 缺少 name 字段"
    )
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(member=sampled_from(WORKSPACE_MEMBERS))
def test_pyproject_contains_version(member: str) -> None:
    """The [project] section of pyproject.toml must define a version field."""
    path = os.path.join(MONOREPO_ROOT, member, "pyproject.toml")
    # Context manager closes the handle deterministically; the previous
    # open(path).read() leaked the file object until GC.
    with open(path, encoding="utf-8") as f:
        content = f.read()
    assert re.search(r'^version\s*=\s*".+"', content, re.MULTILINE), (
        f"{member}/pyproject.toml 缺少 version 字段"
    )
|
||||
69
tests/test_property_readme_structure.py
Normal file
69
tests/test_property_readme_structure.py
Normal file
@@ -0,0 +1,69 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
README.md 结构完整性属性测试
|
||||
|
||||
**Validates: Requirements 1.5**
|
||||
|
||||
Property 1: 对于任意 Monorepo 一级目录,其 README.md 文件应存在
|
||||
且包含"作用说明"、"结构描述"和"Roadmap"三个段落。
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# Monorepo root directory.
# NOTE(review): hard-coded absolute Windows path — confirm against the
# deployment environment.
MONOREPO_ROOT = r"C:\NeoZQYY"

# Top-level directories (as defined by Requirement 1.5).
TOP_LEVEL_DIRS = [
    "apps",
    "gui",
    "packages",
    "db",
    "docs",
    "infra",
    "scripts",
    "samples",
    "tests",
]
|
||||
|
||||
|
||||
@settings(max_examples=100)
@given(dir_name=sampled_from(TOP_LEVEL_DIRS))
def test_readme_structure_completeness(dir_name: str) -> None:
    """
    Property 1: README.md structural completeness.

    **Validates: Requirements 1.5**

    For any top-level directory, verify that:
    1. README.md exists
    2. it contains a "作用说明" (purpose) section heading
    3. it contains an "内部结构" / "结构" (structure) section heading
    4. it contains a "Roadmap" section heading
    """
    readme_path = os.path.join(MONOREPO_ROOT, dir_name, "README.md")

    # README.md must exist.
    assert os.path.isfile(readme_path), (
        f"{dir_name}/README.md 不存在: {readme_path}"
    )

    # Read via a context manager so the handle is closed promptly; the
    # previous open(...).read() leaked the file object until GC.
    with open(readme_path, encoding="utf-8") as f:
        content = f.read()

    # Purpose section heading.
    assert re.search(r"^#{1,3}\s*作用说明", content, re.MULTILINE), (
        f"{dir_name}/README.md 缺少'作用说明'段落"
    )

    # Structure section heading ("内部结构" or bare "结构").
    assert re.search(r"^#{1,3}\s*(内部)?结构", content, re.MULTILINE), (
        f"{dir_name}/README.md 缺少'结构'段落"
    )

    # Roadmap section heading (case-insensitive).
    assert re.search(r"^#{1,3}\s*Roadmap", content, re.MULTILINE | re.IGNORECASE), (
        f"{dir_name}/README.md 缺少'Roadmap'段落"
    )
|
||||
202
tests/test_property_rls_site_id.py
Normal file
202
tests/test_property_rls_site_id.py
Normal file
@@ -0,0 +1,202 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
RLS 按 site_id 隔离属性测试
|
||||
|
||||
**Validates: Requirements 13.2**
|
||||
|
||||
Property 11: 对于任意 app schema 中启用了 RLS 的视图,当会话变量
|
||||
`app.current_site_id` 设置为某个门店 ID 时,查询结果应仅包含该
|
||||
`site_id` 的数据行。
|
||||
|
||||
实现方式:基于 DDL 文件的静态分析(不需要实际数据库连接)
|
||||
- 解析 app.sql 中所有 ENABLE ROW LEVEL SECURITY 的表
|
||||
- 解析所有 CREATE POLICY 语句
|
||||
- 验证每个启用 RLS 的表都有包含 site_id 过滤的策略
|
||||
- 验证策略的 USING 子句使用 current_setting('app.current_site_id') 模式
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings, assume
|
||||
from hypothesis.strategies import sampled_from, integers
|
||||
|
||||
# ── Path constants ───────────────────────────────────────────────
# NOTE(review): hard-coded absolute Windows monorepo root — confirm.
SCHEMAS_DIR = os.path.join(r"C:\NeoZQYY", "db", "etl_feiqiu", "schemas")
APP_SQL = os.path.join(SCHEMAS_DIR, "app.sql")
|
||||
|
||||
|
||||
# ── 解析工具 ──────────────────────────────────────────────────────
|
||||
|
||||
def _read_app_sql() -> str:
    """Read and return the full contents of app.sql (UTF-8)."""
    with open(APP_SQL, encoding="utf-8") as f:
        return f.read()
|
||||
|
||||
|
||||
# Matches: ALTER TABLE [schema.]table_name ENABLE ROW LEVEL SECURITY
_ENABLE_RLS_RE = re.compile(
    r"ALTER\s+TABLE\s+([\w]+\.[\w]+)\s+ENABLE\s+ROW\s+LEVEL\s+SECURITY",
    re.IGNORECASE,
)

# Matches: CREATE POLICY policy_name ON [schema.]table_name ... USING (...)
# Captures: policy name, fully-qualified table name, USING clause body.
# NOTE(review): the lazy (.+?) stops at the first ')' followed by ';'; a
# USING clause containing ');' inside a literal would be truncated — confirm
# app.sql never does this.
_CREATE_POLICY_RE = re.compile(
    r"CREATE\s+POLICY\s+(\w+)\s+ON\s+([\w]+\.[\w]+)"
    r".*?USING\s*\((.+?)\)\s*;",
    re.IGNORECASE | re.DOTALL,
)

# Matches current_setting('app.current_site_id') inside a USING clause.
_SITE_ID_FILTER_RE = re.compile(
    r"current_setting\s*\(\s*'app\.current_site_id'\s*\)",
    re.IGNORECASE,
)

# Matches any site_id-like column name (site_id, register_site_id, ...).
_SITE_ID_FIELD_RE = re.compile(
    r"\b\w*site_id\b",
    re.IGNORECASE,
)
|
||||
|
||||
|
||||
def _parse_rls_enabled_tables(content: str) -> list[str]:
    """Collect every table (schema.table, lower-cased) that enables RLS."""
    names: list[str] = []
    for match in _ENABLE_RLS_RE.finditer(content):
        names.append(match.group(1).lower())
    return names
|
||||
|
||||
|
||||
def _parse_policies(content: str) -> list[dict]:
    """
    Extract every CREATE POLICY statement from *content*.

    Returns a list of {"name": ..., "table": ..., "using_clause": ...} dicts.
    """
    return [
        {
            "name": match.group(1).lower(),
            "table": match.group(2).lower(),
            "using_clause": match.group(3).strip(),
        }
        for match in _CREATE_POLICY_RE.finditer(content)
    ]
|
||||
|
||||
|
||||
# ── Preload (module level, parsed once) ──────────────────────────

_content = _read_app_sql()
RLS_TABLES = _parse_rls_enabled_tables(_content)  # tables with RLS enabled
POLICIES = _parse_policies(_content)              # all CREATE POLICY entries

# Build table -> [policy, ...] mapping.
POLICY_MAP: dict[str, list[dict]] = {}
for p in POLICIES:
    POLICY_MAP.setdefault(p["table"], []).append(p)

# Import-time sanity checks: fail fast during collection if app.sql has no
# RLS tables or policies.
# NOTE(review): module-level asserts are stripped under -O; consider raising.
assert len(RLS_TABLES) > 0, "未找到任何启用 RLS 的表,请检查 app.sql"
assert len(POLICIES) > 0, "未找到任何 CREATE POLICY 语句,请检查 app.sql"
|
||||
|
||||
|
||||
# ── 属性测试 ──────────────────────────────────────────────────────
|
||||
|
||||
@given(table=sampled_from(RLS_TABLES))
@settings(max_examples=100)
def test_rls_table_has_site_isolation_policy(table: str):
    """
    Property 11 (sub-property A): every RLS-enabled table has a policy.

    For any table with RLS enabled, at least one CREATE POLICY statement
    must exist.

    **Validates: Requirements 13.2**
    """
    has_policy = table in POLICY_MAP
    assert has_policy, (
        f"表 {table} 启用了 RLS 但没有对应的 CREATE POLICY 语句。"
        f"Requirements 13.2 要求所有启用 RLS 的表都有隔离策略。"
    )
|
||||
|
||||
|
||||
@given(table=sampled_from(RLS_TABLES))
@settings(max_examples=100)
def test_rls_policy_uses_current_site_id_setting(table: str):
    """
    Property 11 (sub-property B): policies filter via the
    app.current_site_id session setting.

    For any RLS-enabled table, at least one policy's USING clause must
    reference current_setting('app.current_site_id').

    **Validates: Requirements 13.2**
    """
    assume(table in POLICY_MAP)
    policies = POLICY_MAP[table]

    # Any single matching policy satisfies the property.
    matching = [
        p for p in policies if _SITE_ID_FILTER_RE.search(p["using_clause"])
    ]
    assert matching, (
        f"表 {table} 的 RLS 策略未使用 current_setting('app.current_site_id') 过滤。"
        f"策略 USING 子句: {[p['using_clause'] for p in policies]}。"
        f"Requirements 13.2 要求根据会话变量 app.current_site_id 自动过滤。"
    )
|
||||
|
||||
|
||||
@given(table=sampled_from(RLS_TABLES))
@settings(max_examples=100)
def test_rls_policy_filters_by_site_id_field(table: str):
    """
    Property 11 (sub-property C): the USING clause references a site_id
    column (site_id, register_site_id, ...).

    **Validates: Requirements 13.2**
    """
    assume(table in POLICY_MAP)
    policies = POLICY_MAP[table]

    # Any single policy referencing a site_id-like column suffices.
    matching = [
        p for p in policies if _SITE_ID_FIELD_RE.search(p["using_clause"])
    ]
    assert matching, (
        f"表 {table} 的 RLS 策略 USING 子句中未引用 site_id 相关字段。"
        f"策略 USING 子句: {[p['using_clause'] for p in policies]}。"
        f"Requirements 13.2 要求按 site_id 隔离数据。"
    )
|
||||
|
||||
|
||||
# Equality-comparison pattern for site-isolation policies. Compiled once at
# module import instead of inside the test body, which hypothesis re-runs
# up to 100 times per session.
# \w* also matches prefixed columns such as register_site_id.
_EQUALITY_RE = re.compile(
    r"\w*site_id\s*=\s*current_setting\s*\(\s*'app\.current_site_id'\s*\)\s*::\s*\w+",
    re.IGNORECASE,
)


@given(
    table=sampled_from(RLS_TABLES),
    site_id=integers(min_value=1, max_value=10**15),
)
@settings(max_examples=100)
def test_rls_policy_using_clause_pattern_valid_for_any_site_id(
    table: str, site_id: int
):
    """
    Property 11 (sub-property D): the USING clause is an equality comparison
    `<field> = current_setting('app.current_site_id')::<type>`, so filtering
    works for any site_id value the session sets.

    **Validates: Requirements 13.2**
    """
    assume(table in POLICY_MAP)
    policies = POLICY_MAP[table]

    has_equality = any(
        _EQUALITY_RE.search(p["using_clause"])
        for p in policies
    )
    assert has_equality, (
        f"表 {table} 的 RLS 策略未使用等值比较模式 "
        f"(field = current_setting('app.current_site_id')::type)。"
        f"对于 site_id={site_id},无法保证正确过滤。"
        f"策略 USING 子句: {[p['using_clause'] for p in policies]}"
    )
|
||||
78
tests/test_property_schema_migration.py
Normal file
78
tests/test_property_schema_migration.py
Normal file
@@ -0,0 +1,78 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Schema 表定义迁移完整性属性测试
|
||||
|
||||
**Validates: Requirements 7.3, 7.6**
|
||||
|
||||
Property 6: 对于任意现有数据库 schema(billiards_ods、billiards_dws)中的表,
|
||||
新 schema(ods、dws)的 DDL 文件中应包含该表的 CREATE TABLE 定义。
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# ── Path constants ───────────────────────────────────────────────
# NOTE(review): hard-coded absolute Windows monorepo root — confirm.
SCHEMAS_DIR = os.path.join(r"C:\NeoZQYY", "db", "etl_feiqiu", "schemas")

# Legacy schema files (billiards_ods / billiards_dws).
OLD_ODS_FILE = os.path.join(SCHEMAS_DIR, "schema_ODS_doc.sql")
OLD_DWS_FILE = os.path.join(SCHEMAS_DIR, "schema_dws.sql")

# New schema files (ods / dws).
NEW_ODS_FILE = os.path.join(SCHEMAS_DIR, "ods.sql")
NEW_DWS_FILE = os.path.join(SCHEMAS_DIR, "dws.sql")

# ── Parsing helpers ──────────────────────────────────────────────
# Matches: CREATE TABLE [IF NOT EXISTS] [schema.]table_name
_CREATE_TABLE_RE = re.compile(
    r"CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?"
    r"(?:[\w]+\.)?(\w+)",
    re.IGNORECASE,
)
|
||||
|
||||
|
||||
def _extract_table_names(sql_path: str) -> set[str]:
    """Pull every CREATE TABLE name (schema prefix stripped, lower-cased)
    from the SQL file at *sql_path*."""
    with open(sql_path, encoding="utf-8") as fh:
        sql_text = fh.read()
    names: set[str] = set()
    for match in _CREATE_TABLE_RE.finditer(sql_text):
        names.add(match.group(1).lower())
    return names
|
||||
|
||||
|
||||
# ── Preload table-name sets (module level, parsed once) ──────────
OLD_ODS_TABLES = sorted(_extract_table_names(OLD_ODS_FILE))
OLD_DWS_TABLES = sorted(_extract_table_names(OLD_DWS_FILE))

NEW_ODS_TABLES = _extract_table_names(NEW_ODS_FILE)
NEW_DWS_TABLES = _extract_table_names(NEW_DWS_FILE)

# Merge the legacy table names, tagged with their source schema, so that
# hypothesis can sample uniformly across both.
_OLD_ODS_TAGGED = [(t, "ods") for t in OLD_ODS_TABLES]
_OLD_DWS_TAGGED = [(t, "dws") for t in OLD_DWS_TABLES]
_ALL_OLD_TABLES = _OLD_ODS_TAGGED + _OLD_DWS_TAGGED
|
||||
|
||||
|
||||
# ── 属性测试 ──────────────────────────────────────────────
|
||||
@settings(max_examples=100)
@given(table_info=sampled_from(_ALL_OLD_TABLES))
def test_old_table_exists_in_new_schema(table_info: tuple[str, str]) -> None:
    """
    Property 6: schema table-definition migration completeness.

    **Validates: Requirements 7.3, 7.6**

    Every table from a legacy schema must have a same-named CREATE TABLE
    definition in the corresponding new schema DDL file.
    """
    table_name, source = table_info

    # Resolve the target table set and failure message per source schema.
    if source == "ods":
        present = table_name in NEW_ODS_TABLES
        detail = (
            f"旧 billiards_ods 表 '{table_name}' 在新 ods.sql 中未找到 CREATE TABLE 定义。"
            f"\n新 ods.sql 包含的表: {sorted(NEW_ODS_TABLES)}"
        )
    else:
        present = table_name in NEW_DWS_TABLES
        detail = (
            f"旧 billiards_dws 表 '{table_name}' 在新 dws.sql 中未找到 CREATE TABLE 定义。"
            f"\n新 dws.sql 包含的表: {sorted(NEW_DWS_TABLES)}"
        )

    assert present, detail
|
||||
147
tests/test_property_site_id_existence.py
Normal file
147
tests/test_property_site_id_existence.py
Normal file
@@ -0,0 +1,147 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
业务表 site_id 字段存在性属性测试
|
||||
|
||||
**Validates: Requirements 13.1**
|
||||
|
||||
Property 10: 对于任意 app schema 中的业务视图和 dws/core schema 中的业务表,
|
||||
其定义中应包含 site_id 字段。
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# ── Path constants ───────────────────────────────────────────────
# NOTE(review): hard-coded absolute Windows monorepo root — confirm.
SCHEMAS_DIR = os.path.join(r"C:\NeoZQYY", "db", "etl_feiqiu", "schemas")
ZQYY_APP_DIR = os.path.join(r"C:\NeoZQYY", "db", "zqyy_app", "schemas")

APP_SQL = os.path.join(SCHEMAS_DIR, "app.sql")
DWS_SQL = os.path.join(SCHEMAS_DIR, "dws.sql")
CORE_SQL = os.path.join(SCHEMAS_DIR, "core.sql")
ZQYY_INIT_SQL = os.path.join(ZQYY_APP_DIR, "init.sql")
|
||||
|
||||
# ── Globally-exempt tables ───────────────────────────────────────
# permissions / role_permissions are global tables and need no site_id;
# cfg_* are dws-layer configuration tables (global / tenant-level config);
# dim_goods_category is a tenant-level global reference dimension.
GLOBAL_TABLES = {
    "permissions",
    "role_permissions",
    "dim_goods_category",
}

# Prefix of dws configuration tables (global, not isolated per site).
CFG_PREFIX = "cfg_"
|
||||
|
||||
|
||||
# ── Parsing helpers ──────────────────────────────────────────────

# Matches: CREATE TABLE [IF NOT EXISTS] [schema.]table_name(
_CREATE_TABLE_RE = re.compile(
    r"CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?"
    r"(?:[\w]+\.)?(\w+)\s*\(",
    re.IGNORECASE,
)

# Matches: CREATE [OR REPLACE] VIEW [schema.]view_name AS
_CREATE_VIEW_RE = re.compile(
    r"CREATE\s+(?:OR\s+REPLACE\s+)?VIEW\s+(?:[\w]+\.)?(\w+)\s+AS",
    re.IGNORECASE,
)
|
||||
|
||||
|
||||
def _extract_definitions(sql_path: str) -> dict[str, str]:
    """
    Extract every CREATE TABLE / CREATE VIEW definition from a SQL file.

    Returns a {name: definition_text} mapping; each definition's text runs
    from its CREATE statement to the next CREATE marker (or end of file).
    """
    with open(sql_path, encoding="utf-8") as fh:
        sql_text = fh.read()

    # Gather (offset, name) markers for both tables and views, then order
    # them by position so each definition can be sliced out of the text.
    markers = [
        (m.start(), m.group(1).lower())
        for regex in (_CREATE_TABLE_RE, _CREATE_VIEW_RE)
        for m in regex.finditer(sql_text)
    ]
    markers.sort(key=lambda marker: marker[0])

    definitions: dict[str, str] = {}
    for idx, (start, name) in enumerate(markers):
        stop = markers[idx + 1][0] if idx + 1 < len(markers) else len(sql_text)
        definitions[name] = sql_text[start:stop]

    return definitions
|
||||
|
||||
|
||||
def _has_site_id(definition: str) -> bool:
    """True if the definition text references a site_id column."""
    return re.search(r"\bsite_id\b", definition, re.IGNORECASE) is not None
|
||||
|
||||
|
||||
def _is_business_object(name: str) -> bool:
    """Whether *name* is a business table/view (excludes global and config tables)."""
    excluded = name in GLOBAL_TABLES or name.startswith(CFG_PREFIX)
    return not excluded
|
||||
|
||||
|
||||
# ── Preload definitions (module level, parsed only once) ──

# Each DDL source mapped to its parsed {name: definition_text} dict.
_DEFS_BY_SOURCE: dict[str, dict[str, str]] = {
    "app": _extract_definitions(APP_SQL),
    "dws": _extract_definitions(DWS_SQL),
    "core": _extract_definitions(CORE_SQL),
    "zqyy_app": _extract_definitions(ZQYY_INIT_SQL),
}

# Business objects as (name, source, definition) triples.
# One comprehension replaces four copy-pasted loops plus a post-hoc filter:
# it skips global/config tables via _is_business_object and also drops the
# "get_*" function definitions that the dws DDL's CREATE regexes pick up.
BUSINESS_OBJECTS: list[tuple[str, str, str]] = [
    (name, source, defn)
    for source, defs in _DEFS_BY_SOURCE.items()
    for name, defn in defs.items()
    if _is_business_object(name) and not name.startswith("get_")
]

assert len(BUSINESS_OBJECTS) > 0, "未找到任何业务表/视图定义,请检查 DDL 文件路径"
|
||||
|
||||
|
||||
# ── Property tests ────────────────────────────────────────

@given(obj=sampled_from(BUSINESS_OBJECTS))
@settings(max_examples=100)
def test_business_object_has_site_id(obj: tuple[str, str, str]):
    """
    Property 10: every business table/view definition carries site_id.

    For any business view in the app schema and any business table in the
    dws/core/zqyy_app schemas, the definition text must contain a site_id
    column.

    **Validates: Requirements 13.1**
    """
    name, source, definition = obj
    failure_message = (
        f"{source}.{name} 缺少 site_id 字段。"
        f"Requirements 13.1 要求所有业务表包含 site_id 以支持多门店隔离。"
    )
    assert _has_site_id(definition), failure_message
|
||||
64
tests/test_property_steering_paths.py
Normal file
64
tests/test_property_steering_paths.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Steering 文件路径更新属性测试
|
||||
|
||||
**Validates: Requirements 10.2**
|
||||
|
||||
Property 9: 对于任意 .kiro/steering/ 目录下的文件,
|
||||
文件内容中不应包含旧仓库路径引用(如 FQ-ETL、C:\\ZQYY\\FQ-ETL)。
|
||||
|
||||
测试逻辑:
|
||||
1. 列出 .kiro/steering/ 下所有 .md 文件
|
||||
2. 使用 hypothesis sampled_from 随机选取
|
||||
3. 读取文件内容,验证不包含旧路径引用
|
||||
"""
|
||||
import os
|
||||
import glob
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# ── Path constants ────────────────────────────────────────
# Monorepo root; can be overridden via the NEOZQYY_ROOT environment variable
# so the test also runs from checkouts outside the default Windows location.
MONOREPO_ROOT = os.environ.get("NEOZQYY_ROOT", r"C:\NeoZQYY")
STEERING_DIR = os.path.join(MONOREPO_ROOT, ".kiro", "steering")

# Legacy repository path strings that must no longer appear anywhere.
# NOTE: "FQ-ETL" alone already subsumes both full paths as a substring; the
# full forms are kept so a failure message names the exact form found.
OLD_PATH_PATTERNS = [
    "FQ-ETL",
    r"C:\ZQYY\FQ-ETL",
    r"C:\\ZQYY\\FQ-ETL",
]
|
||||
|
||||
# ── Preload the steering file list (module level, scanned only once) ──
STEERING_FILES: list[str] = sorted(glob.glob(os.path.join(STEERING_DIR, "*.md")))

assert STEERING_FILES, (
    f"未在 {STEERING_DIR} 下找到任何 .md 文件,请检查目录是否存在"
)
|
||||
|
||||
|
||||
# ── Property tests ────────────────────────────────────────

@given(filepath=sampled_from(STEERING_FILES))
@settings(max_examples=100)
def test_steering_files_no_old_repo_paths(filepath: str):
    """
    Property 9: steering file path migration.

    Any .md file under .kiro/steering/ must not reference old repository
    paths; this confirms every steering file was rewritten from the
    Monorepo perspective after the migration.

    **Validates: Requirements 10.2**
    """
    with open(filepath, encoding="utf-8") as f:
        content = f.read()

    filename = os.path.basename(filepath)
    for pattern in OLD_PATH_PATTERNS:
        # Bug fix: the failure message previously printed the literal
        # "(unknown)" (leaked template metadata) and left `filename`
        # unused; it now names the offending file.
        assert pattern not in content, (
            f"[{filename}] 仍包含旧仓库路径引用: '{pattern}'\n"
            f"请更新该文件,移除所有旧路径(FQ-ETL / C:\\ZQYY\\FQ-ETL)"
        )
|
||||
104
tests/test_property_test_db_consistency.py
Normal file
104
tests/test_property_test_db_consistency.py
Normal file
@@ -0,0 +1,104 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
测试数据库结构一致性属性测试
|
||||
|
||||
**Validates: Requirements 9.1, 9.2**
|
||||
|
||||
Property 8: 对于任意生产数据库(etl_feiqiu、zqyy_app)中的 schema 和表定义,
|
||||
对应的测试数据库(test_etl_feiqiu、test_zqyy_app)中应存在相同的 schema 和表结构。
|
||||
|
||||
测试逻辑:测试数据库创建脚本通过 \\i 引用生产 DDL 文件,
|
||||
结构一致性可以通过验证脚本引用的完整性来保证——
|
||||
即每个 \\i 引用的 DDL 文件在磁盘上实际存在。
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from hypothesis import given, settings
|
||||
from hypothesis.strategies import sampled_from
|
||||
|
||||
# ── Path constants ────────────────────────────────────────
# Monorepo root; can be overridden via the NEOZQYY_ROOT environment variable
# so the test also runs from checkouts outside the default Windows location.
MONOREPO_ROOT = os.environ.get("NEOZQYY_ROOT", r"C:\NeoZQYY")


def _db_config(db_name: str) -> dict[str, str]:
    """Build the script/base_dir config for one database's scripts folder."""
    scripts_dir = os.path.join(MONOREPO_ROOT, "db", db_name, "scripts")
    return {
        # The test-database creation script itself.
        "script": os.path.join(scripts_dir, "create_test_db.sql"),
        # Directory against which the script's relative \i paths resolve.
        "base_dir": scripts_dir,
    }


# Both entries followed an identical layout; build them from one helper
# instead of duplicating the structure per database.
DB_CONFIGS = {name: _db_config(name) for name in ("etl_feiqiu", "zqyy_app")}
|
||||
|
||||
# ── 解析 \\i 引用 ─────────────────────────────────────────
|
||||
# 匹配注释中的 \i 指令(psql 元命令),如:
|
||||
# \i ../schemas/meta.sql
|
||||
# \i ../seeds/*.sql ← 通配符引用,跳过
|
||||
_PSQL_INCLUDE_RE = re.compile(r"\\i\s+(\S+)")
|
||||
|
||||
|
||||
def _extract_ddl_refs(script_path: str, base_dir: str) -> list[tuple[str, str]]:
    """Collect the DDL files pulled in via \\i from create_test_db.sql.

    Returns a list of (relative_path, absolute_path) pairs. Wildcard
    includes (e.g. ../seeds/*.sql) are skipped because they cannot be
    verified file-by-file.
    """
    with open(script_path, encoding="utf-8") as fh:
        script_text = fh.read()

    included: list[tuple[str, str]] = []
    for match in _PSQL_INCLUDE_RE.finditer(script_text):
        rel = match.group(1)
        if "*" in rel:
            # Wildcard include — not verifiable per file, skip it.
            continue
        resolved = os.path.normpath(os.path.join(base_dir, rel))
        included.append((rel, resolved))
    return included
|
||||
|
||||
|
||||
# ── Preload all references (module level, parsed only once) ──

# Each element: (database_name, relative_path, absolute_path)
DDL_REFERENCES: list[tuple[str, str, str]] = []

for _db, _cfg in DB_CONFIGS.items():
    assert os.path.isfile(_cfg["script"]), (
        f"测试数据库创建脚本不存在: {_cfg['script']}"
    )
    DDL_REFERENCES.extend(
        (_db, rel, abs_path)
        for rel, abs_path in _extract_ddl_refs(_cfg["script"], _cfg["base_dir"])
    )

assert DDL_REFERENCES, (
    "未从 create_test_db.sql 中提取到任何 \\i DDL 引用,请检查脚本内容"
)
|
||||
|
||||
|
||||
# ── Property tests ────────────────────────────────────────

@given(ref=sampled_from(DDL_REFERENCES))
@settings(max_examples=100)
def test_test_db_ddl_references_exist(ref: tuple[str, str, str]):
    """
    Property 8: test-database structural consistency.

    Every DDL file that a production database's create_test_db.sql script
    includes via \\i must exist on disk; this guarantees the test database
    can fully reuse the production DDL and therefore stays structurally
    consistent with it.

    **Validates: Requirements 9.1, 9.2**
    """
    db_name, rel_path, abs_path = ref
    missing_message = (
        f"[{db_name}] create_test_db.sql 引用的 DDL 文件不存在: "
        f"{rel_path} → {abs_path}"
    )
    assert os.path.isfile(abs_path), missing_message
|
||||
Reference in New Issue
Block a user