chore: 迁移项目路径 C:\NeoZQYY → C:\Project\NeoZQYY

开发环境从旧虚拟机 (DESKTOP-KGB0K5G) 迁移到新机器 (DESKTOP-D676QDA),
项目目录从 C:\NeoZQYY 变更为 C:\Project\NeoZQYY,
批量替换 126 个文件中的绝对路径引用。

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Neo
2026-04-10 06:45:29 +08:00
parent f9b1039970
commit 66c9ae8738
126 changed files with 4154 additions and 4127 deletions

81
.env
View File

@@ -77,52 +77,64 @@ BUSINESS_DAY_START_HOUR=8
# ETL Connector(飞球)输出路径
# ------------------------------------------------------------------------------
# JSON 导出根目录(ODS 抓取落盘,按 TASK_CODE/run_id 自动建子目录)
EXPORT_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
EXPORT_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
# ETL 运行日志根目录
LOG_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
LOG_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
# 在线抓取 JSON 输出根目录(FETCH_ONLY 模式使用)
FETCH_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
FETCH_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
# ETL 质检/完整性报告输出目录
ETL_REPORT_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS
ETL_REPORT_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS
# ------------------------------------------------------------------------------
# 系统级输出路径
# ------------------------------------------------------------------------------
# 数据流结构分析报告输出目录(gen_dataflow_report.py / analyze_dataflow.py)
SYSTEM_ANALYZE_ROOT=C:/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis
SYSTEM_ANALYZE_ROOT=C:/Project/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis
# 字段排查报告输出目录(field_audit.py)
FIELD_AUDIT_ROOT=C:/NeoZQYY/export/SYSTEM/REPORTS/field_audit
FIELD_AUDIT_ROOT=C:/Project/NeoZQYY/export/SYSTEM/REPORTS/field_audit
# 全链路数据流文档输出目录(gen_full_dataflow_doc.py)
FULL_DATAFLOW_DOC_ROOT=C:/NeoZQYY/export/SYSTEM/REPORTS/full_dataflow_doc
FULL_DATAFLOW_DOC_ROOT=C:/Project/NeoZQYY/export/SYSTEM/REPORTS/full_dataflow_doc
# API 样本缓存目录(gen_full_dataflow_doc.py 的 24h 缓存)
API_SAMPLE_CACHE_ROOT=C:/NeoZQYY/export/SYSTEM/CACHE/api_samples
API_SAMPLE_CACHE_ROOT=C:/Project/NeoZQYY/export/SYSTEM/CACHE/api_samples
# 系统级运维日志目录
SYSTEM_LOG_ROOT=C:/NeoZQYY/export/SYSTEM/LOGS
SYSTEM_LOG_ROOT=C:/Project/NeoZQYY/export/SYSTEM/LOGS
# ------------------------------------------------------------------------------
# 后端输出路径(预留)
# ------------------------------------------------------------------------------
# 后端结构化日志目录
BACKEND_LOG_ROOT=C:/NeoZQYY/export/BACKEND/LOGS
BACKEND_LOG_ROOT=C:/Project/NeoZQYY/export/BACKEND/LOGS
# 用户头像存储目录
AVATAR_EXPORT_PATH=C:/Project/NeoZQYY/export/BACKEND/avatars
# ------------------------------------------------------------------------------
# 阿里云百炼 AI 配置
# DashScope AI 配置(百炼 Application API)
# CHANGE 2026-02-23 | 从 PRD 文档迁移至 .env(禁止在文档中明文存放)
# CHANGE P14 | BAILIAN_* → DASHSCOPE_*;移除 BASE_URL/MODEL(Application API 不需要)
# ------------------------------------------------------------------------------
BAILIAN_API_KEY=sk-6def29cab3474cc797e52b82a46a5dba
BAILIAN_MODEL=qwen-plus
BAILIAN_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
BAILIAN_TEST_APP_ID=541edb3d5fcd4c18b13cbad81bb5fb9d
DASHSCOPE_API_KEY=sk-6def29cab3474cc797e52b82a46a5dba
DASHSCOPE_WORKSPACE_ID=
# CHANGE 2026-03-05 | 8 个百炼 AI 应用 ID(从百炼平台获取,2026-03-05 更新)
BAILIAN_APP_ID_1_CHAT=979dabe6f22a43989632b8c662cac97c
BAILIAN_APP_ID_2_FINANCE=1dcdb5f39c3040b6af8ef79215b9b051
BAILIAN_APP_ID_3_CLUE=708bf45439cd48c7ab9a514d03482890
BAILIAN_APP_ID_4_ANALYSIS=ea7b1c374f574b9a925a2fb5789a9b90
BAILIAN_APP_ID_5_TACTICS=46f54e6053df4bb0b83be29366025cf6
BAILIAN_APP_ID_6_NOTE=025bb344146b4e4e8be30c444adab3b4
BAILIAN_APP_ID_7_CUSTOMER=df35e06991b24d49971c03c6428a9c87
BAILIAN_APP_ID_8_CONSOLIDATE=407dfb89283b4196934eec5fefe3ebc2
# 8 个百炼 AI 应用 ID(从百炼平台获取,通过 app_id 指定应用)
# 应用 1:通用对话 | 应用 2:财务洞察 | 应用 3:客户数据维客线索分析
# 应用 4:关系分析/任务建议 | 应用 5:话术参考 | 应用 6:备注分析
# 应用 7:客户分析 | 应用 8:维客线索整理
DASHSCOPE_APP_ID_1_CHAT=979dabe6f22a43989632b8c662cac97c
DASHSCOPE_APP_ID_2_FINANCE=1dcdb5f39c3040b6af8ef79215b9b051
DASHSCOPE_APP_ID_3_CLUE=708bf45439cd48c7ab9a514d03482890
DASHSCOPE_APP_ID_4_ANALYSIS=ea7b1c374f574b9a925a2fb5789a9b90
DASHSCOPE_APP_ID_5_TACTICS=46f54e6053df4bb0b83be29366025cf6
DASHSCOPE_APP_ID_6_NOTE=025bb344146b4e4e8be30c444adab3b4
DASHSCOPE_APP_ID_7_CUSTOMER=df35e06991b24d49971c03c6428a9c87
DASHSCOPE_APP_ID_8_CONSOLIDATE=407dfb89283b4196934eec5fefe3ebc2
# 应用 9:Session 日志摘要生成(Kiro agent_on_stop + batch_generate_summaries 使用)
DASHSCOPE_APP_ID_SUMMARY=e0cf8913b1ee4a4eb9464cc1ee0bf300
# 内部 API 认证 token(ETL 等内部服务调用 /api/internal/* 端点时使用)
INTERNAL_API_TOKEN=C4Rs45fEoMC3u2PR4-jvakl8SBYpU9kV7JFiTj-TJAc
# 后端 API 地址(ETL 触发 AI 事件时使用,如 http://localhost:8000)
BACKEND_API_URL=http://localhost:8000
# ------------------------------------------------------------------------------
# 微信小程序
@@ -144,6 +156,21 @@ PIPELINE_RATE_MAX=2.0
# 后端运维面板路径配置
# CHANGE 2026-03-06 | 显式锁定,避免 __file__ 推算在不同部署环境指向错误路径
# ------------------------------------------------------------------------------
OPS_SERVER_BASE=C:/NeoZQYY
ETL_PROJECT_PATH=C:/NeoZQYY/apps/etl/connectors/feiqiu
ETL_PYTHON_EXECUTABLE=C:/NeoZQYY/.venv/Scripts/python.exe
OPS_SERVER_BASE=C:/Project/NeoZQYY
ETL_PROJECT_PATH=C:/Project/NeoZQYY/apps/etl/connectors/feiqiu
ETL_PYTHON_EXECUTABLE=C:/Project/NeoZQYY/.venv/Scripts/python.exe
# === Dev Trace Log ===
# 全链路请求追踪日志(仅开发/测试环境使用,生产环境关闭)
DEV_TRACE_ENABLED=true
DEV_TRACE_LOG_DIR=export/dev-trace-logs
DEV_TRACE_LOG_RETENTION_DAYS=7
DEV_TRACE_LOG_SQL=true
DEV_TRACE_LOG_PARAMS=true
# ------------------------------------------------------------------------------
# DWS 工资计算配置
# CHANGE 2026-03-27 | 允许非月初结算期运行工资计算任务(临时开关)
# 正常调度只在月初 1-5 号跑上月工资,此开关允许月中手动跑当月工资
# ------------------------------------------------------------------------------
DWS_SALARY_ALLOW_OUT_OF_CYCLE=true

View File

@@ -96,7 +96,7 @@
- [x] 6. Final checkpoint — 全量验证
- 运行验证脚本 `python scripts/ops/validate_p1_db_foundation.py`,确认所有检查项通过
- 运行属性测试 `cd C:\NeoZQYY && pytest tests/ -v -k p1`,确认所有属性测试通过
- 运行属性测试 `cd C:\Project\NeoZQYY && pytest tests/ -v -k p1`,确认所有属性测试通过
- 如有问题请告知用户
## 说明

View File

@@ -556,7 +556,7 @@ def test_penalty_minutes_formula(actual_minutes, per_hour_contribution):
### 测试配置
- 属性测试:`cd C:\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 单元测试:`cd apps/etl/connectors/feiqiu && pytest tests/unit/test_assistant_order_contribution.py -v`
- 每个属性测试标注 `@settings(max_examples=200)`
- 每个属性测试注释引用设计文档 Property 编号

View File

@@ -65,7 +65,7 @@
- _Requirements: 2.7, 2.8_
- [x] 3. 检查点 — 确保助教订单流水统计测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 确保所有属性测试通过,如有问题请询问用户。
- [x] 4. 扩展会员消费汇总任务
@@ -106,7 +106,7 @@
- **Validates: Requirements 6.1**
- [x] 6. 检查点 — 确保惩罚计算和消费汇总测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 运行单元测试:`cd apps/etl/connectors/feiqiu && pytest tests/unit/ -k "contribution or penalty or consumption" -v`
- 确保所有测试通过,如有问题请询问用户。
@@ -139,7 +139,7 @@
- _Requirements: 1.1_
- [x] 9. 最终检查点 — 确保所有测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_dws_contribution_properties.py -v`
- 运行单元测试:`cd apps/etl/connectors/feiqiu && pytest tests/unit/ -k "contribution or penalty or consumption" -v`
- 确保所有测试通过,如有问题请询问用户。

View File

@@ -1107,7 +1107,7 @@ ON CONFLICT (job_name) DO NOTHING;
### 测试配置
- 属性测试:`cd C:\NeoZQYY && pytest tests/test_core_business_properties.py -v`
- 属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_core_business_properties.py -v`
- 后端单元测试:`cd apps/backend && pytest tests/ -v`
- 每个属性测试标注 `@settings(max_examples=200)`
- 每个属性测试注释引用设计文档 Property 编号

View File

@@ -116,7 +116,7 @@
- **Validates: Requirements 4.1, 4.2, 4.3, 4.4, 14.3**
- [x] 6. 检查点 - 确保任务生成器测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_core_business_properties.py -v -k "property_1 or property_2 or property_3 or property_4"`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_core_business_properties.py -v -k "property_1 or property_2 or property_3 or property_4"`
- 确保所有属性测试通过,如有问题请向用户确认。
- [x] 7. 实现任务管理服务
@@ -160,7 +160,7 @@
- **Validates: Requirements 6.2, 6.3, 14.6**
- [x] 10. 检查点 - 确保任务管理和召回检测测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_core_business_properties.py -v -k "property_5 or property_6 or property_10 or property_15"`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_core_business_properties.py -v -k "property_5 or property_6 or property_10 or property_15"`
- 确保所有属性测试通过,如有问题请向用户确认。
- [-] 11. 实现备注系统
@@ -215,7 +215,7 @@
- _Requirements: 10.1-10.6_
- [x] 13. 检查点 - 确保所有测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_core_business_properties.py -v`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_core_business_properties.py -v`
- 26/26 全部通过16.81s
- [x] 14. 最终检查点 - 全量验证

View File

@@ -161,7 +161,7 @@ if api_val is not None and ods_val is None:
联调脚本在 ETL 全流程执行完成后,运行全链路检查器:
```bash
cd C:\NeoZQYY
cd C:\Project\NeoZQYY
uv run python scripts/ops/etl_consistency_check.py
```

View File

@@ -80,7 +80,7 @@
- [x] 5. 黑盒数据一致性测试
- [x] 5.1 运行全链路检查器,执行 API→ODS→DWD→DWS 四层数据一致性检查
- 运行 `uv run python scripts/ops/etl_consistency_check.py`(cwd 为项目根目录 `C:\NeoZQYY`)
- 运行 `uv run python scripts/ops/etl_consistency_check.py`(cwd 为项目根目录 `C:\Project\NeoZQYY`)
- 脚本自动从 `LOG_ROOT` 找到最近一次 ETL 日志,解析本次执行的任务列表
- 脚本自动从 `FETCH_ROOT` 读取 API JSON 落盘文件
- 脚本连接数据库(`PG_DSN`),逐表逐字段比对:

View File

@@ -86,7 +86,7 @@
- _Requirements: 8.3_
- [x] 9. 最终 Checkpoint — 确保所有测试通过
- 运行 `cd apps/etl/connectors/feiqiu && pytest tests/unit``cd C:\NeoZQYY && pytest tests/ -v`
- 运行 `cd apps/etl/connectors/feiqiu && pytest tests/unit``cd C:\Project\NeoZQYY && pytest tests/ -v`
- 确认所有测试通过,无回归,如有问题请询问用户。
- _Requirements: 7.3, 9.1, 9.2, 9.3_

View File

@@ -419,4 +419,4 @@ def test_business_date_round_trip(dt, h):
- 属性测试库:`hypothesis`(已在项目 `pyproject.toml` 中声明)
- 每个属性测试对应设计文档中的一个 Property由单个 `@given` 装饰的测试函数实现
- 运行命令:`cd C:\NeoZQYY && pytest tests/test_property_business_day_cutoff.py -v`
- 运行命令:`cd C:\Project\NeoZQYY && pytest tests/test_property_business_day_cutoff.py -v`

View File

@@ -246,7 +246,7 @@
- _Requirements: 12.4, 12.5_
- [x] 15. Final Checkpoint — 全量验证
- 确保所有属性测试通过:`cd C:\NeoZQYY && pytest tests/test_property_business_day_cutoff.py -v`
- 确保所有属性测试通过:`cd C:\Project\NeoZQYY && pytest tests/test_property_business_day_cutoff.py -v`
- 确保 ETL 单元测试通过:`cd apps/etl/connectors/feiqiu && pytest tests/unit -v`
- 确认所有 12 项需求的验收标准均有对应任务覆盖
- 如有问题请向用户确认。

View File

@@ -432,9 +432,9 @@ flowchart TD
```bash
# 属性测试Monorepo 级)
cd C:\NeoZQYY && pytest tests/test_dwd_panorama_properties.py -v
cd C:\Project\NeoZQYY && pytest tests/test_dwd_panorama_properties.py -v
# 单元测试
cd C:\NeoZQYY && pytest tests/test_dwd_panorama_examples.py -v
cd C:\Project\NeoZQYY && pytest tests/test_dwd_panorama_examples.py -v
```

View File

@@ -262,8 +262,8 @@
- _Requirements: 1.6, 1.7, 2.2, 2.5, 2.7, 3.5, 3.7, 4.5, 7.2, 7.4_
- [x] 10. 最终检查点 - 全部完成确认
- 运行全部属性测试:`cd C:\NeoZQYY && pytest tests/test_dwd_panorama_properties.py -v`
- 运行全部示例测试:`cd C:\NeoZQYY && pytest tests/test_dwd_panorama_examples.py -v`
- 运行全部属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_dwd_panorama_properties.py -v`
- 运行全部示例测试:`cd C:\Project\NeoZQYY && pytest tests/test_dwd_panorama_examples.py -v`
- 确认 5 份文档内容完整、验证状态标注齐全
- Ensure all tests pass, ask the user if questions arise.

View File

@@ -39,7 +39,7 @@
- 文件:`tests/test_dwd_phase1_properties.py`
- [x] 3. 检查点 - 确保窗口统一和回补删除后测试通过
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit``cd C:\NeoZQYY && pytest tests/ -v`,确保所有测试通过,如有问题请询问用户。
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit``cd C:\Project\NeoZQYY && pytest tests/ -v`,确保所有测试通过,如有问题请询问用户。
- [x] 4. 清理死代码和未使用常量(需求 3
- [x] 4.1 删除 `_pick_order_column()` 方法和 `FACT_ORDER_CANDIDATES` 常量
@@ -85,7 +85,7 @@
- 文件:`tests/test_dwd_phase1_properties.py`
- [x] 7. 最终检查点 - 确保所有测试通过
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit``cd C:\NeoZQYY && pytest tests/ -v`,确保所有测试通过,如有问题请询问用户。
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit``cd C:\Project\NeoZQYY && pytest tests/ -v`,确保所有测试通过,如有问题请询问用户。
## 备注

View File

@@ -122,7 +122,7 @@
- [x] 7. 检查点 - 阶段 2+3 回归测试
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit -v` 确保所有测试通过
- 运行 `cd C:\NeoZQYY && pytest tests/ -v` 确保 Monorepo 属性测试通过
- 运行 `cd C:\Project\NeoZQYY && pytest tests/ -v` 确保 Monorepo 属性测试通过
- 确保所有测试通过,如有问题请询问用户
- [x] 8. 关键词重命名 pipeline → flow
@@ -160,7 +160,7 @@
- [x] 9.3 运行全量测试确认路径重命名无回归
- `cd apps/etl/connectors/feiqiu && pytest tests/unit -v`
- `cd C:\NeoZQYY && pytest tests/ -v`
- `cd C:\Project\NeoZQYY && pytest tests/ -v`
- _Requirements: 10.5_
- [x] 10. 文档同步更新
@@ -177,7 +177,7 @@
- [x] 11. 最终检查点 - 全量回归测试
- 运行 `cd apps/etl/connectors/feiqiu && pytest tests/unit -v`
- 运行 `cd C:\NeoZQYY && pytest tests/ -v`
- 运行 `cd C:\Project\NeoZQYY && pytest tests/ -v`
- 确保所有测试通过,如有问题请询问用户
- _Requirements: 11.1, 11.2, 11.3, 11.4, 11.5_

View File

@@ -130,7 +130,7 @@
- [x] 9. 最终检查点
- 确保所有测试通过ask the user if questions arise.
- 运行 `cd apps/etl/pipelines/feiqiu && pytest tests/unit -v`
- 运行 `cd C:\NeoZQYY && pytest tests/ -v`(monorepo 属性测试)
- 运行 `cd C:\Project\NeoZQYY && pytest tests/ -v`(monorepo 属性测试)
## 备注

View File

@@ -280,7 +280,7 @@
- **Validates: Requirements 8.9, 8.10 — Design Property 14**
- [x] 17. Final Checkpoint — 全量验证
- Run all property tests: `cd C:\NeoZQYY && pytest tests/test_board_properties.py -v`
- Run all property tests: `cd C:\Project\NeoZQYY && pytest tests/test_board_properties.py -v`
- Ensure all 12 property tests pass. Ask the user if questions arise.
- [x] 18. 前端到数据库全链路测试

View File

@@ -693,12 +693,12 @@ RNS1.4 采用属性测试Property-Based Testing+ 单元测试Unit Testi
```bash
# 属性测试Hypothesis
cd C:\NeoZQYY && pytest tests/ -v -k "rns1_chat"
cd C:\Project\NeoZQYY && pytest tests/ -v -k "rns1_chat"
# 单元测试
cd apps/backend && pytest tests/unit/ -v -k "xcx_chat"
# FDW 验证脚本
cd C:\NeoZQYY && uv run python scripts/ops/verify_fdw_e2e.py
cd C:\Project\NeoZQYY && uv run python scripts/ops/verify_fdw_e2e.py
```

View File

@@ -1061,7 +1061,7 @@ RNS1.2 采用属性测试Property-Based Testing+ 单元测试Unit Testi
```bash
# 属性测试Hypothesis
cd C:\NeoZQYY && pytest tests/ -v -k "rns1_customer_coach"
cd C:\Project\NeoZQYY && pytest tests/ -v -k "rns1_customer_coach"
# 单元测试
cd apps/backend && pytest tests/unit/ -v -k "customer_detail or customer_records or coach_detail or coach_top or coach_history or coach_task_groups or auth_rns12 or fdw_queries_rns12 or degradation_rns12"

View File

@@ -537,7 +537,7 @@ RNS1.0 采用属性测试Property-Based Testing+ 单元测试Unit Testi
```bash
# 属性测试Hypothesis
cd C:\NeoZQYY && pytest tests/ -v -k "rns1"
cd C:\Project\NeoZQYY && pytest tests/ -v -k "rns1"
# 单元测试
cd apps/backend && pytest tests/unit/ -v -k "response_wrapper or camel_model or xcx_tasks_route"

View File

@@ -923,7 +923,7 @@ RNS1.1 采用属性测试Property-Based Testing+ 单元测试Unit Testi
```bash
# 属性测试Hypothesis
cd C:\NeoZQYY && pytest tests/ -v -k "rns1_task_performance"
cd C:\Project\NeoZQYY && pytest tests/ -v -k "rns1_task_performance"
# 单元测试
cd apps/backend && pytest tests/unit/ -v -k "xcx_tasks_v2 or task_detail or performance or pin_unpin or auth_rns11 or fdw_queries"

View File

@@ -388,7 +388,7 @@ def test_spi_raw_non_negative(level, speed, stability):
### 测试配置
- 属性测试:`cd C:\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 单元测试:`cd apps/etl/connectors/feiqiu && pytest tests/unit/test_spi_task.py -v`
- 每个属性测试标注 `@settings(max_examples=200)`
- 每个属性测试注释引用设计文档 Property 编号

View File

@@ -56,7 +56,7 @@
- **Validates: Requirements 6.6, 10.5**
- [x] 3. 检查点 - 确保核心算法测试通过
- 运行 `cd C:\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 运行 `cd C:\Project\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 确保所有属性测试通过,如有问题请询问用户。
- [x] 4. 实现数据提取与执行流程
@@ -119,7 +119,7 @@
- _Requirements: 11.2, 11.3_
- [x] 9. 最终检查点 - 确保所有测试通过
- 运行属性测试:`cd C:\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 运行属性测试:`cd C:\Project\NeoZQYY && pytest tests/test_spi_properties.py -v`
- 运行单元测试:`cd apps/etl/connectors/feiqiu && pytest tests/unit/test_spi_task.py -v`
- 确保所有测试通过,如有问题请询问用户。

View File

@@ -15,7 +15,7 @@ uv sync # 安装依赖
cd apps/etl/connectors/feiqiu && python -m cli.main --dry-run --tasks DWD_LOAD_FROM_ODS
cd apps/backend && uvicorn app.main:app --reload
cd apps/etl/connectors/feiqiu && pytest tests/unit # ETL 单元测试
cd C:\NeoZQYY && pytest tests/ -v # 属性测试
cd C:\Project\NeoZQYY && pytest tests/ -v # 属性测试
```
## 脚本规范

View File

@@ -45,4 +45,4 @@ LOG_LEVEL=INFO
# ------------------------------------------------------------------------------
# ETL 项目路径(子进程 cwd缺省按 monorepo 相对路径推算)
# ------------------------------------------------------------------------------
# ETL_PROJECT_PATH=C:/NeoZQYY/apps/etl/connectors/feiqiu
# ETL_PROJECT_PATH=C:/Project/NeoZQYY/apps/etl/connectors/feiqiu

View File

@@ -11,7 +11,7 @@ from pathlib import Path
from dotenv import load_dotenv
# CHANGE 2026-03-07 | 项目根目录定位:防止 junction/symlink 穿透到 D 盘
# 背景C:\NeoZQYY 是 junction → D:\NeoZQYY\...\repo
# 背景C:\Project\NeoZQYY 是 junction → D:\NeoZQYY\...\repo
# Path(__file__).resolve() 和 absolute() 都可能解析到 D 盘,
# 导致加载 D 盘的 .env路径全指向 D 盘ETL 命令因此携带错误路径。
# 策略:环境变量 > 已知固定路径 > __file__ 推算(最后手段)

View File

@@ -1,8 +1,8 @@
============================= test session starts =============================
platform win32 -- Python 3.13.9, pytest-9.0.2, pluggy-1.6.0 -- C:\NeoZQYY\.venv\Scripts\python.exe
platform win32 -- Python 3.13.9, pytest-9.0.2, pluggy-1.6.0 -- C:\Project\NeoZQYY\.venv\Scripts\python.exe
cachedir: .pytest_cache
hypothesis profile 'default'
rootdir: C:\NeoZQYY\apps\backend
rootdir: C:\Project\NeoZQYY\apps\backend
configfile: pyproject.toml
plugins: anyio-4.12.1, hypothesis-6.151.6, asyncio-1.3.0
asyncio: mode=Mode.STRICT, debug=False, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function

View File

@@ -1,8 +1,8 @@
============================= test session starts =============================
platform win32 -- Python 3.13.9, pytest-9.0.2, pluggy-1.6.0 -- C:\NeoZQYY\.venv\Scripts\python.exe
platform win32 -- Python 3.13.9, pytest-9.0.2, pluggy-1.6.0 -- C:\Project\NeoZQYY\.venv\Scripts\python.exe
cachedir: .pytest_cache
hypothesis profile 'default'
rootdir: C:\NeoZQYY\apps\backend
rootdir: C:\Project\NeoZQYY\apps\backend
configfile: pyproject.toml
plugins: anyio-4.12.1, hypothesis-6.151.6, asyncio-1.3.0
asyncio: mode=Mode.STRICT, debug=False, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function

View File

@@ -35,9 +35,9 @@ API_RETRY_MAX=3
# 路径配置
# CHANGE 2026-02-19 | 统一迁移到 export/ETL-Connectors/feiqiu/ 下
# ------------------------------------------------------------------------------
EXPORT_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
LOG_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
FETCH_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
EXPORT_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
LOG_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
FETCH_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
WRITE_PRETTY_JSON=true
# ------------------------------------------------------------------------------

View File

@@ -148,7 +148,7 @@ cd apps/etl/connectors/feiqiu && python -m cli.main --dry-run --tasks DWD_LOAD_F
# 测试
cd apps/etl/connectors/feiqiu && pytest tests/unit
cd C:\NeoZQYY && pytest tests/ -v
cd C:\Project\NeoZQYY && pytest tests/ -v
```
## 文件归属规则

View File

@@ -146,7 +146,7 @@ cd apps/admin-web && pnpm exec vitest run
cd apps/admin-web && pnpm exec tsc --noEmit
# 属性测试
cd C:\NeoZQYY && pytest tests/ -v
cd C:\Project\NeoZQYY && pytest tests/ -v
```
## 内联注释决策

View File

@@ -7,7 +7,7 @@
## 变更概述
将单一 ETL 仓库(C:\ZQYY\FQ-ETL)迁移为 Monorepo 单体仓库(C:\NeoZQYY),整合 ETL、后端、小程序、GUI 等子项目。一次性搬迁策略,不保留 Git 历史。
将单一 ETL 仓库(C:\ZQYY\FQ-ETL)迁移为 Monorepo 单体仓库(C:\Project\NeoZQYY),整合 ETL、后端、小程序、GUI 等子项目。一次性搬迁策略,不保留 Git 历史。
## 变更清单
@@ -76,7 +76,7 @@
## 后续建议
1. 在 C:\NeoZQYY\apps\etl\pipelines\feiqiu\ 下运行 `pytest tests/unit` 确认测试通过
1. 在 C:\Project\NeoZQYY\apps\etl\pipelines\feiqiu\ 下运行 `pytest tests/unit` 确认测试通过
2. 修复源仓库已有的 5 个失败测试
3. DB Schema DDL 在测试环境验证后再应用到生产
4. 首次 `git add . && git commit` 建立基线

View File

@@ -36,7 +36,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- `language-zh.md` — no changes needed
- `db-docs.md` — no changes needed (uses `**/*.sql` wildcards, still valid)
- `steering-readme-maintainer.md` — no changes needed
- **MCP**: Workspace-level `mcp.json` updated to override git server repo path from `C:\ZQYY\FQ-ETL` to `C:\NeoZQYY`
- **MCP**: Workspace-level `mcp.json` updated to override git server repo path from `C:\ZQYY\FQ-ETL` to `C:\Project\NeoZQYY`
- **Skills**: 3 skills kept as-is (`bd-manual-db-docs`, `change-annotation-audit`, `steering-readme-maintainer`)
- **Agents**: `audit-writer.md` already references Monorepo paths (was updated during migration)
- **NOT YET DONE**:
@@ -98,27 +98,27 @@ Exit Code: 0
Hook execution failed with exit code 1.
Error output:
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\NeoZQYY\.kiro\scripts\audit_flagger.ps1:19 <20>ַ<EFBFBD>: 5
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:19 <20>ַ<EFBFBD>: 5
+ try {
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ͷ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ȱ<EFBFBD><EFBFBD><EFBFBD>ҡ<EFBFBD>}<7D><><EFBFBD><EFBFBD>
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:72 <20>ַ<EFBFBD>: 40
+ @{ Pattern = "^db/"; Label = "db" }
+ ~
Try <20><><EFBFBD>ȱ<EFBFBD><C8B1><EFBFBD>Լ<EFBFBD><D4BC><EFBFBD> Catch <20><> Finally <20>
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\NeoZQYY\.kiro\scripts\audit_flagger.ps1:72 <20>ַ<EFBFBD>: 40
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:73 <20>ַ<EFBFBD>: 3
+ )
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʽ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>а<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ı<EFBFBD>ǡ<EFBFBD>)<29><><EFBFBD><EFBFBD>
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:94 <20>ַ<EFBFBD>: 5
+ }
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʽ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>а<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ı<EFBFBD>ǡ<EFBFBD>}<7D><><EFBFBD><EFBFBD>
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\NeoZQYY\.kiro\scripts\audit_flagger.ps1:73 <20>ַ<EFBFBD>: 3
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:95 <20>ַ<EFBFBD>: 3
+ }
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʽ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>а<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ı<EFBFBD>ǡ<EFBFBD>}<7D><><EFBFBD><EFBFBD>
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD> C:\Project\NeoZQYY\.kiro\scripts\audit_flagger.ps1:124 <20>ַ<EFBFBD>: 1
+ } catch {
+ ~
<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʽ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>а<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ı<EFBFBD>ǡ<EFBFBD>}<7D><><EFBFBD><EFBFBD>

View File

@@ -124,7 +124,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- 按 `db-docs.md` 规则,需同步更新 `docs/database/` 文档
- MCP 工具在事务块中运行,无法直接 `CREATE DATABASE`,需通过 dblink 绕过
- 系统没有 `psql` 命令行工具,但有 `psycopg2` Python 包可用
- shell cwd 可能不在项目根目录,执行 Python 脚本需用绝对路径 `C:\NeoZQYY\...`
- shell cwd 可能不在项目根目录,执行 Python 脚本需用绝对路径 `C:\Project\NeoZQYY\...`
- 旧 DDL 文件(如 `schema_etl_admin.sql`)保留作为历史参考,不修改
- 审计日志和迁移历史文档中的旧 schema 名保留原样
- 测试代码中的 fixture SQL 使用旧 schema 名是测试 DDL 解析器能力,可以保留不改

View File

@@ -84,7 +84,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- `local-Python` 用户是超级用户
- MCP 工具在事务块中运行,无法直接 `CREATE DATABASE`,需通过 dblink 绕过
- 系统没有 `psql` 命令行工具,但有 `psycopg2` Python 包可用
- shell cwd 可能不在项目根目录,执行 Python 脚本需用绝对路径 `C:\NeoZQYY\...`
- shell cwd 可能不在项目根目录,执行 Python 脚本需用绝对路径 `C:\Project\NeoZQYY\...`
- Windows 控制台需要 `$env:PYTHONIOENCODING="utf-8"` 或 `sys.stdout.reconfigure(encoding="utf-8")` 处理中文输出
- 旧 DDL 文件(如 `schema_etl_admin.sql`)保留作为历史参考,不修改
- 审计日志和迁移历史文档中的旧 schema 名保留原样

View File

@@ -87,7 +87,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
## USER CORRECTIONS AND INSTRUCTIONS:
- 所有说明性文字使用简体中文
- 系统没有 `psql` 命令行工具,但有 `psycopg2` Python 包
- shell cwd 可能不在项目根目录(常在 `C:\NeoZQYY\apps\backend`),执行 Python 脚本需用绝对路径 `C:\NeoZQYY\...`
- shell cwd 可能不在项目根目录(常在 `C:\Project\NeoZQYY\apps\backend`),执行 Python 脚本需用绝对路径 `C:\Project\NeoZQYY\...`
- Windows 控制台需要 `$env:PYTHONIOENCODING="utf-8"` 处理中文输出
- MCP postgres 工具当前连接的是旧库 `LLZQ-test`,新库操作通过 dblink 或 psycopg2 脚本
- `local-Python` 用户是超级用户

View File

@@ -71,8 +71,8 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- 根目录的 docs/tests/scripts 放项目级文件
- admin-web 和 miniprogram 已进入正式开发阶段,需要纳入审计高风险路径
- steering 的 inclusion 类型只有三种always、manual、fileMatch没有 auto
- Shell 工作目录经常卡在 `apps\backend`,需要用绝对路径 `C:\NeoZQYY\...` 执行命令
- 项目根目录是 `C:\NeoZQYY`
- Shell 工作目录经常卡在 `apps\backend`,需要用绝对路径 `C:\Project\NeoZQYY\...` 执行命令
- 项目根目录是 `C:\Project\NeoZQYY`
## Files to read
- `.kiro/steering/structure-lite.md`

View File

@@ -2,5 +2,5 @@
- summary: CMD提示30秒卡住。ps内容Windows PowerShell版权所有 (C) Microsoft Corporation。保留所有权利。加载个人及系统配置文件用了 6673 毫秒。=== 后端 FastAPI ===INFO: W…
- prompt:
```text
CMD提示30秒卡住。ps内容Windows PowerShell版权所有 (C) Microsoft Corporation。保留所有权利。加载个人及系统配置文件用了 6673 毫秒。=== 后端 FastAPI ===INFO: Will watch for changes in these directories: ['C:\\NeoZQYY\\apps\\backend']INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)INFO: Started reloader process [7504] using WatchFilesINFO: Started server process [8776]INFO: Waiting for application startup.INFO: Application startup complete.--------------------Windows PowerShell版权所有 (C) Microsoft Corporation。保留所有权利。加载个人及系统配置文件用了 5839 毫秒。=== 前端 Vite ===> admin-web@0.1.0 dev C:\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 916 ms鉃?[39m Local: http://localhost:5173/鉃?[39m Network: use --host to expose鉃?[39m press h + enter to show help----------------另外我换了新版本的Powershell 7
CMD提示30秒卡住。ps内容Windows PowerShell版权所有 (C) Microsoft Corporation。保留所有权利。加载个人及系统配置文件用了 6673 毫秒。=== 后端 FastAPI ===INFO: Will watch for changes in these directories: ['C:\\NeoZQYY\\apps\\backend']INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)INFO: Started reloader process [7504] using WatchFilesINFO: Started server process [8776]INFO: Waiting for application startup.INFO: Application startup complete.--------------------Windows PowerShell版权所有 (C) Microsoft Corporation。保留所有权利。加载个人及系统配置文件用了 5839 毫秒。=== 前端 Vite ===> admin-web@0.1.0 dev C:\Project\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 916 ms鉃?[39m Local: http://localhost:5173/鉃?[39m Network: use --host to expose鉃?[39m press h + enter to show help----------------另外我换了新版本的Powershell 7
```

View File

@@ -1,6 +1,6 @@
- [P20260215-233930] 2026-02-15 23:39:30 +0800
- summary: ========================================NeoZQYY 管理后台启动脚本========================================项目根目录: C:\NeoZQYY启动失败: T…
- summary: ========================================NeoZQYY 管理后台启动脚本========================================项目根目录: C:\Project\NeoZQYY启动失败: T…
- prompt:
```text
========================================NeoZQYY 管理后台启动脚本========================================项目根目录: C:\NeoZQYY启动失败: The process cannot access the file 'C:\Users\Administrator\AppData\Local\Temp\neozqyy_frontend.log' because it is being used by another process.at <ScriptBlock>, C:\NeoZQYY\scripts\ops\start-admin.ps1: line 29按任意键关闭此窗口...
========================================NeoZQYY 管理后台启动脚本========================================项目根目录: C:\Project\NeoZQYY启动失败: The process cannot access the file 'C:\Users\Administrator\AppData\Local\Temp\neozqyy_frontend.log' because it is being used by another process.at <ScriptBlock>, C:\Project\NeoZQYY\scripts\ops\start-admin.ps1: line 29按任意键关闭此窗口...
```

View File

@@ -2,5 +2,5 @@
- summary: 我打错字了我想说的是登录成功后正常内容闪了一下然后白屏了另外这个前端PowerShell 乱码其他2个PowerShell 窗口是正常的。现实PowerShell 7.5.4=== 前端 Vite ===> admin-web@0
- prompt:
```text
我打错字了我想说的是登录成功后正常内容闪了一下然后白屏了另外这个前端PowerShell 乱码其他2个PowerShell 窗口是正常的。现实PowerShell 7.5.4=== 前端 Vite ===> admin-web@0.1.0 dev C:\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 407 ms鉃?[39m Local: http://localhost:5173/ 鉃?[39m Network: use --host to expose 鉃?[39m press h + enter to show help
我打错字了我想说的是登录成功后正常内容闪了一下然后白屏了另外这个前端PowerShell 乱码其他2个PowerShell 窗口是正常的。现实PowerShell 7.5.4=== 前端 Vite ===> admin-web@0.1.0 dev C:\Project\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 407 ms鉃?[39m Local: http://localhost:5173/ 鉃?[39m Network: use --host to expose 鉃?[39m press h + enter to show help
```

View File

@@ -99,7 +99,7 @@ Exit Code: 0
Command executed successfully with no output.
Exit Code: 0
7. 我打错字了我想说的是登录成功后正常内容闪了一下然后白屏了另外这个前端PowerShell 乱码其他2个PowerShell 窗口是正常的。现实PowerShell 7.5.4=== 前端 Vite ===> admin-web@0.1.0 dev C:\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 407 ms鉃?[39m Local: http://localhost:5173/ 鉃?[39m Network: use --host to expose 鉃?[39m press h + enter to show help
7. 我打错字了我想说的是登录成功后正常内容闪了一下然后白屏了另外这个前端PowerShell 乱码其他2个PowerShell 窗口是正常的。现实PowerShell 7.5.4=== 前端 Vite ===> admin-web@0.1.0 dev C:\Project\NeoZQYY\apps\admin-web> viteVITE v6.3.5 ready in 407 ms鉃?[39m Local: http://localhost:5173/ 鉃?[39m Network: use --host to expose 鉃?[39m press h + enter to show help
8. Output:
Command executed successfully with no output.

View File

@@ -1,6 +1,6 @@
- [P20260216-200641] 2026-02-16 20:06:41 +0800
- summary: PS C:\NeoZQYY>python scripts/ops/analyze_dataflow.py --limit 200表不存在或无列: dwd.assistant_accounts_master表不存在或无列: dwd.settl…
- summary: PS C:\Project\NeoZQYY>python scripts/ops/analyze_dataflow.py --limit 200表不存在或无列: dwd.assistant_accounts_master表不存在或无列: dwd.settl…
- prompt:
```text
PS C:\NeoZQYY>python scripts/ops/analyze_dataflow.py --limit 200表不存在或无列: dwd.assistant_accounts_master表不存在或无列: dwd.settlement_records表不存在或无列: dwd.table_fee_transactions表不存在或无列: dwd.assistant_service_records表不存在或无列: dwd.assistant_cancellation_records表不存在或无列: dwd.store_goods_sales_records表不存在或无列: dwd.payment_transactions表不存在或无列: dwd.refund_transactions表不存在或无列: dwd.platform_coupon_redemption_records表不存在或无列: dwd.member_profiles表不存在或无列: dwd.member_stored_value_cards表不存在或无列: dwd.member_balance_changes表不存在或无列: dwd.recharge_settlements表不存在或无列: dwd.group_buy_packages表不存在或无列: dwd.group_buy_redemption_records表不存在或无列: dwd.goods_stock_summary表不存在或无列: dwd.goods_stock_movements表不存在或无列: dwd.site_tables_master表不存在或无列: dwd.stock_goods_category_tree表不存在或无列: dwd.store_goods_master表不存在或无列: dwd.table_fee_discount_records表不存在或无列: dwd.tenant_goods_master表不存在或无列: dwd.settlement_ticket_details============================================================数据流结构分析完成============================================================输出目录: C:\NeoZQYY\export\dataflow_analysis报告文件名: dataflow_2026-02-16_200507.md分析表数: 23 (23 成功, 0 失败)总记录数: 3405落盘路径:json_trees: C:\NeoZQYY\export\dataflow_analysis\json_treesdb_schemas: C:\NeoZQYY\export\dataflow_analysis\db_schemasmanifest: C:\NeoZQYY\export\dataflow_analysis============================================================PS C:\NeoZQYY>
PS C:\Project\NeoZQYY>python scripts/ops/analyze_dataflow.py --limit 200表不存在或无列: dwd.assistant_accounts_master表不存在或无列: dwd.settlement_records表不存在或无列: dwd.table_fee_transactions表不存在或无列: dwd.assistant_service_records表不存在或无列: dwd.assistant_cancellation_records表不存在或无列: dwd.store_goods_sales_records表不存在或无列: dwd.payment_transactions表不存在或无列: dwd.refund_transactions表不存在或无列: dwd.platform_coupon_redemption_records表不存在或无列: dwd.member_profiles表不存在或无列: dwd.member_stored_value_cards表不存在或无列: dwd.member_balance_changes表不存在或无列: dwd.recharge_settlements表不存在或无列: dwd.group_buy_packages表不存在或无列: dwd.group_buy_redemption_records表不存在或无列: dwd.goods_stock_summary表不存在或无列: dwd.goods_stock_movements表不存在或无列: dwd.site_tables_master表不存在或无列: dwd.stock_goods_category_tree表不存在或无列: dwd.store_goods_master表不存在或无列: dwd.table_fee_discount_records表不存在或无列: dwd.tenant_goods_master表不存在或无列: dwd.settlement_ticket_details============================================================数据流结构分析完成============================================================输出目录: C:\Project\NeoZQYY\export\dataflow_analysis报告文件名: dataflow_2026-02-16_200507.md分析表数: 23 (23 成功, 0 失败)总记录数: 3405落盘路径:json_trees: C:\Project\NeoZQYY\export\dataflow_analysis\json_treesdb_schemas: C:\Project\NeoZQYY\export\dataflow_analysis\db_schemasmanifest: C:\Project\NeoZQYY\export\dataflow_analysis============================================================PS C:\Project\NeoZQYY>
```

View File

@@ -16,7 +16,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
### 第一阶段:数据采集 — DONE
- Ran `python scripts/ops/analyze_dataflow.py --limit 200` successfully
- 23 tables collected, all succeeded, 3405 total records
- Output to `C:\NeoZQYY\export\dataflow_analysis\` with subdirs: `json_trees/`, `db_schemas/`, `collection_manifest.json`
- Output to `C:\Project\NeoZQYY\export\dataflow_analysis\` with subdirs: `json_trees/`, `db_schemas/`, `collection_manifest.json`
- DWD tables all returned 0 columns (DWD table names don't match ODS table names — DWD uses dimension/fact table names like `dim_member`, `dim_assistant`, not the ODS raw table names). This is expected behavior.
### 第二阶段:语义分析 — IN PROGRESS (data reading complete, analysis not started)
@@ -33,7 +33,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- JSON→ODS mapping (matched, payload-only, ignored fields)
- ODS→DWD mapping (direct, ETL-derived, SCD2 version control columns)
- Field coverage stats, type distribution, upstream/downstream mapping coverage
- Save to `SYSTEM_ANALYZE_ROOT` (`C:\NeoZQYY\export\dataflow_analysis\`) as `dataflow_YYYY-MM-DD_HHMMSS.md`
- Save to `SYSTEM_ANALYZE_ROOT` (`C:\Project\NeoZQYY\export\dataflow_analysis\`) as `dataflow_YYYY-MM-DD_HHMMSS.md`
### Key Data Summary from collection_manifest.json:
| Table | Records | ODS Cols | DWD Cols |
@@ -69,7 +69,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- DDL COMMENTs follow pattern: `【说明】...【示例】...【JSON字段】...`
**NEXT STEPS**:
1. Read remaining ODS schema files (18 more) from `C:\NeoZQYY\export\dataflow_analysis\db_schemas\ods_*.json`
1. Read remaining ODS schema files (18 more) from `C:\Project\NeoZQYY\export\dataflow_analysis\db_schemas\ods_*.json`
2. Read ETL source code for data flow understanding:
- `apps/etl/pipelines/feiqiu/loaders/ods/generic.py` (ODS loader)
- `apps/etl/pipelines/feiqiu/loaders/base_loader.py`
@@ -80,15 +80,15 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Field purpose inference using DDL COMMENT + JSON samples + ETL code
- ODS→DWD mapping (requires reading DWD DDL files from `db/etl_feiqiu/schemas/` since runtime query returned empty)
4. Generate per-table statistics: field coverage rate, type distribution, mapping coverage
5. Assemble final Markdown report and save to `C:\NeoZQYY\export\dataflow_analysis\dataflow_2026-02-16_HHMMSS.md`
5. Assemble final Markdown report and save to `C:\Project\NeoZQYY\export\dataflow_analysis\dataflow_2026-02-16_HHMMSS.md`
**FILEPATHS**:
- `scripts/ops/analyze_dataflow.py` — CLI entry point
- `scripts/ops/dataflow_analyzer.py` — core collection module with ODS_SPECS
- `C:\NeoZQYY\export\dataflow_analysis\collection_manifest.json` — collection results
- `C:\NeoZQYY\export\dataflow_analysis\json_trees\*.json` — 23 JSON tree files (all read)
- `C:\NeoZQYY\export\dataflow_analysis\db_schemas\ods_*.json` — 23 ODS schema files (5 read)
- `C:\NeoZQYY\export\dataflow_analysis\db_schemas\dwd_*.json` — 23 DWD schema files (all empty/0 cols)
- `C:\Project\NeoZQYY\export\dataflow_analysis\collection_manifest.json` — collection results
- `C:\Project\NeoZQYY\export\dataflow_analysis\json_trees\*.json` — 23 JSON tree files (all read)
- `C:\Project\NeoZQYY\export\dataflow_analysis\db_schemas\ods_*.json` — 23 ODS schema files (5 read)
- `C:\Project\NeoZQYY\export\dataflow_analysis\db_schemas\dwd_*.json` — 23 DWD schema files (all empty/0 cols)
- `apps/etl/pipelines/feiqiu/loaders/` — ETL loader code
- `apps/etl/pipelines/feiqiu/docs/architecture/data_flow.md` — architecture doc (read)
- `.kiro/specs/dataflow-structure-audit/tasks.md` — spec tasks (all completed)
@@ -96,14 +96,14 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
**USER CORRECTIONS AND INSTRUCTIONS**:
- 当前仅分析飞球(feiqiu)连接器
- 报告使用中文
- Output to `SYSTEM_ANALYZE_ROOT` = `C:\NeoZQYY\export\dataflow_analysis`
- Output to `SYSTEM_ANALYZE_ROOT` = `C:\Project\NeoZQYY\export\dataflow_analysis`
- Filename format: `dataflow_YYYY-MM-DD_HHMMSS.md`
- DWD layer uses different table names (dim_member, dim_assistant, fact_* etc.) not the ODS raw table names — need to look at DDL files or ETL code to find the actual DWD table mappings
- The workspace steering rules require: Chinese output, UTF-8, audit for high-risk changes, Python scripts for complex ops
- This is a Kiro Hook-triggered analysis workflow (hook at `.kiro/hooks/dataflow-analyze.kiro.hook`)
## Files to read
- `C:\NeoZQYY\export\dataflow_analysis\collection_manifest.json`
- `C:\Project\NeoZQYY\export\dataflow_analysis\collection_manifest.json`
- `apps/etl/pipelines/feiqiu/loaders/ods/generic.py`
- `apps/etl/pipelines/feiqiu/loaders/base_loader.py`
- `apps/etl/pipelines/feiqiu/docs/architecture/data_flow.md`

View File

@@ -58,7 +58,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- **NOT YET VERIFIED**: Tests have NOT been run after the duplicate deletion fix. The last test run showed 1 failure (`test_foreign_key`) due to the duplicate. Need to re-run tests to confirm the fix works.
**NEXT STEPS**:
1. Run full test suite: `Set-Location C:\NeoZQYY ; python -m pytest tests/test_dataflow_analyzer.py -v --tb=short` to verify the duplicate deletion fixed the `test_foreign_key` failure
1. Run full test suite: `Set-Location C:\Project\NeoZQYY ; python -m pytest tests/test_dataflow_analyzer.py -v --tb=short` to verify the duplicate deletion fixed the `test_foreign_key` failure
2. If tests pass, verify no diagnostics issues on `scripts/ops/gen_dataflow_report.py`
3. Confirm all 96 tests pass (74 original + 10 diff sub-table + 8 guess purpose + 4 purpose column)
@@ -75,7 +75,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Report generated by 2-phase process: Python script collects data → `gen_dataflow_report.py` assembles Markdown
- Anchors: `api-{table-name}`, `ods-{table-name}`, `dwd-{dwd-short-name}` (underscores → hyphens)
- Diff anchors: `diff-{table-name}` (underscores → hyphens)
- Shell working directory may drift — always use `Set-Location C:\NeoZQYY` before running root-level commands
- Shell working directory may drift — always use `Set-Location C:\Project\NeoZQYY` before running root-level commands
## Key Architecture Context:
- `dataflow_analyzer.py` — core collection module

View File

@@ -18,7 +18,7 @@ The user requested a two-phase dataflow structure analysis for the feiqiu (飞
**Phase 2 - Report Generation**: Ran `gen_dataflow_report.py` to generate a comprehensive Markdown report with enhanced content (field diffs, coverage stats, business descriptions, sample values, cross-layer anchor links).
**Key challenge**: The shell's cwd was stuck at `C:\NeoZQYY\apps\etl\pipelines\feiqiu` and couldn't be changed (cd is forbidden). Both scripts require running from project root `C:\NeoZQYY` because they use relative paths for `.env` files and `TABLE_MAP` source parsing.
**Key challenge**: The shell's cwd was stuck at `C:\Project\NeoZQYY\apps\etl\pipelines\feiqiu` and couldn't be changed (cd is forbidden). Both scripts require running from project root `C:\Project\NeoZQYY` because they use relative paths for `.env` files and `TABLE_MAP` source parsing.
**Solution**: Created wrapper scripts (`scripts/ops/_run_dataflow.py` and `scripts/ops/_run_report.py`) that use `os.chdir(root)` to set the correct working directory before importing and calling the main functions.
@@ -45,7 +45,7 @@ The user requested a two-phase dataflow structure analysis for the feiqiu (飞
**DETAILS**: The workspace has an existing spec at `.kiro/specs/ods-dedup-standardize/` with tasks mostly completed (tasks 1-7 done, task 8 in-progress at subtasks 8.4-8.5, task 9 queued). This spec covers ODS dedup and soft-delete standardization. The dataflow analysis task was independent of this spec work.
**USER CORRECTIONS AND INSTRUCTIONS**:
- Shell cwd is `C:\NeoZQYY\apps\etl\pipelines\feiqiu` — cannot use `cd` command, must use absolute paths or wrapper scripts
- Shell cwd is `C:\Project\NeoZQYY\apps\etl\pipelines\feiqiu` — cannot use `cd` command, must use absolute paths or wrapper scripts
- Scripts that need project root context should use `os.chdir()` in a wrapper
- Follow `tech.md` script execution rules: prefer Python scripts over complex PowerShell
- All output in simplified Chinese per `language-zh.md`

View File

@@ -74,7 +74,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- 改动命中高风险路径(`config/`、`tasks/`),完成后需提醒用户运行 `/audit`
- 脚本执行优先用 Python 脚本,避免 PowerShell 复杂逻辑
- 单元测试命令: `cd apps/etl/pipelines/feiqiu && pytest tests/unit`
- 语法检查命令: `python -c "import ast; ast.parse(open(r'C:\NeoZQYY\...', encoding='utf-8').read()); print('OK')"`
- 语法检查命令: `python -c "import ast; ast.parse(open(r'C:\Project\NeoZQYY\...', encoding='utf-8').read()); print('OK')"`
- `test_gen_audit_dashboard.py::TestRealAuditFiles::test_parses_all_real_files` 是已有失败,与本次改动无关
## Files to read

File diff suppressed because one or more lines are too long

View File

@@ -37,7 +37,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- 每层有彩色 Tag 标签(蓝色 ODS 抽取、绿色 DWD 装载、橙色 DWS 汇总、紫色 DWS 指数)
- `buildDomainGroups()` 函数按 `layer` 字段分组
- 域排序:助教→结算→台桌→会员→商品→团购→库存→财务→指数→通用→工具
- 文件通过 `mcp_filesystem_write_file` 写入 `C:\NeoZQYY\apps\admin-web\src\components\TaskSelector.tsx`
- 文件通过 `mcp_filesystem_write_file` 写入 `C:\Project\NeoZQYY\apps\admin-web\src\components\TaskSelector.tsx`
**B. 同步检查 UI** ✅ 已通过 Playwright 验证(显示绿色"已同步"按钮):
- 工具栏右侧显示同步状态按钮

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -94,7 +94,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- ETL 侧文件路径:`apps/etl/pipelines/feiqiu/`(不是 `apps/etl/connectors/feiqiu/`,后者是旧路径但 fileSearch 仍能找到)
## Files to read
- `apps/admin-web/src/components/TaskSelector.tsx`(用 Python 脚本读取:`pathlib.Path(r'C:\NeoZQYY\apps\admin-web\src\components\TaskSelector.tsx').read_text('utf-8')`
- `apps/admin-web/src/components/TaskSelector.tsx`(用 Python 脚本读取:`pathlib.Path(r'C:\Project\NeoZQYY\apps\admin-web\src\components\TaskSelector.tsx').read_text('utf-8')`
- `apps/admin-web/src/pages/TaskConfig.tsx`(同上方式读取)
- `apps/backend/app/services/task_registry.py`

View File

@@ -15,8 +15,8 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
The user requested running the full dataflow structure analysis pipeline for the feiqiu (飞球) connector. This involved two existing scripts that were already fully implemented.
**Phase 1 — Data Collection** (`analyze_dataflow.py`):
- Successfully ran `C:\ProgramData\miniconda3\python.exe C:\NeoZQYY\scripts\ops\analyze_dataflow.py`
- Had shell working directory issues (kept defaulting to `C:\NeoZQYY\apps\etl\connectors\feiqiu`), resolved by using absolute paths for both Python and script
- Successfully ran `C:\ProgramData\miniconda3\python.exe C:\Project\NeoZQYY\scripts\ops\analyze_dataflow.py`
- Had shell working directory issues (kept defaulting to `C:\Project\NeoZQYY\apps\etl\connectors\feiqiu`), resolved by using absolute paths for both Python and script
- Script performed 3 rounds of adaptive date expansion (30→60→90 days) for 11 tables with insufficient records
- Final date range: 2025-11-22 ~ 2026-02-20
- Results: 23 tables, all successful, 3395 total records
@@ -28,7 +28,7 @@ The user requested running the full dataflow structure analysis pipeline for the
- `collection_manifest.json` — with json_field_count, date_from, date_to
**Phase 2 — Report Generation** (`gen_dataflow_report.py`):
- Successfully ran `C:\ProgramData\miniconda3\python.exe C:\NeoZQYY\scripts\ops\gen_dataflow_report.py`
- Successfully ran `C:\ProgramData\miniconda3\python.exe C:\Project\NeoZQYY\scripts\ops\gen_dataflow_report.py`
- Output: `export/SYSTEM/REPORTS/dataflow_analysis/dataflow_2026-02-20_002258.md` (568.6 KB)
- Report confirmed to contain all required enhanced content:
- Report header with API date range and JSON data volume
@@ -61,7 +61,7 @@ The user requested running the full dataflow structure analysis pipeline for the
- Python 3.10+, uv workspace, PostgreSQL (4 databases: etl_feiqiu, test_etl_feiqiu, zqyy_app, test_zqyy_app)
- All output paths via `.env` environment variables → `export/` directory tree
- Scripts in `scripts/ops/` use `_env_paths.get_output_path()` for path resolution
- Shell quirk: PowerShell working directory often stuck at `C:\NeoZQYY\apps\etl\connectors\feiqiu`; use absolute paths for Python executable and script paths
- Shell quirk: PowerShell working directory often stuck at `C:\Project\NeoZQYY\apps\etl\connectors\feiqiu`; use absolute paths for Python executable and script paths
**Existing specs**: `.kiro/specs/dataflow-structure-audit/` has completed requirements.md, design.md, tasks.md (all tasks marked done)

View File

@@ -85,7 +85,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Tests: `pytest tests/ -v` from root for property tests; `cd apps/etl/connectors/feiqiu && pytest tests/unit` for unit tests
- This is "Run All Tasks" mode — orchestrator delegates to subagents, does not write code itself
- Tasks must be executed sequentially, one at a time
- When running property tests from root, may need `--override-ini="rootdir=C:\NeoZQYY"` due to ETL subdirectory pytest.ini
- When running property tests from root, may need `--override-ini="rootdir=C:\Project\NeoZQYY"` due to ETL subdirectory pytest.ini
## Files to read

View File

@@ -81,7 +81,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Tests: `pytest tests/ -v` from root for property tests; `cd apps/etl/connectors/feiqiu && pytest tests/unit` for unit tests
- This is "Run All Tasks" mode — orchestrator delegates to subagents, does not write code itself
- Tasks must be executed sequentially, one at a time
- When running property tests from root, may need `--override-ini="rootdir=C:\NeoZQYY"` due to ETL subdirectory pytest.ini
- When running property tests from root, may need `--override-ini="rootdir=C:\Project\NeoZQYY"` due to ETL subdirectory pytest.ini
## Files to read

View File

@@ -82,7 +82,7 @@ I had just read the `flow_runner.py` code signatures when context ran out:
- API sample cache for checker: `API_SAMPLE_CACHE_ROOT` environment variable
- Property-based tests use `hypothesis` library; run with `warning: "LongRunningPBT"`
- ETL unit tests: `cd apps/etl/connectors/feiqiu && pytest tests/unit`
- Property tests: `cd C:\NeoZQYY && pytest tests/ -v`
- Property tests: `cd C:\Project\NeoZQYY && pytest tests/ -v`
- There are 2-3 pre-existing test failures (`test_cli_args`, `test_cli_param_in_docs` related to `--force-full`) unrelated to this spec
- Python scripts preferred over PowerShell for complex operations
- Audit reminder is pending for: root-file, dir:admin-web, dir:backend, dir:db, db-schema-change

View File

@@ -99,7 +99,7 @@ The user is saying the actual front-end/back-end integration test was never perf
- API sample cache for checker: `API_SAMPLE_CACHE_ROOT` environment variable
- Property-based tests use `hypothesis` library; run with `warning: "LongRunningPBT"`
- ETL unit tests: `cd apps/etl/connectors/feiqiu && pytest tests/unit`
- Property tests: `cd C:\NeoZQYY && pytest tests/ -v`
- Property tests: `cd C:\Project\NeoZQYY && pytest tests/ -v`
- There are 6 pre-existing test failures unrelated to this spec
- Python scripts preferred over PowerShell for complex operations
- Audit reminder is pending for: root-file, dir:admin-web, dir:backend, dir:db, db-schema-change

View File

@@ -135,7 +135,7 @@ User also mentioned: "注意任务执行的常识,如果极短时间内退出
- Consistency report output: `ETL_REPORT_ROOT` env var
- API sample cache: `API_SAMPLE_CACHE_ROOT` env var
- ETL unit tests: `cd apps/etl/connectors/feiqiu && pytest tests/unit`
- Property tests: `cd C:\NeoZQYY && pytest tests/ -v`
- Property tests: `cd C:\Project\NeoZQYY && pytest tests/ -v`
- There are 6 pre-existing test failures unrelated to this spec
- Audit reminder is pending for: root-file, dir:admin-web, dir:backend, dir:db, db-schema-change
- Frontend dev server is running as processId 6 (`pnpm dev` in `apps/admin-web`)

View File

@@ -14,14 +14,14 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
**DETAILS**:
Two spurious `Asia/Shanghai/ODS_JSON_ARCHIVE/` directory trees were found:
1. `C:\NeoZQYY\Asia\Shanghai\ODS_JSON_ARCHIVE\` — 2 empty run dirs (created 2026-02-20 9:59)
2. `C:\NeoZQYY\apps\etl\connectors\feiqiu\Asia\Shanghai\ODS_JSON_ARCHIVE\` — 38 empty run dirs (created from 2026-02-19 23:29)
1. `C:\Project\NeoZQYY\Asia\Shanghai\ODS_JSON_ARCHIVE\` — 2 empty run dirs (created 2026-02-20 9:59)
2. `C:\Project\NeoZQYY\apps\etl\connectors\feiqiu\Asia\Shanghai\ODS_JSON_ARCHIVE\` — 38 empty run dirs (created from 2026-02-19 23:29)
Both contain only empty subdirectories (no actual JSON files). The directory structure `Asia/Shanghai/ODS_JSON_ARCHIVE/ODS_JSON_ARCHIVE-{timestamp}` matches what `_build_fetch_dir` would produce if `self.fetch_root` resolved to `"Asia/Shanghai"` (i.e., `str(ZoneInfo("Asia/Shanghai"))`).
**Root cause analysis performed (not yet concluded)**:
- `.env` files (both root and `feiqiu/.env`) currently have correct `FETCH_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON`
- `.env` files (both root and `feiqiu/.env`) currently have correct `FETCH_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON`
- `AppConfig.load()` currently returns correct `io.fetch_root` value — verified via Python one-liner
- `task_executor.py` line 63-68: `self.fetch_root` reads from `config.get("io.fetch_root") or config.get("pipeline.fetch_root") or config["io"]["export_root"]`
- `_build_fetch_dir` returns `Path(self.fetch_root) / task_code / f"{task_code}-{run_id}-{ts}"`
@@ -31,7 +31,7 @@ Both contain only empty subdirectories (no actual JSON files). The directory str
- Both `Asia/` dirs are NOT in git, NOT in `.gitignore`
- `feiqiu/.env` was last modified 2026-02-20 0:10, but `feiqiu/Asia` was created 2026-02-19 23:29 (BEFORE the .env edit)
- `feiqiu/.env` was created 2026-02-19 16:47
- The git-committed version of `feiqiu/.env` had `FETCH_ROOT=C:/NeoZQYY/export/ETL/JSON` (old path)
- The git-committed version of `feiqiu/.env` had `FETCH_ROOT=C:/NeoZQYY/export/ETL/JSON` (old path)
- Backend subprocess passes `os.environ.copy()` to ETL CLI child process, with `cwd=ETL_PROJECT_PATH` (feiqiu dir)
- `env_parser._load_dotenv_values` reads from `Path(__file__).resolve().parents[1] / ".env"` (i.e., `feiqiu/.env`)

View File

@@ -47,7 +47,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Testing environment rules apply: must load `.env` properly, never skip config
- Output paths must come from `.env` environment variables (export-paths.md steering)
- Script execution convention: run Python scripts via `uv run python` or `python`
- The workspace root is `C:\NeoZQYY` on Windows with cmd shell
- The workspace root is `C:\Project\NeoZQYY` on Windows with cmd shell
- Four database connections available via MCP: `mcp_pg_etl` (production), `mcp_pg_etl_test` (test), `mcp_pg_app`, `mcp_pg_app_test`
- store_id: `2790685415443269`

View File

@@ -32,7 +32,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Four DB instances: `etl_feiqiu`, `test_etl_feiqiu`, `zqyy_app`, `test_zqyy_app`
- Environment variables control all output paths (see `export-paths.md` steering)
- Output paths come from `.env` - key vars: `SYSTEM_ANALYZE_ROOT`, `FULL_DATAFLOW_DOC_ROOT`
- Scripts must be run with `uv run python` or `python` from project root `C:\NeoZQYY`
- Scripts must be run with `uv run python` or `python` from project root `C:\Project\NeoZQYY`
- OS is Windows with cmd shell
- Whitelist rules (v4): ETL meta cols, SCD2 cols, siteProfile nested fields - still checked but folded in report
- Only analyzing feiqiu connector currently

View File

@@ -12,7 +12,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
**USER QUERIES**: 1 ("执行数据流结构分析,按以下步骤完成")
**DETAILS**:
- Phase 1 (数据采集): Ran `python scripts/ops/analyze_dataflow.py` from project root (`C:\NeoZQYY`). Successfully collected data for 22 ODS tables, 3388 total records, date range 2025-11-23 ~ 2026-02-21 (auto-expanded to 90 days).
- Phase 1 (数据采集): Ran `python scripts/ops/analyze_dataflow.py` from project root (`C:\Project\NeoZQYY`). Successfully collected data for 22 ODS tables, 3388 total records, date range 2025-11-23 ~ 2026-02-21 (auto-expanded to 90 days).
- Phase 2 (报告生成): Ran `python scripts/ops/gen_dataflow_report.py`. Generated 511.1 KB Markdown report with all required enhanced content (API date range, JSON field counts, field diff with whitelist folding, business descriptions, anchor links, sample values).
- Output: `export/SYSTEM/REPORTS/dataflow_analysis/dataflow_2026-02-21_124205.md`
- Manifest: `export/SYSTEM/REPORTS/dataflow_analysis/collection_manifest.json` with 42 DWD→ODS mappings
@@ -79,7 +79,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- Prefer fixing scripts directly over creating separate hooks for script-specific issues
- `scripts/ops/` scripts should work regardless of cwd — `_env_paths.py` uses `Path(__file__).resolve().parents[2]` for root `.env`
- Python scripts over PowerShell for complex operations (per `tech.md`)
- OS is Windows with cmd shell; project root is `C:\NeoZQYY`
- OS is Windows with cmd shell; project root is `C:\Project\NeoZQYY`
- Audit reminder fired but user confirmed no audit needed for Task 1 (only ran existing scripts, no source code changes). Task 3 modifies `scripts/ops/analyze_dataflow.py` which may need audit per governance rules.
**Files to read**:

View File

@@ -96,7 +96,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
## USER CORRECTIONS AND INSTRUCTIONS:
- 所有对话和文档使用简体中文
- 测试/验证必须使用测试库 `TEST_DB_DSN`(`test_etl_feiqiu`),不能用正式库
- 脚本执行 cwd 在项目根目录 `C:\NeoZQYY`
- 脚本执行 cwd 在项目根目录 `C:\Project\NeoZQYY`
- 复杂操作写 Python 脚本执行,避免 PowerShell 多行字符串转义问题
- 输出路径必须从 `.env` 环境变量读取,禁止硬编码
- DB schema 变更后必须同步 `docs/database/` 文档(db-docs.md steering)

View File

@@ -82,7 +82,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
## USER CORRECTIONS AND INSTRUCTIONS:
- 所有对话和文档使用简体中文
- 测试/验证必须使用测试库 `TEST_DB_DSN`(`test_etl_feiqiu`),不能用正式库
- 脚本执行 cwd 在项目根目录 `C:\NeoZQYY`
- 脚本执行 cwd 在项目根目录 `C:\Project\NeoZQYY`
- 复杂操作写 Python 脚本执行,避免 PowerShell 多行字符串转义问题
- 输出路径必须从 `.env` 环境变量读取,禁止硬编码
- DB schema 变更后必须同步 `docs/database/` 文档(db-docs.md steering)

View File

@@ -73,7 +73,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
## USER CORRECTIONS AND INSTRUCTIONS:
- 所有对话和文档使用简体中文
- 测试/验证必须使用测试库 `TEST_DB_DSN`(`test_etl_feiqiu`),不能用正式库
- 脚本执行 cwd 在项目根目录 `C:\NeoZQYY`
- 脚本执行 cwd 在项目根目录 `C:\Project\NeoZQYY`
- 复杂操作写 Python 脚本执行,避免 PowerShell 多行字符串转义问题
- 输出路径必须从 `.env` 环境变量读取,禁止硬编码
- DB schema 变更后必须同步 `docs/database/` 文档(db-docs.md steering)

View File

@@ -28,7 +28,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
- 门店: 已选 1 个门店(site_id 2790685415443269)
4. **任务已提交并正在执行** — 点击"直接执行"后,页面跳转到任务管理页面,历史标签显示任务状态为 `running`,开始时间 2026/2/21 15:29:20
5. **实时日志正常** — WebSocket 日志显示 ETL 正在按 4 段窗口执行 ODS 任务,ODS_ASSISTANT_ACCOUNT 已完成(fetched 276, updated 276),ODS_ASSISTANT_LEDGER 正在执行中
6. **执行命令确认正确**`C:\NeoZQYY\.venv\Scripts\python.exe -m cli.main --flow api_full --processing-mode full_window --tasks ODS_ASSISTANT_ACCOUNT,...,DWD_LOAD_FROM_ODS --window-start 2025-11-01 --window-end 2026-02-20 --window-split day --window-split-days 30 --force-full --store-id 2790685415443269`
6. **执行命令确认正确**`C:\Project\NeoZQYY\.venv\Scripts\python.exe -m cli.main --flow api_full --processing-mode full_window --tasks ODS_ASSISTANT_ACCOUNT,...,DWD_LOAD_FROM_ODS --window-start 2025-11-01 --window-end 2026-02-20 --window-split day --window-split-days 30 --force-full --store-id 2790685415443269`
### 当前状态:
- ETL 任务正在后台执行中(预计总耗时约 85 分钟,参考上次执行 85m35s

View File

@@ -78,7 +78,7 @@ What was completed:
## USER CORRECTIONS AND INSTRUCTIONS:
- All output must go to `export/` directory structure via `.env` environment variables
- Scripts run from `scripts/ops/` — but cwd is currently stuck at `scripts/ops/` in the shell, so use absolute paths like `python C:\NeoZQYY\scripts\ops\script.py`
- Scripts run from `scripts/ops/` — but cwd is currently stuck at `scripts/ops/` in the shell, so use absolute paths like `python C:\Project\NeoZQYY\scripts\ops\script.py`
- Database testing uses `TEST_DB_DSN` (test_etl_feiqiu), never production
- All explanatory text in simplified Chinese
- Python scripts preferred over PowerShell for complex operations

File diff suppressed because one or more lines are too long

View File

@@ -83,7 +83,7 @@ CONTEXT TRANSFER: We are continuing a conversation that had gotten too long. Her
**USER CORRECTIONS AND INSTRUCTIONS**:
- All responses must be in simplified Chinese
- This is `docs/h5_ui/` prototype work — NOT high-risk path, no audit needed
- PowerShell cwd is stuck at `C:\NeoZQYY\apps\etl\connectors\feiqiu\` — use absolute paths or Python scripts for shell commands
- PowerShell cwd is stuck at `C:\Project\NeoZQYY\apps\etl\connectors\feiqiu\` — use absolute paths or Python scripts for shell commands
- Export path rules don't apply here (no file output to `export/`)
**Files to read** (for continuing work):

View File

@@ -34,16 +34,16 @@ export/
| 环境变量 | 默认值(开发机) | 对应目录 | 说明 |
|----------|------------------|----------|------|
| `EXPORT_ROOT` | `C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON` | `ETL-Connectors/feiqiu/JSON/` | ODS 抓取 JSON 落盘根目录 |
| `LOG_ROOT` | `C:/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS` | `ETL-Connectors/feiqiu/LOGS/` | ETL 运行日志 |
| `FETCH_ROOT` | `C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON` | `ETL-Connectors/feiqiu/JSON/` | FETCH_ONLY 模式 JSON 输出(通常与 EXPORT_ROOT 相同) |
| `ETL_REPORT_ROOT` | `C:/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS` | `ETL-Connectors/feiqiu/REPORTS/` | ETL 质检/完整性报告 |
| `SYSTEM_ANALYZE_ROOT` | `C:/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis` | `SYSTEM/REPORTS/dataflow_analysis/` | 数据流结构分析报告 |
| `FIELD_AUDIT_ROOT` | `C:/NeoZQYY/export/SYSTEM/REPORTS/field_audit` | `SYSTEM/REPORTS/field_audit/` | 字段排查报告 |
| `FULL_DATAFLOW_DOC_ROOT` | `C:/NeoZQYY/export/SYSTEM/REPORTS/full_dataflow_doc` | `SYSTEM/REPORTS/full_dataflow_doc/` | 全链路数据流文档 |
| `API_SAMPLE_CACHE_ROOT` | `C:/NeoZQYY/export/SYSTEM/CACHE/api_samples` | `SYSTEM/CACHE/api_samples/` | API 样本缓存 |
| `SYSTEM_LOG_ROOT` | `C:/NeoZQYY/export/SYSTEM/LOGS` | `SYSTEM/LOGS/` | 系统级运维日志 |
| `BACKEND_LOG_ROOT` | `C:/NeoZQYY/export/BACKEND/LOGS` | `BACKEND/LOGS/` | 后端结构化日志 |
| `EXPORT_ROOT` | `C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON` | `ETL-Connectors/feiqiu/JSON/` | ODS 抓取 JSON 落盘根目录 |
| `LOG_ROOT` | `C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS` | `ETL-Connectors/feiqiu/LOGS/` | ETL 运行日志 |
| `FETCH_ROOT` | `C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON` | `ETL-Connectors/feiqiu/JSON/` | FETCH_ONLY 模式 JSON 输出(通常与 EXPORT_ROOT 相同) |
| `ETL_REPORT_ROOT` | `C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS` | `ETL-Connectors/feiqiu/REPORTS/` | ETL 质检/完整性报告 |
| `SYSTEM_ANALYZE_ROOT` | `C:/Project/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis` | `SYSTEM/REPORTS/dataflow_analysis/` | 数据流结构分析报告 |
| `FIELD_AUDIT_ROOT` | `C:/Project/NeoZQYY/export/SYSTEM/REPORTS/field_audit` | `SYSTEM/REPORTS/field_audit/` | 字段排查报告 |
| `FULL_DATAFLOW_DOC_ROOT` | `C:/Project/NeoZQYY/export/SYSTEM/REPORTS/full_dataflow_doc` | `SYSTEM/REPORTS/full_dataflow_doc/` | 全链路数据流文档 |
| `API_SAMPLE_CACHE_ROOT` | `C:/Project/NeoZQYY/export/SYSTEM/CACHE/api_samples` | `SYSTEM/CACHE/api_samples/` | API 样本缓存 |
| `SYSTEM_LOG_ROOT` | `C:/Project/NeoZQYY/export/SYSTEM/LOGS` | `SYSTEM/LOGS/` | 系统级运维日志 |
| `BACKEND_LOG_ROOT` | `C:/Project/NeoZQYY/export/BACKEND/LOGS` | `BACKEND/LOGS/` | 后端结构化日志 |
---
@@ -262,14 +262,14 @@ ETL 模块的路径变量通过 `env_parser.py` 的 `ENV_MAP` 映射到 `AppConf
## 服务器环境配置示例
开发机(`C:\NeoZQYY\.env`
开发机(`C:\Project\NeoZQYY\.env`)
```env
EXPORT_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
LOG_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
FETCH_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
ETL_REPORT_ROOT=C:/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS
SYSTEM_ANALYZE_ROOT=C:/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis
BACKEND_LOG_ROOT=C:/NeoZQYY/export/BACKEND/LOGS
EXPORT_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
LOG_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/LOGS
FETCH_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/JSON
ETL_REPORT_ROOT=C:/Project/NeoZQYY/export/ETL-Connectors/feiqiu/REPORTS
SYSTEM_ANALYZE_ROOT=C:/Project/NeoZQYY/export/SYSTEM/REPORTS/dataflow_analysis
BACKEND_LOG_ROOT=C:/Project/NeoZQYY/export/BACKEND/LOGS
```
服务器测试环境(`D:\NeoZQYY\test\repo\.env`)

View File

@@ -163,7 +163,7 @@ Git 排除方案说明(统一 .gitignore + skip-worktree
| 环境 | 位置 | Git 分支 | 数据库 | 用途 |
|------|------|----------|--------|------|
| 开发 | 本机 `C:\NeoZQYY` | `dev` | `test_etl_feiqiu` / `test_zqyy_app` | 日常开发 |
| 开发 | 本机 `C:\Project\NeoZQYY` | `dev` | `test_etl_feiqiu` / `test_zqyy_app` | 日常开发 |
| 测试 | 服务器 `D:\NeoZQYY\test\repo` | `test` | `test_etl_feiqiu` / `test_zqyy_app` | 集成测试 + 小程序体验版 |
| 正式 | 服务器 `D:\NeoZQYY\prod\repo` | `master` | `etl_feiqiu` / `zqyy_app` | 生产环境 + 小程序正式版 |

View File

@@ -10,7 +10,7 @@
|------|-----|
| 开发者工具路径 | `C:\dev\WechatDevtools\微信开发者工具.exe` |
| CLI 路径 | `C:\dev\WechatDevtools\cli.bat` |
| 小程序项目路径 | `C:\NeoZQYY\apps\miniprogram` |
| 小程序项目路径 | `C:\Project\NeoZQYY\apps\miniprogram` |
| 自动化端口 | `9420`(固定) |
| AppID | `wx7c07793d82732921` |
@@ -23,7 +23,7 @@
### 第 2 步:在终端启动自动化端口
```powershell
& "C:\dev\WechatDevtools\cli.bat" auto --project "C:\NeoZQYY\apps\miniprogram" --auto-port 9420
& "C:\dev\WechatDevtools\cli.bat" auto --project "C:\Project\NeoZQYY\apps\miniprogram" --auto-port 9420
```
成功输出:
@@ -40,7 +40,7 @@
```
strategy: wsEndpoint
wsEndpoint: ws://127.0.0.1:9420
projectPath: C:\NeoZQYY\apps\miniprogram
projectPath: C:\Project\NeoZQYY\apps\miniprogram
healthCheck: true
```

View File

@@ -15,7 +15,7 @@
### 1.2 迁移目标
将所有项目整合为 NeoZQYY Monorepo`C:\NeoZQYY\`),实现:
将所有项目整合为 NeoZQYY Monorepo(`C:\Project\NeoZQYY\`),实现:
- 清晰的模块边界apps/packages/db/docs 分离)
- uv workspace 统一依赖管理
- 六层数据库 Schema 架构meta/ods/dwd/core/dws/app
@@ -27,7 +27,7 @@
## 2. Monorepo 最终结构
```
C:\NeoZQYY\
C:\Project\NeoZQYY\
├── apps/
│ ├── etl/pipelines/feiqiu/ # 飞球 Connector数据源连接器从 FQ-ETL 平移)
│ ├── backend/ # FastAPI 后端(新建骨架)
@@ -292,7 +292,7 @@ C:\NeoZQYY\
```bash
# 安装依赖
cd C:\NeoZQYY
cd C:\Project\NeoZQYY
uv sync
# ETL 开发
@@ -308,7 +308,7 @@ cd apps/etl/pipelines/feiqiu
pytest tests/unit
# 运行属性测试
cd C:\NeoZQYY
cd C:\Project\NeoZQYY
pytest tests/ -v
```

View File

@@ -199,7 +199,7 @@ audit-writer 子代理(独立执行)
| 服务器名 | 命令 | 用途 | 状态 | 自动批准 |
|---|---|---|---|---|
| `filesystem` | `npx @modelcontextprotocol/server-filesystem` | 文件系统读写(作用域:`C:\NeoZQYY` | 启用 | 全部 |
| `filesystem` | `npx @modelcontextprotocol/server-filesystem` | 文件系统读写(作用域:`C:\Project\NeoZQYY`) | 启用 | 全部 |
| `git` | `uvx mcp-server-git@2025.12.18` | Git 操作(status/diff/commit/log 等) | 启用 | `git_status` |
| `postgres` | `uvx postgres-mcp --access-mode=unrestricted` | PostgreSQL 数据库操作(查询/DDL/健康检查/索引分析) | 启用 | 全部 |
| `playwright` | `npx @playwright/mcp@latest` | 浏览器自动化(截图/快照/点击/表单填写) | 启用 | 无 |

View File

@@ -46,7 +46,7 @@
### 基本用法(创建店铺管理员)
```bash
cd C:\NeoZQYY
cd C:\Project\NeoZQYY
python scripts/ops/init_test_user.py
```

View File

@@ -2,7 +2,7 @@
"""批量替换运行时代码中残留的旧 schema 引用。"""
import os
ROOT = r"C:\NeoZQYY"
ROOT = r"C:\Project\NeoZQYY"
import glob

View File

@@ -4,7 +4,7 @@
分析手动获取的结账数据,检查时间分布
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_analyze_settlement_data.py
"""

View File

@@ -1,7 +1,7 @@
"""快速检查 ETL 日志尾部 — 一次性脚本"""
import os, sys
LOG = r"C:\NeoZQYY\export\ETL-Connectors\feiqiu\LOGS\2681a85399e64c76a040163f956e1907.log"
LOG = r"C:\Project\NeoZQYY\export\ETL-Connectors\feiqiu\LOGS\2681a85399e64c76a040163f956e1907.log"
f = open(LOG, "rb")
f.seek(0, 2)

View File

@@ -18,7 +18,7 @@ SYSTEM_LOG_ROOT = os.environ.get("SYSTEM_LOG_ROOT")
if not SYSTEM_LOG_ROOT:
raise RuntimeError("SYSTEM_LOG_ROOT 环境变量未设置")
LOG_FILE = Path(r"C:\NeoZQYY\export\ETL-Connectors\feiqiu\LOGS\2681a85399e64c76a040163f956e1907.log")
LOG_FILE = Path(r"C:\Project\NeoZQYY\export\ETL-Connectors\feiqiu\LOGS\2681a85399e64c76a040163f956e1907.log")
if not LOG_FILE.exists():
raise FileNotFoundError(f"日志文件不存在: {LOG_FILE}")

View File

@@ -7,6 +7,6 @@ sys.path.insert(0, str(pathlib.Path(__file__).resolve().parents[2] / "apps" / "b
from app.main import app
spec = app.openapi()
out = pathlib.Path(r"C:\NeoZQYY\docs\contracts\openapi\backend-api.json")
out = pathlib.Path(r"C:\Project\NeoZQYY\docs\contracts\openapi\backend-api.json")
out.write_text(json.dumps(spec, ensure_ascii=False, indent=2), encoding="utf-8")
print(f"Done: {len(spec['paths'])} paths, {len(spec['components']['schemas'])} schemas")

View File

@@ -7,7 +7,7 @@
需要手动调用 API 获取最新的结账数据,确认是否为 API 数据源问题。
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_fetch_settlement_data_manual.py
"""

View File

@@ -4,7 +4,7 @@
SPI 问题最终诊断报告 - 基于数据库实际查询
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_final_spi_diagnosis_report.py
"""

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw
@@ -33,11 +33,11 @@ for i, line in enumerate(lines):
break
if not found:
open('c:/NeoZQYY/scripts/ops/_fix_status.txt','w').write('target not found')
open('c:/Project/NeoZQYY/scripts/ops/_fix_status.txt','w').write('target not found')
else:
doc = '\n'.join(lines)
if crlf:
doc = doc.replace('\n', '\r\n')
with open(path, 'wb') as f:
f.write(doc.encode('utf-8'))
open('c:/NeoZQYY/scripts/ops/_fix_status.txt','w').write('done')
open('c:/Project/NeoZQYY/scripts/ops/_fix_status.txt','w').write('done')

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
raw = f.read()
content = raw.decode('utf-8')

View File

@@ -4,7 +4,7 @@
生成飞球 API 结账数据问题综合报告
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_generate_settlement_issue_report.py
"""

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
doc = raw.decode('utf-8').replace('\r\n', '\n')

View File

@@ -4,7 +4,7 @@
检查结账数据 JSON 文件的结构,了解实际字段名称
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_inspect_settlement_json.py
"""

View File

@@ -4,7 +4,7 @@
调查 ETL 在 2026-02-14 后停止处理的原因
用法:
cd C:/NeoZQYY
cd C:/Project/NeoZQYY
python scripts/ops/_investigate_etl_gap.py
"""

View File

@@ -1,7 +1,7 @@
p1 = 'c:/NeoZQYY/scripts/ops/txt/progress_p1.txt'
p2 = 'c:/NeoZQYY/scripts/ops/txt/progress_p2.txt'
p3 = 'c:/NeoZQYY/scripts/ops/txt/progress_p3.txt'
out = 'c:/NeoZQYY/docs/h5_ui/compare/PROGRESS.md'
p1 = 'c:/Project/NeoZQYY/scripts/ops/txt/progress_p1.txt'
p2 = 'c:/Project/NeoZQYY/scripts/ops/txt/progress_p2.txt'
p3 = 'c:/Project/NeoZQYY/scripts/ops/txt/progress_p3.txt'
out = 'c:/Project/NeoZQYY/docs/h5_ui/compare/PROGRESS.md'
parts = []
for p in [p1, p2, p3]:
with open(p, 'rb') as f:

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')

View File

@@ -1,6 +1,6 @@
import sys
sys.stdout.reconfigure(encoding='utf-8')
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
raw = f.read()
content = raw.decode('utf-8')

View File

@@ -1,6 +1,6 @@
import sys
sys.stdout.reconfigure(encoding='utf-8')
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')
lines = content.split('\n')

View File

@@ -1,5 +1,5 @@
import sys
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
c = f.read().decode('utf-8')

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw

View File

@@ -1,10 +1,10 @@
import sys
files = [
('c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md',
('c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md',
'*v4.1\uff082026-03-11\uff09',
'*v4.2\uff082026-03-11\uff09\uff1a\u95f4\u8ddd\u6d4b\u91cf\u4e13\u7528\u5b50\u4ee3\u7406\uff08SPACING-AGENT.md\uff09\uff1b\u901a\u7528\u5de5\u5177 measure_gaps.py\uff1b\u00a70.8 \u65b0\u589e\u95f4\u8ddd\u4ee3\u7406\u8c03\u7528\u63d0\u793a'
),
('c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md',
('c:/Project/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md',
'*v1.9\uff082026-03-11\uff09',
'*v2.0\uff082026-03-11\uff09\uff1a\u65b0\u589e\u95f4\u8ddd\u6d4b\u91cf\u4ee3\u7406\uff08\u6309\u9700\u8c03\u7528\uff09\uff1b\u4fee\u590d\u6587\u4ef6\u5934\u90e8\u63cf\u8ff0\u4e3a 4 \u79cd\u4e13\u804c\u5b50\u4ee3\u7406\u6a21\u5f0f'
),

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')

View File

@@ -1,4 +1,4 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path = 'c:/Project/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')

Some files were not shown because too many files have changed in this diff Show More