This commit is contained in:
Neo
2026-03-15 10:15:02 +08:00
parent 2dd217522c
commit 72bb11b34f
916 changed files with 65306 additions and 16102803 deletions

View File

@@ -1,37 +0,0 @@
# scripts/
## 作用说明
运维与构建脚本目录,存放项目级的审计工具、日常运维、一次性迁移等脚本。
## 内部结构
```
scripts/
├── audit/ # 审计工具
│ └── gen_audit_dashboard.py # 审计一览表生成(扫描 docs/audit/changes/)
├── ops/ # 日常运维
│ ├── start-admin.ps1 # 一键启动管理后台(后端 + 前端)
│ ├── init_databases.py # 初始化 etl_feiqiu / zqyy_app DDL + 种子
│ └── clone_to_test_db.py # 正式库 → 测试库完整镜像
├── migrate/ # 一次性迁移(LLZQ-test → etl_feiqiu)
│ ├── migrate_data.py # 跨库 COPY 数据迁移
│ ├── migrate_finalize.py # 物化视图 + ANALYZE + 验证
│ ├── migrate_fix_remaining.py# 修复部分导入的重复键
│ ├── fix_remaining.py # 补执行失败的 DDL/种子
│ ├── fix_schema_refs.py # 批量替换运行时旧 schema 引用
│ └── batch_schema_rename.py # 批量 schema 重命名(billiards_xxx → xxx)
└── README.md
```
## 说明
- `audit/` — 审计工具脚本(审计一览表生成等)
- `ops/` — 可反复使用的运维脚本
- `migrate/` — 从旧库搬迁到新库的一次性脚本,迁移完成后仅保留备查
- 脚本运行产生的临时输出文件统一放在项目根目录 `tmp/`(已被 .gitignore 忽略)
## Roadmap
- 补充 CI/CD 辅助脚本(lint、test、build)
- 补充数据库备份/恢复脚本

View File

@@ -0,0 +1,43 @@
"""Check the test database's data range and settle_type distribution.

One-off inspection script: runs a fixed list of read-only queries against
the database named by the TEST_DB_DSN environment variable and prints each
result set under a title. Exits with RuntimeError when the DSN is missing.
"""
import os
from dotenv import load_dotenv
load_dotenv()
dsn = os.environ.get("TEST_DB_DSN")
if not dsn:
    raise RuntimeError("TEST_DB_DSN 未配置")
# Imported after the DSN check so a missing/bad config fails first.
import psycopg2

# (title, SQL) pairs — each result set is printed under its title.
queries = [
    ("pay_time 范围(全表)",
     "SELECT MIN(pay_time)::date, MAX(pay_time)::date, COUNT(*) FROM dwd.dwd_settlement_head"),
    ("pay_time 范围(有会员手机号)",
     "SELECT MIN(pay_time)::date, MAX(pay_time)::date, COUNT(*) FROM dwd.dwd_settlement_head WHERE member_phone IS NOT NULL AND member_phone != ''"),
    ("settle_type 分布",
     "SELECT settle_type, COUNT(*) FROM dwd.dwd_settlement_head GROUP BY settle_type ORDER BY settle_type"),
    ("2025-12-09 之后的记录数(不限 settle_type",
     "SELECT COUNT(*) FROM dwd.dwd_settlement_head WHERE pay_time >= '2025-12-09'"),
    ("2025-12-09 之后 settle_type 分布",
     "SELECT settle_type, COUNT(*) FROM dwd.dwd_settlement_head WHERE pay_time >= '2025-12-09' GROUP BY settle_type ORDER BY settle_type"),
    ("2025-12-09 之后有会员手机号的记录",
     "SELECT settle_type, COUNT(*) FROM dwd.dwd_settlement_head WHERE pay_time >= '2025-12-09' AND member_phone IS NOT NULL AND member_phone != '' GROUP BY settle_type ORDER BY settle_type"),
]

conn = psycopg2.connect(dsn)
try:
    cur = conn.cursor()
    try:
        for title, sql in queries:
            print(f"\n{'='*60}")
            print(title)
            print('-'*60)
            cur.execute(sql)
            rows = cur.fetchall()
            # Column names come from the cursor description of the last execute.
            cols = [d[0] for d in cur.description]
            print(f" {' '.join(cols)}")
            for r in rows:
                print(f" {' '.join(str(v) for v in r)}")
            if not rows:
                print(" (无数据)")
    finally:
        cur.close()
finally:
    # Always release the connection, even when a query fails mid-loop
    # (the original leaked both cursor and connection on any exception).
    conn.close()

View File

@@ -29,7 +29,7 @@ cash_online AS (
FROM day_label dl
LEFT JOIN dwd.dwd_settlement_head h
ON h.create_time >= dl.day_start AND h.create_time < dl.day_end
AND h.settle_type = 1
AND h.settle_type IN (1, 3)
GROUP BY dl.day_name, dl.day_start
),
recharge AS (
@@ -57,7 +57,7 @@ member_guest AS (
FROM day_label dl
LEFT JOIN dwd.dwd_settlement_head h
ON h.create_time >= dl.day_start AND h.create_time < dl.day_end
AND h.settle_type = 1
AND h.settle_type IN (1, 3)
GROUP BY dl.day_name, dl.day_start
),
new_member AS (

11
scripts/ops/_find.py Normal file
View File

@@ -0,0 +1,11 @@
"""One-off inspection helper: print selected lines of ORCHESTRATION-PLAN.md.

Reads the file as bytes, normalises CRLF to LF, and prints the first 6 lines
plus lines 28-50 (the main agent section) as repr() so invisible characters
are visible. Ranges are clamped so a short file no longer raises IndexError.
"""
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
    raw = f.read()
lines = raw.decode('utf-8').replace('\r\n', '\n').split('\n')
print(f'Total: {len(lines)}')
# Show first 6 lines and lines around 28-45 (main agent section)
print('--- HEAD ---')
for i in range(0, min(6, len(lines))):
    print(f'{i+1}: {repr(lines[i])}')
print('--- LINES 28-50 ---')
for i in range(27, min(50, len(lines))):
    print(f'{i+1}: {repr(lines[i])}')

43
scripts/ops/_fix1.py Normal file
View File

@@ -0,0 +1,43 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw
lines = raw.decode('utf-8').replace('\r\n', '\n').split('\n')
# Find line with '8. 页面所有屏验证通过后' and insert after it
target = '8. \u9875\u9762\u6240\u6709\u5c4f\u9a8c\u8bc1\u901a\u8fc7\u540e \u2192 \u4e0b\u53d1\u9a8c\u8bc1\u4ee3\u7406\u6267\u884c\u5168\u91cf\u56de\u5f52\u6821\u9a8c'
insert = [
'',
'**\u5355\u5143\u5185\u5b50\u4ee3\u7406\u8c03\u7528\u987a\u5e8f\uff08\u4e3b\u4ee3\u7406\u4e25\u683c\u6309\u6b64\u6b65\u9aa4\u6267\u884c\uff09\uff1a**',
'',
'```',
'Step 1\uff1a\u4e0b\u53d1\u3010\u622a\u56fe\u4ee3\u7406\u3011\u2192 \u7b49\u5f85\u8fd4\u56de H5/MP \u622a\u56fe\u8def\u5f84',
'Step 2\uff1a\u4e0b\u53d1\u3010\u5ba1\u8ba1\u4ee3\u7406\u3011\u2192 \u7b49\u5f85\u8fd4\u56de audit.md + \u4fee\u6b63\u6e05\u5355 + \u521d\u59cb\u5dee\u5f02\u7387',
'Step 3\uff1a\u4e0b\u53d1\u3010\u4fee\u6b63\u4ee3\u7406\u3011\u2192 \u7b49\u5f85\u8fd4\u56de\u4fee\u6b63\u540e\u5dee\u5f02\u7387',
' \u2193',
' \u5dee\u5f02\u7387 \u2265 5% \u4e14\u672a\u89e6\u53d1\u8df3\u8fc7\uff1a\u4e0b\u53d1\u3010\u9a8c\u8bc1\u4ee3\u7406\u3011\u2192 \u91cd\u622a\u56fe\u5bf9\u6bd4 \u2192 \u8fd4\u56de\u4fee\u6b63\u4ee3\u7406\uff08\u5faa\u73af\uff09',
' \u5dee\u5f02\u7387 < 5%\uff1a\u8fdb\u5165 Step 4',
' \u8fde\u7eed 5 \u8f6e\u65e0\u5b9e\u8d28\u4e0b\u964d\uff1a\u8df3\u8fc7\uff0c\u8fdb\u5165 Step 4',
' \u5dee\u5f02\u7387 > 20% \u4e14\u8fde\u7eed 3 \u8f6e\u65e0\u6cd5\u7a81\u7834\uff1a\u89e6\u53d1\u7ed3\u6784\u91cd\u5199\uff08\u00a7\u4e94.2.5\uff09\uff0c\u7136\u540e\u8fd4\u56de\u4fee\u6b63\u4ee3\u7406',
'Step 4\uff1a\u4e3b\u4ee3\u7406\u6c47\u603b \u2192 \u66f4\u65b0 PROGRESS.md \u2192 \u4e0b\u53d1\u4e0b\u4e00\u5355\u5143',
'```',
'',
'> \u6bcf\u4e2a Step \u5fc5\u987b\u7b49\u5f85\u5f53\u524d\u5b50\u4ee3\u7406\u8fd4\u56de\u7ed3\u679c\u540e\u624d\u4e0b\u53d1\u4e0b\u4e00\u4e2a\u3002\u7981\u6b62\u5e76\u53d1\u8c03\u7528\u591a\u4e2a\u5b50\u4ee3\u7406\u3002',
]
found = False
for i, line in enumerate(lines):
if target in line:
lines = lines[:i+1] + insert + lines[i+1:]
found = True
break
if not found:
open('c:/NeoZQYY/scripts/ops/_fix_status.txt','w').write('target not found')
else:
doc = '\n'.join(lines)
if crlf:
doc = doc.replace('\n', '\r\n')
with open(path, 'wb') as f:
f.write(doc.encode('utf-8'))
open('c:/NeoZQYY/scripts/ops/_fix_status.txt','w').write('done')

72
scripts/ops/_fix2.py Normal file
View File

@@ -0,0 +1,72 @@
"""One-off patcher for ORCHESTRATION-PLAN.md.

Applies three independent text edits (Fix 2/3/4). Each edit is skipped when
its anchor text is not found, so re-running the script is safe. The file's
original CRLF/LF line-ending convention is preserved on save.
"""
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
    raw = f.read()
crlf = b'\r\n' in raw  # remember the EOL convention so it can be restored
doc = raw.decode('utf-8').replace('\r\n', '\n')
# Fix 2: Add main agent duties after core model
old2 = (
    '\u8be6\u7ec6\u5b50\u4ee3\u7406\u804c\u8d23\u4e0e\u8c03\u5ea6\u89c4\u5219\u89c1 `docs/h5_ui/compare/AGENT-PLAYBOOK.md`\u3002\n'
    '\n'
    '---\n'
    '\n'
    '## \u7f16\u6392\u89c4\u5219'
)
new2 = (
    '\u8be6\u7ec6\u5b50\u4ee3\u7406\u804c\u8d23\u4e0e\u8c03\u5ea6\u89c4\u5219\u89c1 `docs/h5_ui/compare/AGENT-PLAYBOOK.md`\u3002\n'
    '\n'
    '**\u4e3b\u4ee3\u7406\u8c03\u5ea6\u804c\u8d23\uff1a**\n'
    '1. **\u6bcf\u6b21\u4f1a\u8bdd\u5f00\u59cb**\uff1a\u8bfb `docs/h5_ui/compare/PROGRESS.md`\uff0c\u786e\u8ba4\u5f53\u524d\u8fdb\u5ea6\u548c\u4e0b\u4e00\u4e2a\u5355\u5143\n'
    '2. \u68c0\u67e5 MCP \u5c31\u7eea\u72b6\u6001\uff08\u89c1 PROGRESS.md \u300cMCP \u5c31\u7eea\u68c0\u67e5\u6e05\u5355\u300d\uff09\n'
    '3. \u5f00\u59cb\u65b0\u9875\u9762\uff1a\u9690\u85cf dev-fab/ai-float-button\uff0c\u786e\u8ba4\u9ed8\u8ba4\u7ef4\u5ea6\n'
    '4. \u9010\u5c4f\u4e0b\u53d1\u622a\u56fe\u4ee3\u7406 \u2192 \u5ba1\u8ba1\u4ee3\u7406 \u2192 \u4fee\u6b63/\u9a8c\u8bc1\u5faa\u73af\n'
    '5. \u9a8c\u8bc1\u901a\u8fc7 \u2192 **\u66f4\u65b0 PROGRESS.md \u5bf9\u5e94\u884c\u72b6\u6001** \u2192 \u4e0b\u53d1\u4e0b\u4e00\u5c4f\u622a\u56fe\u4ee3\u7406\n'
    '6. \u300c\u9700\u56de\u9000\u300d\u65f6 \u2192 \u81ea\u52a8\u56de\u9000\u5230\u6307\u5b9a step \u91cd\u5f00\u59cb\n'
    '7. \u300c\u8df3\u8fc7\u300d\u65f6 \u2192 \u5728 PROGRESS.md \u5907\u6ce8\u680f\u8bb0\u5f55\u539f\u56e0\uff0c\u7ee7\u7eed\u4e0b\u4e00\u5c4f\n'
    '8. \u9875\u9762\u6240\u6709\u5c4f\u9a8c\u8bc1\u901a\u8fc7\u540e \u2192 \u4e0b\u53d1\u9a8c\u8bc1\u4ee3\u7406\u6267\u884c\u5168\u91cf\u56de\u5f52\u6821\u9a8c\n'
    '\n'
    '---\n'
    '\n'
    '## \u7f16\u6392\u89c4\u5219'
)
if old2 in doc:
    doc = doc.replace(old2, new2, 1)
    print('Fix 2 OK: main agent duties added')
else:
    print('Fix 2 SKIP')
# Fix 3: Add PROGRESS.md update to standard unit template end
old3 = (
    '10. \u5982\u679c\u504f\u5dee\u6839\u56e0\u662f\u5168\u5c40\u6837\u5f0f\uff08\u5f71\u54cd\u6240\u6709\u5c4f\uff09\uff0c\u6807\u6ce8\u201c\u5168\u5c40\u6837\u5f0f\u53d8\u66f4\uff0c\u53ef\u80fd\u5f71\u54cd\u524d\u5e8f\u5c4f\u201d\u8fd4\u56de\u4e3b\u4ee3\u7406\n'
    '```'
)
new3 = (
    '10. \u5982\u679c\u504f\u5dee\u6839\u56e0\u662f\u5168\u5c40\u6837\u5f0f\uff08\u5f71\u54cd\u6240\u6709\u5c4f\uff09\uff0c\u6807\u6ce8\u201c\u5168\u5c40\u6837\u5f0f\u53d8\u66f4\uff0c\u53ef\u80fd\u5f71\u54cd\u524d\u5e8f\u5c4f\u201d\u8fd4\u56de\u4e3b\u4ee3\u7406\n'
    '\n'
    '\u5b8c\u6210\u540e\uff08\u4e3b\u4ee3\u7406\u6267\u884c\uff09\uff1a\n'
    '\u2192 \u66f4\u65b0 docs/h5_ui/compare/PROGRESS.md \u5bf9\u5e94\u884c\uff1a\u586b\u5199\u521d\u59cb\u5dee\u5f02\u7387\u3001\u4fee\u6b63\u8f6e\u6b21\u3001\u6700\u7ec8\u5dee\u5f02\u7387\u3001\u72b6\u6001\n'
    '\u2192 \u66f4\u65b0\u300c\u5f53\u524d\u72b6\u6001\u300d\u533a\u5757\u7684\u300c\u4e0b\u4e00\u4e2a\u5355\u5143\u300d\u5b57\u6bb5\n'
    '```'
)
if old3 in doc:
    doc = doc.replace(old3, new3, 1)
    print('Fix 3 OK: PROGRESS update step added')
else:
    print('Fix 3 SKIP')
# Fix 4: prereq reference — only applied when the pointer text is absent,
# which makes this edit idempotent across re-runs.
if '\u524d\u7f6e\u4efb\u52a1\u72b6\u6001\u89c1' not in doc:
    old4 = '## \u524d\u7f6e\u4efb\u52a1\uff08\u5728 A \u6279\u6b21\u4e4b\u524d\uff09\n\n### P0.'
    new4 = '## \u524d\u7f6e\u4efb\u52a1\uff08\u5728 A \u6279\u6b21\u4e4b\u524d\uff09\n\n\u003e \u524d\u7f6e\u4efb\u52a1\u72b6\u6001\u89c1 `docs/h5_ui/compare/PROGRESS.md` \u524d\u7f6e\u4efb\u52a1\u8868\u3002\n\n### P0.'
    if old4 in doc:
        doc = doc.replace(old4, new4, 1)
        print('Fix 4 OK: prereq reference added')
    else:
        print('Fix 4 SKIP')
else:
    print('Fix 4 SKIP: already present')
with open(path, 'wb') as f:
    out = doc.replace('\n', '\r\n') if crlf else doc
    f.write(out.encode('utf-8'))
print('Saved')

View File

@@ -0,0 +1,23 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
raw = f.read()
content = raw.decode('utf-8')
# Fix all remaining instances of wrong characters
content = content.replace('\u9501\u70b9\u6cd5', '\u9501\u70b9\u6cd5'.replace('\u9501', '\u9576')) # won't work this way
# Direct string replacements
content = content.replace('\u5207\u6362\u5230\u9501\u70b9\u6cd5', '\u5207\u6362\u5230\u9501\u70b9\u6cd5')
# Just replace all occurrences of the wrong chars
import re
# \u9501 = 锁, \u9576 = 锚
content = content.replace('\u9501\u70b9', '\u9576\u70b9')
with open(path, 'wb') as f:
f.write(content.encode('utf-8'))
# Verify
count = content.count('\u9576\u70b9') # 锚点
remaining = content.count('\u9501\u70b9') # 锁点
print('锚点 count:', count, '| 锁点 remaining:', remaining)

View File

@@ -0,0 +1 @@
done

View File

@@ -0,0 +1,37 @@
"""One-off test script: take an H5 screenshot at scrollTop=0 to confirm the
MCP screenshot region is aligned.

Opens board-finance.html from the local Live Server in a 430x752 viewport
(DPR 3), hides scrollbars, scrolls to the top, and saves a viewport-sized
screenshot under docs/h5_ui/screenshots/.
"""
import asyncio
from pathlib import Path
from playwright.async_api import async_playwright

ROOT = Path(__file__).resolve().parents[2]


async def main():
    """Capture the top-of-page screenshot and print scroll diagnostics."""
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True, args=["--hide-scrollbars"])
        ctx = await browser.new_context(
            viewport={"width": 430, "height": 752}, device_scale_factor=3
        )
        page = await ctx.new_page()
        await page.goto(
            "http://127.0.0.1:5500/docs/h5_ui/pages/board-finance.html",
            wait_until="load", timeout=15000,
        )
        # 2.5s settle time — presumably for CDN-delivered styles to apply
        # (matches the wait used by _measure_h5_page_heights.py); TODO confirm.
        await page.wait_for_timeout(2500)
        # Hide scrollbars so they never appear in the capture.
        await page.evaluate("""() => {
            document.documentElement.style.overflow = 'auto';
            document.documentElement.style.scrollbarWidth = 'none';
            const s = document.createElement('style');
            s.textContent = '::-webkit-scrollbar { display: none !important; }';
            document.head.appendChild(s);
        }""")
        await page.wait_for_timeout(300)
        await page.evaluate("() => { window.scrollTo({top: 0, behavior: 'instant'}); }")
        await page.wait_for_timeout(500)
        out = ROOT / "docs" / "h5_ui" / "screenshots" / "h5-board-finance--scroll0-test.png"
        await page.screenshot(path=str(out), full_page=False)
        info = await page.evaluate("() => ({ scrollY: window.scrollY, innerH: window.innerHeight })")
        print(f"H5 scrollY={info['scrollY']}, innerHeight={info['innerH']}")
        print(f"截图保存: {out}")
        await browser.close()


# Guard the entry point so importing this module never launches a browser.
if __name__ == "__main__":
    asyncio.run(main())

8
scripts/ops/_inspect.py Normal file
View File

@@ -0,0 +1,8 @@
"""Dump the first eight lines of ORCHESTRATION-PLAN.md as escape-encoded
bytes so hidden/non-ASCII characters are visible."""
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as handle:
    doc = handle.read().decode('utf-8').replace('\r\n', '\n')
# Print each of the first 8 lines with its 0-based index, escape-encoded.
head = doc.split('\n')[:8]
for idx, row in enumerate(head):
    print(idx, row.encode('unicode_escape'))

View File

@@ -0,0 +1,138 @@
"""
[One-off diagnostic tool] Measure the real heights of the 17 H5 pages to
calibrate the anchor configuration in anchor_compare.py.

Measures each page's scrollHeight in a 430x752 viewport with all collapsed
sections expanded, and computes how many screens it spans.
Writes full-page screenshots + JSON data to export/SYSTEM/REPORTS/h5_page_heights/.
The 2026-03-09 run results were written into design.md §5.1; re-running is
unnecessary unless the H5 prototype page structure changes.

Usage:
    1. Start Live Server first (VS Code: right-click -> Open with Live Server)
    2. python scripts/ops/_measure_h5_page_heights.py
"""
import asyncio
import json
import sys
from pathlib import Path
from playwright.async_api import async_playwright

BASE_URL = "http://127.0.0.1:5500/docs/h5_ui/pages"
VIEWPORT_W = 430
VIEWPORT_H = 752  # matches anchor_compare.py
DPR = 3
# The 17 target pages
PAGES = [
    "board-finance", "board-coach", "board-customer",
    "task-detail", "task-detail-callback", "task-detail-priority", "task-detail-relationship",
    "coach-detail", "customer-detail", "performance",
    "task-list", "my-profile", "customer-service-records",
    "performance-records", "chat", "chat-history", "notes",
]
OUT_DIR = Path(__file__).resolve().parents[2] / "export" / "SYSTEM" / "REPORTS" / "h5_page_heights"
# JS injected into each page; string contents are runtime code, left as-is.
HIDE_SCROLLBAR_JS = """
() => {
    document.documentElement.style.overflow = 'auto';
    document.documentElement.style.scrollbarWidth = 'none';
    const s = document.createElement('style');
    s.textContent = '::-webkit-scrollbar { display: none !important; }';
    document.head.appendChild(s);
}
"""
EXPAND_ALL_JS = """
() => {
    // 点击所有"展开更多"/"查看更多"按钮
    const btns = document.querySelectorAll('[onclick*="More"], [onclick*="expand"], [onclick*="toggle"]');
    btns.forEach(b => b.click());
    // 显示所有 hidden 的展开区域
    document.querySelectorAll('[id*="More"], [id*="more"]').forEach(el => {
        el.classList.remove('hidden');
        el.style.display = '';
    });
}
"""


async def main():
    """Measure every page, save screenshots + JSON, print a summary table."""
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    results = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        context = await browser.new_context(
            viewport={"width": VIEWPORT_W, "height": VIEWPORT_H},
            device_scale_factor=DPR,
        )
        page = await context.new_page()
        for name in PAGES:
            url = f"{BASE_URL}/{name}.html"
            try:
                await page.goto(url, wait_until="load", timeout=15000)
                await page.wait_for_timeout(2500)  # Tailwind CDN JIT
                await page.evaluate(HIDE_SCROLLBAR_JS)
                await page.wait_for_timeout(300)
                # Expand all collapsed sections before measuring
                await page.evaluate(EXPAND_ALL_JS)
                await page.wait_for_timeout(500)
                # Measure the page height
                heights = await page.evaluate("""
() => ({
    scrollHeight: document.documentElement.scrollHeight,
    bodyScrollHeight: document.body.scrollHeight,
    clientHeight: document.documentElement.clientHeight,
})
""")
                scroll_h = heights["scrollHeight"]
                screens = round(scroll_h / VIEWPORT_H, 2)
                # Full-page screenshot
                out_path = OUT_DIR / f"{name}.png"
                await page.screenshot(path=str(out_path), full_page=True)
                file_size = out_path.stat().st_size
                # Height in physical pixels (CSS px * device scale factor)
                phys_h = scroll_h * DPR
                info = {
                    "page": name,
                    "scrollHeight_css": scroll_h,
                    "scrollHeight_phys": phys_h,
                    "viewportHeight": VIEWPORT_H,
                    "screens": screens,
                    "screenshot_size_bytes": file_size,
                }
                results.append(info)
                print(f" {name:35s} {scroll_h:5d}px = {screens:5.2f} 屏 ({file_size:,} bytes)")
            except Exception as e:
                # A failed page is recorded but does not abort the batch.
                print(f"{name}: {e}", file=sys.stderr)
                results.append({"page": name, "error": str(e)})
        await browser.close()
    # Write the JSON summary
    json_path = OUT_DIR / "page_heights.json"
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print(f"\n汇总已写入: {json_path}")
    # Print a Markdown table of the results
    print("\n| # | 页面 | CSS高度(px) | 物理高度(px) | 屏数 |")
    print("|---|------|------------|-------------|------|")
    for i, r in enumerate(results, 1):
        if "error" in r:
            print(f"| {i} | {r['page']} | ❌ | ❌ | {r['error']} |")
        else:
            print(f"| {i} | {r['page']} | {r['scrollHeight_css']} | {r['scrollHeight_phys']} | {r['screens']} |")


if __name__ == "__main__":
    asyncio.run(main())

12
scripts/ops/_merge.py Normal file
View File

@@ -0,0 +1,12 @@
"""Concatenate the three PROGRESS part files into PROGRESS.md."""
sources = [
    'c:/NeoZQYY/scripts/ops/txt/progress_p1.txt',
    'c:/NeoZQYY/scripts/ops/txt/progress_p2.txt',
    'c:/NeoZQYY/scripts/ops/txt/progress_p3.txt',
]
out = 'c:/NeoZQYY/docs/h5_ui/compare/PROGRESS.md'


def _read_utf8(src):
    """Read one part file as UTF-8 text."""
    with open(src, 'rb') as fh:
        return fh.read().decode('utf-8')


result = ''.join(_read_utf8(src) for src in sources)
with open(out, 'wb') as fh:
    fh.write(result.encode('utf-8'))
print(f'Written {len(result)} chars, {result.count(chr(10))} lines to {out}')

27
scripts/ops/_p2.py Normal file
View File

@@ -0,0 +1,27 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')
# Insert 5.4.4 before 5.5
marker = '### 5.5 '
idx = content.find(marker)
print('5.5 at:', idx)
assert idx >= 0
insert = (
'### 5.4.4 \u6700\u5927\u91cd\u8bd5\u9650\u5236\uff08\u5f3a\u5236\uff09\n\n'
'\u91cd\u8bd5\u8ba1\u6570\u89c4\u5219\uff1a\n'
'- \u6bcf\u8f6e\u4fee\u6b63\u540e\u5dee\u5f02\u7387\u4e0b\u964d **>0.5%** \u2192 \u91cd\u8bd5\u8ba1\u6570\u91cd\u7f6e\u4e3a 0\n'
'- \u5dee\u5f02\u7387\u4e0b\u964d \u22640.5%\uff08\u6216\u4e0a\u5347\uff09\u2192 \u91cd\u8bd5\u8ba1\u6570 +1\n'
'- \u91cd\u8bd5\u8ba1\u6570\u8fbe\u5230 **5** \u4e14\u5dee\u5f02\u7387\u4f9d\u7136 \u22655% \u2192 \u6267\u884c\u8df3\u8fc7\u6d41\u7a0b\uff1a\n\n'
'\u8df3\u8fc7\u6d41\u7a0b\uff1a\n'
'1. \u5c06\u5f53\u524d\u6700\u4f73\u5dee\u5f02\u7387\u3001\u5269\u4f59\u504f\u5dee\u6e05\u5355\u8bb0\u5f55\u5230 `report.md`\n'
'2. \u5728 report.md \u6807\u6ce8\uff1a`[\u8df3\u8fc7] \u5dee\u5f02\u7387 X.XX%\uff0c\u8fde\u7eed 5 \u8f6e\u65e0\u5b9e\u8d28\u4e0b\u964d\uff0c\u8df3\u8fc7\u539f\u56e0\uff1a<\u7b80\u8ff0>`\n'
'3. \u8fd4\u56de\u4e3b\u4ee3\u7406\uff0c\u4e3b\u4ee3\u7406\u8bb0\u5f55\u540e\u7ee7\u7eed\u4e0b\u4e00\u5355\u5143\n\n'
'> \u793a\u4f8b\uff1a11%\u2192\u4fee\u6b633\u6b21\u65e0\u6548\uff0c\u7b2c4\u8f6e\u964d\u81f38%\uff08\u4e0b\u964d3%>0.5%\uff09\u2192\u8ba1\u6570\u91cd\u7f6e\u4e3a0\uff0c\u7ee7\u7eed\u4fee\u6b63\u3002\n\n'
)
content = content[:idx] + insert + content[idx:]
with open(path, 'wb') as f:
f.write(content.encode('utf-8'))
print('Done. 5.4.4 inserted.')

58
scripts/ops/_p3.py Normal file
View File

@@ -0,0 +1,58 @@
"""One-off patcher: insert section §0.8 ("pre-compute sub-tables before
migration") into AGENT-PLAYBOOK.md at a fixed, hard-coded line position.

NOTE(review): the insertion point is line 154 (1-based) — assumed to be the
`---` separator before the `## 一` heading; verify before re-running.
"""
import sys
# Avoid UnicodeEncodeError when printing Chinese on a non-UTF-8 console.
sys.stdout.reconfigure(encoding='utf-8')
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
    raw = f.read()
content = raw.decode('utf-8')
lines = content.split('\n')
# Insert §0.8 before line 154 (the --- separator before ## 一)
insert_before = 153  # 0-indexed = line 154
new_lines = [
    '---',
    '',
    '### 0.8 \u8fc1\u79fb\u524d\u9884\u5148\u8ba1\u7b97\u5b50\u8868\uff08step-0 \u5904\u7406\u524d\u5fc5\u505a\uff09',
    '',
    '\u5f00\u59cb\u5199 WXSS \u4e4b\u524d\uff0c\u5148\u4ece H5 \u6e90\u7801\u9884\u5148\u8ba1\u7b97\u4ee5\u4e0b\u5b50\u8868\uff0c\u907f\u514d\u8fb9\u770b\u8fb9\u6362\u7b97\u5bfc\u81f4\u6f0f\u6539\u6216\u9519\u6539\uff1a',
    '',
    '**A. \u5168\u5c40\u5185\u8fb9\u8ddd \u2192 rpx**',
    '```',
    'H5 \u9875\u9762\u5bbd 430px\u3002\u5c06 H5 \u5185\u8fb9\u8ddd px \u5e94\u7528\u516c\u5f0f\uff1arpx = px \u00d7 1.75\uff08\u53d6\u5076\u6570\uff09',
    '\u793a\u4f8b\uff1ap-4(16px)\u219228rpx p-6(24px)\u219242rpx p-7(28px)\u219250rpx',
    '\u5185\u5bb9\u533a\u5bbd(px) = 430 - 2\u00d7\u5185\u8fb9\u8ddd(px)\uff0c\u5bf9\u5e94 rpx \u5e94\u4e0e WXSS \u5185\u5bb9\u5bbd\u4e00\u81f4',
    '```',
    '',
    '**B. Sticky \u5143\u7d20\u9ad8\u5ea6\uff08\u6709 sticky \u7684\u9875\u9762\uff09**',
    '```',
    '\u8fdd\u5143\u7d20\u8ba1\u7b97\uff1a\u9ad8\u5ea6(px) \u00d7 1.75 \u53d6\u5076\u6570 = rpx\uff0c\u7d2f\u52a0\u5f97 sticky \u533a\u57df\u603b\u9ad8',
    '\u586b\u5165 \u00a75.5 \u8868\u683c\uff0c\u7528\u4e8e\u9501\u70b9\u5bf9\u9f50\u548c\u6eda\u52a8\u88c1\u526a\u504f\u79fb\u8ba1\u7b97',
    '```',
    '',
    '**C. \u6bcf\u4e2a Section \u7406\u8bba\u9ad8\u5ea6\uff08\u8f85\u52a9\u9a8c\u8bc1\uff09**',
    '```',
    '\u5bf9\u91cd\u590d\u5217\u8868\u9879\uff1an \u9879 \u00d7 \u5355\u9879\u9ad8 + gap\u00d7(n-1)',
    '\u5bf9\u5361\u7247\uff1apadding-top + padding-bottom + \u5185\u5bb9\u884c\u9ad8\u00d7\u884c\u6570 + gap\u00d7(\u884c\u6570-1)',
    '\u7406\u8bba\u9ad8\u5ea6\u4e0e \u00a79 scrollHeight \u5bf9\u7167\uff0c\u8d85\u8fc7 50px \u5dee\u5f02\u8bf4\u660e\u6709\u5143\u7d20\u9057\u6f0f',
    '```',
    '',
    '**D. Tailwind \u7c7b\u540d \u2192 rpx \u6620\u5c04\u8868\uff08\u5168\u9875\u6240\u6709\u5143\u7d20\uff09**',
    '```',
    '\u626b\u63cf H5 \u6e90\u7801\u6240\u6709 Tailwind \u7c7b\u540d\uff0c\u5bf9\u7167 \u00a77.2 \u9884\u5148\u8f93\u51fa\u6620\u5c04\u8868\uff0c\u907f\u514d\u4fee\u6b63\u65f6\u8fb9\u67e5\u8fb9\u9057\u6f0f',
    '\u683c\u5f0f\uff1a[ \u5143\u7d20\u540d | Tailwind\u7c7b | H5 px | WXSS rpx | \u5907\u6ce8 ]',
    '```',
    '',
    '**E. \u6a21\u5f0f B \u9875\u9762\uff1a\u81ea\u5b9a\u4e49 navBar \u5b9e\u9645\u8bbe\u8ba1\u9ad8\u5ea6**',
    '```',
    '\u8bfb H5 \u6e90\u7801\u4e2d\u81ea\u5b9a\u4e49 navBar \u7684\u8bbe\u8ba1\u9ad8\u5ea6(px)\uff0c\u6362\u7b97\u5230 rpx \u586b\u5165 WXML',
    '\u8be5\u9ad8\u5ea6\u5373 \u00a72.3 \u88c1\u526a\u504f\u79fb\u7684\u4f9d\u636e\uff08\u9ed8\u8ba4 64px \u903b\u8f91 \u00d7 1.5 = 96px \u7269\u7406\uff09',
    '```',
    '',
]
# Splice the new section in at the fixed position and save back as UTF-8.
lines = lines[:insert_before] + new_lines + lines[insert_before:]
content_new = '\n'.join(lines)
with open(path, 'wb') as f:
    f.write(content_new.encode('utf-8'))
print(f'Done. Lines inserted: {len(new_lines)}. Total lines: {len(lines)}')

27
scripts/ops/_p4.py Normal file
View File

@@ -0,0 +1,27 @@
"""One-off patcher: replace lines 14-18 of ORCHESTRATION-PLAN.md (the main
agent duty list) with an updated five-item version.

The replaced block is printed first (ASCII-folded) so the change can be
audited from the console output.
"""
import sys
# Avoid UnicodeEncodeError when printing Chinese on a non-UTF-8 console.
sys.stdout.reconfigure(encoding='utf-8')
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
    content = f.read().decode('utf-8')
lines = content.split('\n')
# Replace lines 14-18 (0-indexed: 13-17) with the updated main-agent duty list
old_block = lines[13:18]
print('Old block:')
for l in old_block:
    # ASCII-fold the preview so it cannot raise on exotic characters.
    print(' ', l.encode('ascii','replace').decode('ascii')[:80])
new_block = [
    ' \u4e3b\u4ee3\u7406\u804c\u8d23\uff1a',
    ' 1. \u5f00\u59cb\u65b0\u9875\u9762\u65f6\u9690\u85cf dev-fab/ai-float-button\uff0c\u786e\u8ba4\u9ed8\u8ba4\u7ef4\u5ea6/\u72b6\u6001',
    ' 2. \u6309 step \u987a\u5e8f\u9010\u4e2a\u4e0b\u53d1\u5355\u5143\u7ed9\u5b50\u4ee3\u7406\uff08\u5168\u81ea\u52a8\uff0c\u65e0\u9700\u4eba\u5de5\u786e\u8ba4\u6bcf\u5c4f\uff09',
    ' 3. \u5b50\u4ee3\u7406\u8fd4\u56de\u901a\u8fc7 \u2192 \u81ea\u52a8\u4e0b\u53d1\u4e0b\u4e00\u5c4f',
    ' 4. \u5b50\u4ee3\u7406\u8fd4\u56de\u300c\u9700\u56de\u9000\u300d\u65f6 \u2192 \u81ea\u52a8\u56de\u9000\u5230\u6307\u5b9a step \u91cd\u5904\u7406',
    ' 5. \u5b50\u4ee3\u7406\u8fd4\u56de\u300c\u8df3\u8fc7\u300d\u65f6 \u2192 \u8bb0\u5f55\u5230\u8df3\u8fc7\u65e5\u5fd7\uff0c\u7ee7\u7eed\u4e0b\u4e00\u5355\u5143',
]
lines = lines[:13] + new_block + lines[18:]
content_new = '\n'.join(lines)
with open(path, 'wb') as f:
    f.write(content_new.encode('utf-8'))
print('Done. New line count:', len(lines))

34
scripts/ops/_p5.py Normal file
View File

@@ -0,0 +1,34 @@
"""One-off patcher: replace the old "执行要求" numbered list (items 1-10) in
ORCHESTRATION-PLAN.md with the new three-phase sub-agent description.

Raises RuntimeError (instead of the original `assert`, which disappears
under `python -O`) when the anchor span cannot be located. The previously
unused `import sys` was dropped.
"""
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as f:
    c = f.read().decode('utf-8')
# Locate the span: from the list header up to the end of item 10.
idx_s = c.find('\u6267\u884c\u8981\u6c42\uff1a\n1. \u4f60\u8d1f\u8d23\u622a\u53d6\u672c\u5c4f')
idx_e = c.find('\n10. ', idx_s)
idx_e = c.find('\n', idx_e + 1)  # end of line 10
print(f's={idx_s} e={idx_e}')
if idx_s < 0 or idx_e <= idx_s:
    raise RuntimeError(f'anchor span not found (s={idx_s}, e={idx_e})')
new_req = (
    '\u9636\u6bb5\u4e00\uff08\u622a\u56fe\u4ee3\u7406\uff09\uff1a\u6309 AGENT-PLAYBOOK.md \u00a7\u4e09 \u622a\u53d6 H5 + MP \u53cc\u7aef\u622a\u56fe\u3002\u6a21\u5f0f B \u9875\u9762\u88c1\u526a MP: crop(0,96,645,1224)\u21921128px\u3002\n'
    '\u8f93\u51fa\uff1ah5--step-N.png / mp--step-N.png\n'
    '\n'
    '\u9636\u6bb5\u4e8c\uff08\u5ba1\u8ba1\u4ee3\u7406\uff09\uff1a\u50cf\u7d20\u5bf9\u6bd4\uff08\u00a7\u56db\uff09\u3002\u8bfb H5 + MP \u6e90\u7801\uff0c\u4ea7\u51fa audit.md\uff08\u00a7\u4e94.1\uff09\u3002\n'
    '\u5fc5\u542b\u590d\u6742\u7ed3\u6784\u626b\u63cf\uff08\u00a7\u4e94.1 F \u9879\uff09\uff1aBanner\u7eb9\u7406\u3001AI\u56fe\u6807\u3001\u76d6\u6233\u5370\u7ae0\u3001inline SVG\u3001\u6e10\u53d8\u6587\u5b57\u3002\n'
    '\u8f93\u51fa\uff1aaudit.md + \u4fee\u6b63\u4f18\u5148\u7ea7\u6e05\u5355\n'
    '\n'
    '\u9636\u6bb5\u4e09\uff08\u4fee\u6b63\u4ee3\u7406 + \u9a8c\u8bc1\u4ee3\u7406\u5faa\u73af\uff09\uff1a\u6309 P0-P7 \u6279\u6b21\u4fee\u6b63 WXSS/WXML\uff0c\u6bcf\u8f6e 1-5 \u5904\u3002\n'
    '\u4e0b\u964d >0.5% \u5219\u91cd\u8bd5\u8ba1\u6570\u91cd\u7f6e\uff1b\u8fde\u7eed 5 \u8f6e\u65e0\u5b9e\u8d28\u4e0b\u964d\u5219\u8df3\u8fc7\uff08\u00a7\u4e94.4.4\uff09\u3002\n'
    '\u6536\u655b\u5230 <5% \u540e\u66f4\u65b0 report.md\uff0c\u6807\u8bb0\u901a\u8fc7\uff0c\u8fd4\u56de\u4e3b\u4ee3\u7406\u3002\n'
    '\n'
    '\u5173\u952e\u7ea6\u675f\uff1a\n'
    '- \u6bcf\u6b21\u6539\u540e getDiagnostics \u786e\u8ba4\u96f6 TS \u9519\u8bef\n'
    '- \u53ea\u6539\u5f53\u524d\u5c4f\u53ef\u89c1\u5143\u7d20\u5bf9\u5e94\u7684\u6e90\u7801\n'
    '- \u504f\u5dee\u6839\u56e0\u5728\u524d\u5e8f\u5c4f \u2192 \u6807\u6ce8"\u9700\u56de\u9000\u5230 step-X"\u8fd4\u56de\u4e3b\u4ee3\u7406\n'
    '- \u504f\u5dee\u6839\u56e0\u662f\u5168\u5c40\u6837\u5f0f \u2192 \u6807\u6ce8"\u5168\u5c40\u6837\u5f0f\u53d8\u66f4"\u8fd4\u56de\u4e3b\u4ee3\u7406'
)
c = c[:idx_s] + new_req + c[idx_e:]
with open(path, 'wb') as f:
    f.write(c.encode('utf-8'))
print('Done')

26
scripts/ops/_p6.py Normal file
View File

@@ -0,0 +1,26 @@
"""One-off patcher: rewrite the intro blockquote of ORCHESTRATION-PLAN.md to
describe the four specialised sub-agents, preserving the file's CRLF/LF
convention. Prints 'ERROR: not found' when the anchor text is absent."""
path = 'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md'
with open(path, 'rb') as fh:
    raw = fh.read()
uses_crlf = b'\r\n' in raw
doc = raw.decode('utf-8').replace('\r\n', '\n')
# Exact old lines from unicode_escape output above
old = (
    '> \u4e3b\u4ee3\u7406\u6309\u672c\u8ba1\u5212\u9010\u5355\u5143\u4e0b\u53d1\u4efb\u52a1\u7ed9\u5b50\u4ee3\u7406\u3002\n'
    '> \u6bcf\u4e2a\u5b50\u4ee3\u7406\u63a5\u6536\u4e00\u4e2a\u5355\u5143\u540e\uff0c\u72ec\u7acb\u5b8c\u6210\u622a\u56fe\u2192\u5bf9\u6bd4\u2192\u5ba1\u8ba1\u2192\u4fee\u6b63\u2192\u9a8c\u8bc1\u7684\u5b8c\u6574\u95ed\u73af\u3002\n'
    '> \u4e0d\u5b58\u5728"\u6279\u91cf\u622a\u56fe"\u6216"\u6279\u91cf\u5bf9\u6bd4"\u9636\u6bb5\u2014\u2014\u6240\u6709\u64cd\u4f5c\u5728\u5355\u5143\u5185\u95ed\u73af\u3002'
)
new = (
    '> \u4e3b\u4ee3\u7406\u6309\u672c\u8ba1\u5212\u9010\u5355\u5143\u8c03\u5ea6 4 \u79cd\u4e13\u804c\u5b50\u4ee3\u7406\uff08\u622a\u56fe\u2192\u5ba1\u8ba1\u2192\u4fee\u6b63\u21c6\u9a8c\u8bc1\uff09\u5b8c\u6210\u6bcf\u5c4f\u8fd8\u539f\u3002\n'
    '> \u6240\u6709\u64cd\u4f5c\u5728\u5355\u5143\u5185\u95ed\u73af\uff0c\u4e0d\u5b58\u5728\u8de8\u5355\u5143\u6279\u91cf\u622a\u56fe\u6216\u6279\u91cf\u5bf9\u6bd4\u9636\u6bb5\u3002'
)
if old in doc:
    patched = doc.replace(old, new, 1)
    if uses_crlf:
        patched = patched.replace('\n', '\r\n')
    with open(path, 'wb') as fh:
        fh.write(patched.encode('utf-8'))
    print('OK')
else:
    print('ERROR: not found')

25
scripts/ops/_p7.py Normal file
View File

@@ -0,0 +1,25 @@
"""One-off patcher: bump the version-stamp line in AGENT-PLAYBOOK.md and
ORCHESTRATION-PLAN.md, preserving each file's CRLF/LF convention.

On a miss it prints a diagnostic hint (location of the nearest 'v4.' text).
The previously unused `import sys` was dropped.
"""
# (path, old version stamp, new version stamp) — first occurrence only.
files = [
    ('c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md',
     '*v4.1\uff082026-03-11\uff09',
     '*v4.2\uff082026-03-11\uff09\uff1a\u95f4\u8ddd\u6d4b\u91cf\u4e13\u7528\u5b50\u4ee3\u7406\uff08SPACING-AGENT.md\uff09\uff1b\u901a\u7528\u5de5\u5177 measure_gaps.py\uff1b\u00a70.8 \u65b0\u589e\u95f4\u8ddd\u4ee3\u7406\u8c03\u7528\u63d0\u793a'
    ),
    ('c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md',
     '*v1.9\uff082026-03-11\uff09',
     '*v2.0\uff082026-03-11\uff09\uff1a\u65b0\u589e\u95f4\u8ddd\u6d4b\u91cf\u4ee3\u7406\uff08\u6309\u9700\u8c03\u7528\uff09\uff1b\u4fee\u590d\u6587\u4ef6\u5934\u90e8\u63cf\u8ff0\u4e3a 4 \u79cd\u4e13\u804c\u5b50\u4ee3\u7406\u6a21\u5f0f'
    ),
]
for path, old_stamp, new_stamp in files:
    with open(path, 'rb') as f:
        c = f.read()
    crlf = b'\r\n' in c  # remember EOL style so it can be restored on save
    doc = c.decode('utf-8').replace('\r\n', '\n')
    if old_stamp in doc:
        doc2 = doc.replace(old_stamp, new_stamp, 1)
        if crlf:
            doc2 = doc2.replace('\n', '\r\n')
        with open(path, 'wb') as f:
            f.write(doc2.encode('utf-8'))
        print(f'OK: {path}')
    else:
        print(f'NOT FOUND in {path}')
        # Hint: show where a 'v4.' stamp actually sits in this file, if any.
        idx = doc.find('v4.')
        if idx >= 0:
            print(' found v4. at', idx, repr(doc[idx:idx+60]))

27
scripts/ops/_p8.py Normal file
View File

@@ -0,0 +1,27 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
raw = f.read()
crlf = b'\r\n' in raw
lines = raw.decode('utf-8').replace('\r\n', '\n').split('\n')
# Remove duplicate line 460 (0-indexed) - the OLD标准方法 line
del lines[460]
doc = '\n'.join(lines)
# Fix U+9559 (镙) -> U+9547 (锚) throughout
doc = doc.replace('\u9559\u70b9', '\u9547\u70b9') # 镙点 -> 锚点
# Also fix '値' (if any stray U+5024 that differs)
# U+5024 ord=20516 is same as '值' so no fix needed
count_jiao = doc.count('\u9547\u70b9') # 锚点
count_bad = doc.count('\u9559\u70b9') # 镙点
print(f'锚点 occurrences: {count_jiao}, 镙点 remaining: {count_bad}')
print(f'Total lines: {len(lines)}')
if crlf:
doc = doc.replace('\n', '\r\n')
with open(path, 'wb') as f:
f.write(doc.encode('utf-8'))
print('Done')

64
scripts/ops/_patch1.py Normal file
View File

@@ -0,0 +1,64 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
content = f.read().decode('utf-8')
# Find and replace the 2.2 section
old_marker = '### 2.2 \u5c3a\u5bf8\u53c2\u6570\u8868'
next_section = '## \u4e09\u3001\u622a\u56fe\u64cd\u4f5c'
idx_s = content.find(old_marker)
idx_e = content.find(next_section)
print(f'idx_s={idx_s} idx_e={idx_e}')
assert idx_s >= 0 and idx_e > idx_s
new_block = (
'### 2.2 \u4e24\u79cd\u9875\u9762\u6a21\u5f0f\u4e0e\u622a\u56fe\u5c3a\u5bf8\n'
'\n'
'\u5fae\u4fe1\u5c0f\u7a0b\u5e8f\u9875\u9762\u6709\u4e24\u79cd\u5bfc\u822a\u680f\u6a21\u5f0f\uff0c\u5f71\u54cd MP \u622a\u56fe\u9ad8\u5ea6\uff1a\n'
'\n'
'**\u6a21\u5f0f A\uff1a\u7cfb\u7edf\u9ed8\u8ba4 navigationBar**\uff08\u9875\u9762 json \u65e0 `navigationStyle: custom`\uff09\n'
'- \u7cfb\u7edf\u6e32\u67d3\u6807\u9898\u680f\uff0c\u9875\u9762\u5185\u5bb9\u533a\u5728\u6807\u9898\u680f\u4e0b\u65b9\uff0cwindowHeight = 752px\n'
'- MP \u539f\u59cb\u622a\u56fe\uff1a**645\u00d71128**\uff0c\u65e0\u9700\u88c1\u526a\n'
'- **\u6b64\u6a21\u5f0f\u9875\u9762**\uff1aboard-finance, board-coach, board-customer, task-list, my-profile\n'
'\n'
'**\u6a21\u5f0f B\uff1a\u81ea\u5b9a\u4e49 navigationBar**\uff08\u9875\u9762 json \u542b `\"navigationStyle\": \"custom\"`\uff09\n'
'- \u9875\u9762\u81ea\u5df1\u7ba1\u7406\u5bfc\u822a\u680f\uff0c\u5185\u5bb9\u533a\u4ece\u5c4f\u5e55\u9876\u7aef\u5f00\u59cb\uff08\u5305\u542b\u72b6\u6001\u680f+\u81ea\u5b9a\u4e49\u5bfc\u822a\u680f\u533a\u57df\uff09\uff0cwindowHeight \u66f4\u9ad8\n'
'- MP \u539f\u59cb\u622a\u56fe\uff1a**645\u00d71224**\uff0c\u88c1\u526a\u53c2\u6570\uff1acrop(0, 96, 645, 1224) \u2192 645\u00d71128\n'
'- **\u6b64\u6a21\u5f0f\u9875\u9762**\uff1atask-detail, task-detail-callback, task-detail-priority, task-detail-relationship, coach-detail, customer-detail, performance, notes, chat, chat-history, customer-service-records, performance-records\n'
'\n'
'| \u53c2\u6570 | H5\uff08\u6240\u6709\u9875\u9762\uff09 | \u6a21\u5f0f A MP | \u6a21\u5f0f B MP |\n'
'|------|-----|-----|------|\n'
'| viewport \u5bbd | 430px | 430px | 430px |\n'
'| DPR | 1.5 | 1.5 | 1.5 |\n'
'| \u539f\u59cb\u622a\u56fe\u5c3a\u5bf8 | 645\u00d71128 | 645\u00d71128 | 645\u00d71224 |\n'
'| **\u5bf9\u6bd4\u57fa\u51c6\u5c3a\u5bf8** | **645\u00d71128** | **645\u00d71128\uff08\u65e0\u9700\u88c1\u526a\uff09** | **645\u00d71128\uff08\u88c1\u526a\u540e\uff09** |\n'
'| \u6eda\u52a8\u6b65\u957f | 600px | 600px | 600px |\n'
'| \u622a\u56fe\u683c\u5f0f | PNG | PNG | PNG |\n'
'\n'
'### 2.3 MP \u622a\u56fe\u88c1\u526a\uff08\u6a21\u5f0f B \u5fc5\u987b\u6267\u884c\uff09\n'
'\n'
'\u6a21\u5f0f B \u9875\u9762\u6bcf\u5f20 MP \u622a\u56fe\u540e\u7acb\u5373\u88c1\u526a\uff1a\n'
'\n'
'```python\n'
'from PIL import Image\n'
'img = Image.open(\'mp--step-N.png\') # 645x1224\n'
'img.crop((0, 96, 645, 1224)).save(\'mp--step-N.png\') # \u88c1\u526a\u540e 645x1128\n'
'```\n'
'\n'
'\u6216\u4f7f\u7528 anchor_compare.py\uff1a\n'
'```bash\n'
'uv run python scripts/ops/anchor_compare.py crop <mp_path> 0 96 645 1224\n'
'```\n'
'\n'
'\u6a21\u5f0f A \u9875\u9762\u65e0\u9700\u88c1\u526a\uff0cMP \u622a\u56fe\u76f4\u63a5\u7528\u4e8e\u5bf9\u6bd4\u3002\n'
'\n'
'> \u4ee5\u4e0a\u9ad8\u5ea6\u57fa\u4e8e 430px \u5bbd\u6a21\u62df\u5668\u5b9e\u6d4b\u3002step-0 \u622a\u56fe\u540e\u5148\u786e\u8ba4\u5b9e\u9645\u5c3a\u5bf8\uff0c\u4e0e\u4e0a\u8868\u4e0d\u7b26\u65f6\u4ee5\u5b9e\u6d4b\u4e3a\u51c6\u5e76\u66f4\u65b0\u672c\u8868\u3002\n'
'\n'
)
content = content[:idx_s] + new_block + content[idx_e:]
print(f'Section replaced. New length: {len(content)}')
with open(path, 'wb') as f:
f.write(content.encode('utf-8'))
print('Done')

29
scripts/ops/_patch2.py Normal file
View File

@@ -0,0 +1,29 @@
"""One-shot doc patch: insert section 5.4.4 (forced max-retry limit) into
AGENT-PLAYBOOK.md, immediately before the existing '### 5.5' heading.

Reads/writes raw bytes and decodes explicitly so the file's UTF-8 content is
preserved regardless of the platform's default encoding.
"""
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
    content = f.read().decode('utf-8')
# Anchor on the 5.5 heading; the new 5.4.4 block is spliced in right before it.
marker = '### 5.5 \u957f\u9875\u9762\u7ea7\u8054\u5f71\u54cd'
idx = content.find(marker)
print('5.5 found at:', idx)
if idx < 0:
    # Explicit guard instead of `assert` (asserts vanish under `python -O`);
    # abort before touching the file.
    raise SystemExit('ERROR: "### 5.5" marker not found; file left untouched')
insert = (
    '\n### 5.4.4 \u6700\u5927\u91cd\u8bd5\u6b21\u6570\u9650\u5236\uff08\u5f3a\u5236\uff09\n'
    '\n'
    '\u6bcf\u4e2a\u5355\u5143\u6700\u591a\u5141\u8bb8 **5 \u8f6e\u4fee\u6b63\u5faa\u73af**\uff08\u6bcf\u8f6e\u53ef\u5305\u542b\u591a\u5904\u4fee\u6539\uff09\u3002'
    '\u5224\u5b9a\u65b9\u5f0f\uff1a\u4fee\u6b63\u5faa\u73af\u6b21\u6570 \u22655 \u4e14\u5dee\u5f02\u7387\u4f9d\u7136 \u22655%\u65f6\uff0c\u6267\u884c\u4ee5\u4e0b\u6d41\u7a0b\uff1a\n'
    '\n'
    # NOTE(review): '\u6208\u51b7' in the next line looks garbled (presumably
    # '\u7ed3\u679c', "best result"); kept as-is since the intended wording
    # cannot be confirmed from here.
    '1. \u5c06\u5f53\u524d\u6700\u4f73\u6208\u51b7\u5c06\u5dee\u5f02\u7387\u3001\u5269\u4f59\u504f\u5dee\u6e05\u5355\u8bb0\u5f55\u5230 `report.md` \u7684 `[\u8df3\u8fc7]` \u6761\u76ee\u4e0b\n'
    '2. \u6807\u6ce8\uff1a`[\u8df3\u8fc7] \u5df2\u91cd\u8bd5 5 \u8f6e\uff0c\u6700\u7ec8\u5dee\u5f02\u7387 X.XX%\uff0c\u5269\u4f59\u504f\u5dee\u5c5e\u4e0d\u53ef\u6d88\u9664\u5dee\u5f02\uff08\u89c1 \u00a76\uff09\u6216\u8fc7\u4e8e\u590d\u6742`\n'
    '3. \u8fd4\u56de\u4e3b\u4ee3\u7406\uff0c\u8bf4\u660e\u8df3\u8fc7\u539f\u56e0\n'
    '4. \u4e3b\u4ee3\u7406\u6216\u8005\u7ee7\u7eed\u4e0b\u4e00\u4e2a\u5355\u5143\uff0c\u6216\u8005\u6307\u5b9a\u9879\u76ee\u5185\u5bb9\u91cd\u5199\u540e\u91cd\u8bd5\n'
    '\n'
    # Fix: original had the garbled '\u4fee\u5900.5 \u5904'; '\u4fee\u6b63 5 \u5904'
    # ("5 fixes per round") matches the "5 rounds x 5 = 25 fixes" arithmetic
    # later in the same sentence.
    '> \u6ce8\uff1a5 \u8f6e\u91cd\u8bd5\u548c 5 \u5904\u4fee\u6b63\u4e0d\u540c\u3002\u5982\u679c\u5355\u8f6e\u4fee\u6b63 5 \u5904\uff0c5 \u8f6e\u5171 25 \u5904\u4fee\u6b63\u540e\u4ecd\u65e0\u6cd5\u6536\u655b\uff0c\u624d\u89e6\u53d1\u8df3\u8fc7\u3002\n'
    '\n'
)
content = content[:idx] + insert + content[idx:]
with open(path, 'wb') as f:
    f.write(content.encode('utf-8'))
print('5.4.4 inserted. Done.')

View File

@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
"""One-shot doc patch for AGENT-PLAYBOOK.md, applied in three steps:

1. bulk character-pair replacement,
2. full rewrite of the "\u00a7\u4e8c screenshot parameters" section, and
3. insertion of a "3e. crop" step after the "3d. screenshot" step (trying
   CRLF endings first, then LF).

Prints what each step did and a total change count at the end.
"""
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
with open(path, 'rb') as f:
    content = f.read().decode('utf-8')
changes = 0
# 1. Fix 镶点 -> 锚点 (\u9557 -> \u9576)
# NOTE(review): the comment above and the code below disagree — the code maps
# \u9557 to \u9576 ('镶'), but the comment text names '锚' (\u951a) as the
# target. Left as written because this one-shot script has already been run.
old_count = content.count('\u9557\u70b9')
content = content.replace('\u9557\u70b9', '\u9576\u70b9')
print(f'Fix \u9576\u70b9: {old_count} replacements')
changes += old_count
# 2. Replace the §二 screenshot params table
# Find the section by unique substring
marker_start = '## \u4e8c\u3001\u622a\u56fe\u53c2\u6570'
marker_end = '## \u4e09\u3001\u622a\u56fe\u64cd\u4f5c'
idx_start = content.find(marker_start)
idx_end = content.find(marker_end)
if idx_start >= 0 and idx_end > idx_start:
    # old_section is captured for debugging only; it is never used below.
    old_section = content[idx_start:idx_end]
    new_section = '''## \u4e8c\u3001\u622a\u56fe\u53c2\u6570\uff08\u53cc\u7aef\u5bf9\u9f50\u57fa\u51c6\uff09
### 2.1 \u5173\u952e\u7ed3\u8bba\uff1aMP \u622a\u56fe\u4e0d\u542b\u72b6\u6001\u680f/\u6807\u9898\u680f/tabBar
`mcp_weixin_devtools_mcp_screenshot` \u622a\u53d6\u7684\u662f\u6a21\u62df\u5668**\u9875\u9762\u5185\u5bb9\u533a\u57df**\uff08WXML \u6e32\u67d3\u5185\u5bb9\uff09\uff0c\u4e0d\u542b\uff1a
- \u72b6\u6001\u680f\uff08statusBar\uff09
- \u7cfb\u7edf \u6807\u9898\u680f\uff08navigationBar\uff09
- \u539f\u751f tabBar\uff08\u5e95\u90e8\u5207\u6362\u680f\uff09
\u540c\u6837\uff0cH5 \u622a\u56fe\u4e5f\u5df2\u9690\u85cf `#bottomNav`\uff08H5 \u6a21\u62df tabBar\uff09\u548c `.safe-area-top`\uff08\u6a21\u62df\u72b6\u6001\u680f\u5360\u4f4d\uff09\u3002
**\u53cc\u7aef\u5bf9\u6bd4\u7684\u5747\u662f\u9875\u9762\u4e3b\u4f53\u5185\u5bb9\uff0c\u5bf9\u9f50\u57fa\u51c6\u4e00\u81f4\u3002**
### 2.2 \u5c3a\u5bf8\u53c2\u6570\u8868
| \u53c2\u6570 | H5 | MP | \u8bf4\u660e |
|------|-----|-----|------|
| viewport \u5bbd | 430px | 430px | H5 `width=430`\uff1bMP \u6a21\u62df\u5668\u9009\u5bbd430px \u673a\u578b |
| viewport \u9ad8 | **752px** | **834px**\uff08windowHeight\uff09 | \u4e0d\u4e00\u81f4\uff01MP \u622a\u56fe\u540e\u5fc5\u987b\u88c1\u526a |
| DPR | 1.5 | 1.5 | H5 `deviceScaleFactor:1.5`\uff1bMP \u539f\u751f DPR=1.5 |
| \u539f\u59cb\u622a\u56fe\u5c3a\u5bf8 | 645\u00d71128 | **645\u00d71251** | \u9ad8\u5ea6\u4e0d\u4e00\u81f4 |
| **\u5bf9\u6bd4\u57fa\u51c6\u5c3a\u5bf8** | **645\u00d71128** | **645\u00d71128**\uff08\u88c1\u526a\u540e\uff09 | \u50cf\u7d20\u5bf9\u6bd4\u7684\u5b9e\u9645\u5c3a\u5bf8 |
| \u6eda\u52a8\u6b65\u957f | 600px | 600px | \u56fa\u5b9a\u6b65\u957f\uff08\u903b\u8f91\u50cf\u7d20\uff09 |
| \u622a\u56fe\u683c\u5f0f | PNG | PNG | `scale:\'device\'` \u8f93\u51fa\u7269\u7406\u50cf\u7d20 |
### 2.3 MP \u622a\u56fe\u540e\u88c1\u526a\uff08\u5fc5\u987b\u6267\u884c\uff09
\u6bcf\u5f20 MP \u622a\u56fe\u540e\u7acb\u5373\u88c1\u526a\uff0c\u4f7f\u7528\u4ee5\u4e0b \u811a\u672c \uff08\u5df2\u5185\u7f6e\u4e8e image_compare MCP \u524d\u7f6e\u6b65\u9aa4\uff09\uff1a
```python
from PIL import Image
img = Image.open(\'mp--step-N.png\') # 645\u00d71251
img.crop((0, 0, 645, 1128)).save(\'mp--step-N.png\') # \u88c1\u526a\u540e 645\u00d71128
```
\u6216\u5728 MCP \u622a\u56fe\u8c03\u7528\u540e\u7acb\u5373\u8c03\u7528\uff1a
```
\u2192 mcp_image_compare_crop_image (\u82e5\u53ef\u7528)
\u6216\u76f4\u63a5\u8c03\u7528 anchor_compare.py crop \u5b50\u547d\u4ee4
```
'''
    content = content[:idx_start] + new_section + content[idx_end:]
    print('Patch: \u00a7\u4e8c section replaced')
    changes += 1
else:
    print(f'ERROR: section markers not found. start={idx_start} end={idx_end}')
# 3. Add MP crop step after screenshot in §3.2 step 3d
# Find the step 3d screenshot call and add crop after
old_3d = " 3d. \u622a\u56fe\r\n \u2192 mcp_weixin_devtools_mcp_screenshot\r\n path: \"docs/h5_ui/compare/<page>/mp--step-<scrollTop>.png\""
if old_3d not in content:
    # The document may use LF endings; collapse the CRLFs in the probe.
    old_3d = old_3d.replace('\r\n', '\n')
if old_3d in content:
    new_3d = old_3d + '''
 3e. \u88c1\u526a MP \u622a\u56fe\u5230\u5bf9\u6bd4\u9ad8\u5ea6\uff08\u5fc5\u987b\u6267\u884c\uff09
 MP \u622a\u56fe\u9ad8\u5ea6\u4e3a 1251px\uff08\u903b\u8f91 834px\uff09\uff0c\u9700\u88c1\u526a\u81f3 1128px\uff08\u903b\u8f91 752px\uff09\u4e0e H5 \u5bf9\u9f50:
 \u2192 mcp_image_compare_crop_image
 image_path: "docs/h5_ui/compare/<page>/mp--step-<scrollTop>.png"
 x: 0
 y: 0
 width: 645
 height: 1128
 output_path: "docs/h5_ui/compare/<page>/mp--step-<scrollTop>.png"
 (\u82e5 mcp \u4e0d\u652f\u6301 crop\uff0c\u6539\u7528 anchor_compare.py\uff1a)
 uv run python scripts/ops/anchor_compare.py crop docs/h5_ui/compare/<page>/mp--step-<scrollTop>.png 0 0 645 1128
'''
    content = content.replace(old_3d, new_3d)
    print('Patch: step 3e crop added')
    changes += 1
else:
    print('Patch 3e: old_3d not found (CRLF issue), trying \\n only')
    old_3d2 = " 3d. \u622a\u56fe\n \u2192 mcp_weixin_devtools_mcp_screenshot\n path: \"docs/h5_ui/compare/<page>/mp--step-<scrollTop>.png\""
    if old_3d2 in content:
        new_3d2 = old_3d2 + '\n\n 3e. \u88c1\u526a MP \u622a\u56fe\u5230\u5bf9\u6bd4\u9ad8\u5ea6\uff08\u5fc5\u987b\u6267\u884c\uff09\n MP \u622a\u56fe\u9ad8\u5ea6\u4e3a 1251px\uff08\u903b\u8f91 834px\uff09\uff0c\u9700\u88c1\u526a\u81f3 1128px\uff08\u903b\u8f91 752px\uff09\u4e0e H5 \u5bf9\u9f50\uff1a\n uv run python scripts/ops/anchor_compare.py crop <mp_path> 0 0 645 1128\n'
        content = content.replace(old_3d2, new_3d2)
        print('Patch 3e (lf): crop step added')
        changes += 1
    else:
        # Last resort: dump context around any '3d.' occurrence for debugging.
        print('3e not found either, skipping')
        idx = content.find('3d.')
        if idx >= 0:
            print(repr(content[idx:idx+200]))
with open(path, 'wb') as f:
    f.write(content.encode('utf-8'))
print(f'Total changes: {changes}. Done.')

29
scripts/ops/_replace.py Normal file
View File

@@ -0,0 +1,29 @@
"""Replace the first occurrence of one file's text with another's inside a target.

Usage: _replace.py <old_file> <new_file> <target>

All three inputs are decoded as UTF-8 with line endings normalized to LF so
matching is ending-agnostic; the target's original CRLF style is restored on
write.
"""
import sys


def _read_lf(path):
    # Decode as UTF-8 and collapse CRLF / bare CR to LF.
    with open(path, 'rb') as fh:
        return fh.read().decode('utf-8').replace('\r\n', '\n').replace('\r', '\n')


old_file, new_file, target = sys.argv[1], sys.argv[2], sys.argv[3]
old = _read_lf(old_file)
new = _read_lf(new_file)

with open(target, 'rb') as fh:
    raw = fh.read()
# Remember whether the target used CRLF so we can write it back the same way.
crlf = b'\r\n' in raw
doc = raw.decode('utf-8').replace('\r\n', '\n').replace('\r', '\n')

if old not in doc:
    print('ERROR: old_string not found in', target)
    # Hint for debugging: where does a short prefix of the old text land?
    words = old[:30]
    idx = doc.find(words[:20])
    print('Closest at:', idx, repr(doc[idx:idx+60]) if idx >= 0 else 'not found')
    sys.exit(1)

doc2 = doc.replace(old, new, 1)
if crlf:
    doc2 = doc2.replace('\n', '\r\n')
with open(target, 'wb') as fh:
    fh.write(doc2.encode('utf-8'))
print('OK replaced 1 occurrence in', target)

View File

@@ -0,0 +1,47 @@
"""批量运行剩余 AI 测试文件,收集结果汇总。"""
import subprocess
import sys
import time
TEST_FILES = [
"apps/backend/tests/test_ai_cache.py",
"apps/backend/tests/test_ai_chat.py",
"apps/backend/tests/test_ai_app2.py",
"apps/backend/tests/test_ai_apps_prompt.py",
"apps/backend/tests/test_ai_clue_writer.py",
"apps/backend/tests/test_ai_dispatcher.py",
"tests/test_p5_ai_integration_properties.py",
]
TIMEOUT = 300 # 5 分钟每个文件
results = []
for f in TEST_FILES:
print(f"\n{'='*60}")
print(f"Running: {f}")
print(f"{'='*60}", flush=True)
start = time.time()
try:
proc = subprocess.run(
[sys.executable, "-m", "pytest", f, "-v", "--tb=short", "-x"],
timeout=TIMEOUT,
)
elapsed = time.time() - start
results.append((f, proc.returncode, f"{elapsed:.1f}s"))
except subprocess.TimeoutExpired:
elapsed = time.time() - start
results.append((f, "TIMEOUT", f"{elapsed:.1f}s"))
print(f" >>> TIMEOUT after {elapsed:.1f}s")
except Exception as e:
elapsed = time.time() - start
results.append((f, f"ERROR: {e}", f"{elapsed:.1f}s"))
print(f"\n\n{'='*60}")
print("SUMMARY")
print(f"{'='*60}")
print(f"{'File':<55} {'Result':<10} {'Time'}")
print("-" * 80)
for f, rc, t in results:
short = f.split("/")[-1]
status = "PASS" if rc == 0 else ("TIMEOUT" if rc == "TIMEOUT" else f"FAIL(rc={rc})")
print(f"{short:<55} {status:<10} {t}")

37
scripts/ops/_split.py Normal file
View File

@@ -0,0 +1,37 @@
path = 'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md'
path_hist = 'c:/NeoZQYY/docs/h5_ui/compare/HISTORY.md'
with open(path, 'rb') as f:
lines = f.read().decode('utf-8').split('\n')
# Split at line 1439 (0-indexed 1438)
split_idx = 1438 # 0-indexed
playbook_lines = lines[:split_idx] # keep up to line 1438
history_lines = lines[split_idx:] # from line 1439 onward
# Add history file header
history_header = [
'# \u5386\u53f2\u63a8\u5bfc\u4e0e\u57fa\u51c6\u6d4b\u8bd5\u8bb0\u5f55',
'',
'\u672c\u6587\u4ef6\u5305\u542b H5 \u2192 \u5fae\u4fe1\u5c0f\u7a0b\u5e8f\u8fc1\u79fb\u9879\u76ee\u7684\u5386\u53f2\u63a8\u5bfc\u8fc7\u7a0b\u3001Benchmark \u5206\u6790\u3001\u65e7\u7248\u672c\u6362\u7b97\u7cfb\u6570\u8bb0\u5f55\u3002',
'\u4e0d\u5c5e\u4e8e\u6267\u884c\u624b\u518c\uff0c\u4ec5\u4f9b\u5386\u53f2\u53c2\u8003\u3002',
'',
]
history_content = '\n'.join(history_header + history_lines)
# Remove trailing whitespace from playbook
while playbook_lines and playbook_lines[-1].strip() == '':
playbook_lines.pop()
playbook_lines.append('') # single trailing newline
playbook_content = '\n'.join(playbook_lines)
# Write history file
with open(path_hist, 'wb') as f:
f.write(history_content.encode('utf-8'))
print(f'HISTORY.md: {len(history_lines)} lines')
# Write trimmed playbook
with open(path, 'wb') as f:
f.write(playbook_content.encode('utf-8'))
print(f'AGENT-PLAYBOOK.md: {len(playbook_lines)} lines')

41
scripts/ops/_verify.py Normal file
View File

@@ -0,0 +1,41 @@
import os
checks = []
# 1. Check all 5 doc files exist
for f in [
'c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md',
'c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md',
'c:/NeoZQYY/docs/h5_ui/compare/SPACING-AGENT.md',
'c:/NeoZQYY/docs/h5_ui/compare/PROGRESS.md',
'c:/NeoZQYY/docs/h5_ui/compare/CHANGELOG.md',
'c:/NeoZQYY/docs/h5_ui/compare/HISTORY.md',
]:
exists = os.path.exists(f)
size = os.path.getsize(f) if exists else 0
fname = f.split('/')[-1]
checks.append(('EXISTS' if exists else 'MISSING', fname, f'{size//1024}KB'))
# 2. Check key content in each exec doc
for path, needles, label in [
('c:/NeoZQYY/docs/h5_ui/compare/AGENT-PLAYBOOK.md', [
'PROGRESS.md', 'mcp_recompile', '5.2.5', 'diff \u56fe\u7279\u5f81',
'\u9547\u70b9\u5bf9\u9f50\u6cd5', 'crop', '645', '1128'
], 'PLAYBOOK'),
('c:/NeoZQYY/docs/h5_ui/compare/ORCHESTRATION-PLAN.md', [
'PROGRESS.md', '\u6bcf\u6b21\u4f1a\u8bdd\u5f00\u59cb', 'mcp_recompile', '5.2.5'
], 'ORCH'),
('c:/NeoZQYY/docs/h5_ui/compare/PROGRESS.md', [
'\u4e3b\u4ee3\u7406\u4f1a\u8bdd\u6062\u590d\u6d41\u7a0b', 'MCP \u5c31\u7eea\u68c0\u67e5', '\U0001f501 \u91cd\u5199\u4e2d'
], 'PROGRESS'),
]:
with open(path, 'rb') as f:
doc = f.read().decode('utf-8')
for needle in needles:
found = needle in doc
checks.append(('OK' if found else 'MISSING', label, needle[:30]))
all_ok = all(s == 'OK' or s == 'EXISTS' for s, *_ in checks)
for status, *rest in checks:
print(f'[{status}] {": ".join(rest)}')
print()
print('\u2705 Ready to launch' if all_ok else '\u26a0\ufe0f Issues found')

View File

@@ -0,0 +1,148 @@
"""
[一次性验证工具] 用 Playwright 实际模拟 600px 固定步长滚动,
记录每个页面实际能截几屏scrollTop 被 clamp 后的真实值)。
用法:
1. 先启动 Live ServerVS Code 右键 Open with Live Server端口 5500
2. python scripts/ops/_verify_step_counts.py
输出:每页的 scrollHeight、maxScrollTop、实际步数、scrollTop 序列。
"""
import asyncio
import json
from pathlib import Path
from playwright.async_api import async_playwright
BASE_URL = "http://127.0.0.1:5500/docs/h5_ui/pages"
VIEWPORT_W = 430
VIEWPORT_H = 752
DPR = 1.5
STEP = 600
PAGES = [
"board-finance", "board-coach", "board-customer",
"task-detail", "task-detail-callback", "task-detail-priority",
"task-detail-relationship", "coach-detail", "customer-detail",
"performance", "task-list", "my-profile",
"customer-service-records", "performance-records",
"chat", "chat-history", "notes",
]
# 维度数映射
DIMS = {"board-coach": 4, "board-customer": 8}
HIDE_FLOAT_JS = """
() => {
const nav = document.getElementById('bottomNav');
if (nav) nav.style.display = 'none';
document.querySelectorAll('.ai-float-btn-container').forEach(el => el.style.display = 'none');
document.documentElement.style.scrollbarWidth = 'none';
const s = document.createElement('style');
s.textContent = '::-webkit-scrollbar { display: none !important; }';
document.head.appendChild(s);
}
"""
EXPAND_ALL_JS = """
() => {
document.querySelectorAll('[onclick*="More"], [onclick*="expand"], [onclick*="toggle"]').forEach(b => b.click());
document.querySelectorAll('[id*="More"], [id*="more"]').forEach(el => {
el.classList.remove('hidden');
el.style.display = '';
});
}
"""
async def main():
results = []
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
context = await browser.new_context(
viewport={"width": VIEWPORT_W, "height": VIEWPORT_H},
device_scale_factor=DPR,
)
page = await context.new_page()
for name in PAGES:
url = f"{BASE_URL}/{name}.html"
try:
await page.goto(url, wait_until="load", timeout=15000)
await page.wait_for_timeout(2000)
await page.evaluate(HIDE_FLOAT_JS)
await page.evaluate(EXPAND_ALL_JS)
await page.wait_for_timeout(500)
# 获取页面高度
scroll_height = await page.evaluate("() => document.documentElement.scrollHeight")
max_scroll = scroll_height - VIEWPORT_H
# 模拟固定步长滚动,记录实际 scrollTop
actual_steps = []
target = 0
while True:
# 滚动到目标位置
await page.evaluate(f"window.scrollTo(0, {target})")
await page.wait_for_timeout(200)
# 读取实际 scrollTop
actual = await page.evaluate(
"() => window.pageYOffset || document.documentElement.scrollTop"
)
actual_steps.append({"target": target, "actual": round(actual)})
# 如果实际值已经到达 maxScroll 或者目标已超过 maxScroll结束
if actual >= max_scroll - 1 or target >= max_scroll:
break
target += STEP
dims = DIMS.get(name, 1)
units = len(actual_steps) * dims
info = {
"page": name,
"scrollHeight": scroll_height,
"maxScroll": round(max_scroll),
"steps": len(actual_steps),
"dims": dims,
"units": units,
"sequence": actual_steps,
}
results.append(info)
seq_str = ", ".join(str(s["actual"]) for s in actual_steps)
print(f" {name:35s} H={scroll_height:5d} max={max_scroll:5.0f}"
f" steps={len(actual_steps):2d} dims={dims} units={units:3d}"
f" seq=[{seq_str}]")
except Exception as e:
print(f"{name}: {e}")
results.append({"page": name, "error": str(e)})
await browser.close()
# 汇总
total = sum(r.get("units", 0) for r in results)
print(f"\n总对照处理单元数: {total}")
# 输出 JSON
out_path = Path(__file__).resolve().parents[2] / "export" / "SYSTEM" / "REPORTS" / "h5_page_heights" / "step_counts_verified.json"
out_path.parent.mkdir(parents=True, exist_ok=True)
with open(out_path, "w", encoding="utf-8") as f:
json.dump(results, f, ensure_ascii=False, indent=2)
print(f"详细数据已写入: {out_path}")
# Markdown 表格
print(f"\n| 页面 | scrollHeight | maxScroll | 步数 | 维度 | 单元 |")
print(f"|------|-------------|-----------|------|------|------|")
for r in results:
if "error" in r:
print(f"| {r['page']} | ❌ | | | | |")
else:
print(f"| {r['page']} | {r['scrollHeight']} | {r['maxScroll']} | {r['steps']} | {r['dims']} | {r['units']} |")
print(f"| **合计** | | | | | **{total}** |")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,755 +1,277 @@
"""
分区锚点对齐的长页面像素级对比脚本v2逐段截图方案)。
固定步长滚动截图辅助工具v3纯参数计算 + 文件管理)。
核心思路
H5 和 MP 两端都按 section 锚点逐段滚动 + 单屏截图,不再使用长截图裁剪。
两端视口高度统一为 430×752MP windowHeight截图自然 1:1 对齐。
H5 DPR=3 → 1290×2256 物理像素MP DPR=1.5 → 645×1128 → ×2 缩放到 1290×2256。
本脚本不执行截图或对比,仅提供
1. scrollTop 序列计算(给定 scrollHeight → 步数和序列)
2. 文件命名和目录结构管理
3. 差异率汇总报告生成
实际截图和对比通过 MCP 工具在对话中执行:
- H5 截图Playwright MCPbrowser_navigate → browser_evaluate → browser_take_screenshot
- MP 截图:微信 MCPnavigate_to → evaluate_script → screenshot
- 像素对比image_compare MCPcompare_images
用法:
# 第一步:提取 H5 锚点坐标 + 逐段截图(需要 Go Live 运行在 5500 端口)
python scripts/ops/anchor_compare.py extract-h5 board-finance
python scripts/ops/anchor_compare.py calc <scrollHeight> # 计算 scrollTop 序列
python scripts/ops/anchor_compare.py status [<page>] # 查看截图状态
python scripts/ops/anchor_compare.py report <page> # 生成/更新差异率报告
python scripts/ops/anchor_compare.py list # 列出所有页面及状态
# 第二步:生成 MP 截图指令AI 通过 MCP 工具手动执行)
python scripts/ops/anchor_compare.py mp-inst board-finance
# 第三步:逐区域对比
python scripts/ops/anchor_compare.py compare board-finance
# 一键执行(仅 H5 端 + 对比MP 截图需手动)
python scripts/ops/anchor_compare.py full board-finance
依赖:
pip install playwright Pillow
playwright install chromium
DPR 换算关系:
H5: viewport 430×752, DPR=3 → 截图 1290×2256
MP: viewport 430×752, DPR=1.5 → 截图 645×1128 → ×2 缩放到 1290×2256
截图参数(双端统一):
viewport 430×752, DPR=1.5, 输出 645×1128
步长 600px, maxScroll ≤ 10 视为单屏
"""
import asyncio
import io
import json
import math
import sys
import time
from pathlib import Path
from typing import Optional
from PIL import Image
def _ensure_utf8_stdio():
"""Windows 终端 GBK 编码兼容:强制 stdout/stderr 为 UTF-8,避免 emoji 输出报错
仅在 CLI 入口调用,不在模块导入时执行——否则与 Playwright asyncio 冲突导致空输出。
"""
"""Windows 终端 GBK 编码兼容:强制 stdout/stderr 为 UTF-8。"""
if sys.platform == "win32":
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")
sys.stdout = io.TextIOWrapper(
sys.stdout.buffer, encoding="utf-8", errors="replace"
)
sys.stderr = io.TextIOWrapper(
sys.stderr.buffer, encoding="utf-8", errors="replace"
)
# ─── 路径常量 ───
ROOT = Path(__file__).resolve().parents[2]
SCREENSHOTS_DIR = ROOT / "docs" / "h5_ui" / "screenshots"
ANCHORS_DIR = ROOT / "docs" / "h5_ui" / "anchors"
COMPARE_DIR = ROOT / "docs" / "h5_ui" / "compare"
H5_PAGES_DIR = ROOT / "docs" / "h5_ui" / "pages"
# 确保目录存在
SCREENSHOTS_DIR.mkdir(parents=True, exist_ok=True)
ANCHORS_DIR.mkdir(parents=True, exist_ok=True)
# ─── DPR / 尺寸常量 ───
H5_DPR = 3
MP_DPR = 1.5
# ─── 截图参数 ───
VIEWPORT_W = 430
VIEWPORT_H = 752 # MP windowHeight两端统一
TARGET_W = 1290 # 统一对比宽度 = 430 × 3
TARGET_H = 2256 # 统一对比高度 = 752 × 3
VIEWPORT_H = 752
DPR = 1.5
TARGET_W = 645 # 430 × 1.5
TARGET_H = 1128 # 752 × 1.5
STEP_PX = 600 # 固定步长(逻辑像素)
SINGLE_SCREEN_THRESHOLD = 10 # maxScroll ≤ 此值视为单屏
# H5 截图参数(与 screenshot_h5_pages.py 一致)
H5_BASE_URL = "http://127.0.0.1:5500/docs/h5_ui/pages"
TAILWIND_WAIT_MS = 2500 # Tailwind CDN JIT 渲染等待
SCROLLBAR_HIDE_JS = """
() => {
document.documentElement.style.overflow = 'auto';
document.documentElement.style.scrollbarWidth = 'none';
const s = document.createElement('style');
s.textContent = '::-webkit-scrollbar { display: none !important; }';
document.head.appendChild(s);
}
"""
# ═══════════════════════════════════════════════════════════
# 锚点配置:每个长页面的语义区域定义
# 页面清单(来自 design.md §52026-03-10 Playwright 实测)
# ═══════════════════════════════════════════════════════════
# 锚点选择器规则:
# - id 优先(如 #section-overview
# - 无 id 时用 CSS 选择器(如 .section-title.pink 的父容器)
# - 每个锚点指向区域的「顶部边界元素」
#
# sticky_selectors: 页面中 sticky/fixed 定位的元素选择器
# fixed_bottom_selectors: 底部 fixed 元素,截图时需要隐藏
# mp_scroll_mode: "scroll_into_view" 用于 scroll-view 页面
PAGE_ANCHORS: dict = {
"board-finance": {
"sticky_selectors": [".safe-area-top", "#filterBar"],
"fixed_bottom_selectors": [".ai-float-btn-container"],
"mp_scroll_mode": "scroll_into_view",
"anchors": [
{"selector": "#section-overview", "name": "经营一览", "scroll_into_view_id": "section-overview"},
{"selector": "#section-recharge", "name": "预收资产", "scroll_into_view_id": "section-recharge"},
{"selector": "#section-revenue", "name": "应计收入确认", "scroll_into_view_id": "section-revenue"},
{"selector": "#section-cashflow", "name": "现金流入", "scroll_into_view_id": "section-cashflow"},
{"selector": "#section-expense", "name": "现金流出", "scroll_into_view_id": "section-expense"},
{"selector": "#section-coach", "name": "助教分析", "scroll_into_view_id": "section-coach"},
],
},
"task-detail": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section", "name": "客户信息Banner"},
{"selector": ".section-title.pink", "name": "与我的关系", "use_parent": True},
{"selector": ".section-title.orange", "name": "任务建议", "use_parent": True},
{"selector": ".section-title.green", "name": "维客线索", "use_parent": True},
],
},
"task-detail-callback": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section", "name": "客户信息Banner"},
{"selector": ".section-title.pink", "name": "与我的关系", "use_parent": True},
{"selector": ".section-title.orange", "name": "任务建议", "use_parent": True},
{"selector": ".section-title.green", "name": "维客线索", "use_parent": True},
],
},
"task-detail-priority": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section", "name": "客户信息Banner"},
{"selector": ".section-title.pink", "name": "与我的关系", "use_parent": True},
{"selector": ".section-title.orange", "name": "任务建议", "use_parent": True},
{"selector": ".section-title.green", "name": "维客线索", "use_parent": True},
],
},
"task-detail-relationship": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section", "name": "客户信息Banner"},
{"selector": ".section-title.pink", "name": "与我的关系", "use_parent": True},
{"selector": ".section-title.orange", "name": "任务建议", "use_parent": True},
{"selector": ".section-title.green", "name": "维客线索", "use_parent": True},
],
},
"coach-detail": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section, .coach-banner, header", "name": "助教信息Banner"},
{"selector": ".st.blue, [class*='perf']", "name": "绩效概览", "use_parent": True},
{"selector": ".st.green, [class*='income']", "name": "收入明细", "use_parent": True},
{"selector": ".st.purple, [class*='customer']", "name": "前10客户", "use_parent": True},
],
},
"customer-detail": {
"sticky_selectors": [],
"fixed_bottom_selectors": ["div[class*='fixed'][class*='bottom-0']", ".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section, header", "name": "客户信息Banner"},
{"selector": ".section-title, .card-section", "name": "消费习惯", "use_parent": True},
],
},
"performance": {
"sticky_selectors": [],
"fixed_bottom_selectors": [".ai-float-btn-container"],
"anchors": [
{"selector": ".banner-section, header, .perf-banner", "name": "业绩总览Banner"},
{"selector": ".perf-card, .card-section", "name": "本月业绩进度"},
],
},
"board-coach": {
"sticky_selectors": [".safe-area-top", "#filterBar"],
"fixed_bottom_selectors": ["#bottomNav", ".ai-float-btn-container"],
"anchors": [
{"selector": ".coach-card, .card-list, .dim-container.active", "name": "助教卡片列表"},
],
},
"board-customer": {
"sticky_selectors": [".safe-area-top", "#filterBar"],
"fixed_bottom_selectors": ["#bottomNav", ".ai-float-btn-container"],
"anchors": [
{"selector": ".customer-card, .card-list, .dim-container.active", "name": "客户卡片列表"},
],
},
# Measured page metrics (Playwright at a 430px-wide viewport, 2026-03-10;
# see design.md §5):
#   scrollHeight - document height in logical px
#   maxScroll    - scrollHeight minus the 752px viewport (clamped scroll range)
#   steps        - 600px-step screenshots needed to cover the page
#   dims         - dimension variants per page (board pages render one list per dim)
PAGE_DATA: dict[str, dict] = {
    "board-finance": {"scrollHeight": 5600, "maxScroll": 4848, "steps": 10, "dims": 1},
    "board-coach": {"scrollHeight": 754, "maxScroll": 2, "steps": 1, "dims": 4},
    "board-customer": {"scrollHeight": 752, "maxScroll": 0, "steps": 1, "dims": 8},
    "task-detail": {"scrollHeight": 2995, "maxScroll": 2243, "steps": 5, "dims": 1},
    "task-detail-callback": {"scrollHeight": 2397, "maxScroll": 1645, "steps": 4, "dims": 1},
    "task-detail-priority": {"scrollHeight": 2389, "maxScroll": 1637, "steps": 4, "dims": 1},
    "task-detail-relationship": {"scrollHeight": 2275,
                                 "maxScroll": 1523, "steps": 4, "dims": 1},
    "coach-detail": {"scrollHeight": 2918, "maxScroll": 2166, "steps": 5, "dims": 1},
    "customer-detail": {"scrollHeight": 3070, "maxScroll": 2318, "steps": 5, "dims": 1},
    "performance": {"scrollHeight": 7705, "maxScroll": 6953, "steps": 13, "dims": 1},
    "task-list": {"scrollHeight": 1428, "maxScroll": 676, "steps": 3, "dims": 1},
    "my-profile": {"scrollHeight": 752, "maxScroll": 0, "steps": 1, "dims": 1},
    "customer-service-records": {"scrollHeight": 961, "maxScroll": 209, "steps": 2, "dims": 1},
    "performance-records": {"scrollHeight": 2677, "maxScroll": 1925, "steps": 5, "dims": 1},
    "chat": {"scrollHeight": 1061, "maxScroll": 309, "steps": 2, "dims": 1},
    "chat-history": {"scrollHeight": 752, "maxScroll": 0, "steps": 1, "dims": 1},
    "notes": {"scrollHeight": 1709, "maxScroll": 957, "steps": 3, "dims": 1},
}
# ═══════════════════════════════════════════════════════════
# 工具函数
# ═══════════════════════════════════════════════════════════
def load_anchor_data(page_name: str) -> Optional[dict]:
    """Return the saved anchor-coordinate data for *page_name*, or None if absent."""
    data_file = ANCHORS_DIR / f"{page_name}.json"
    if data_file.exists():
        with open(data_file, "r", encoding="utf-8") as fh:
            return json.load(fh)
    return None
def save_anchor_data(page_name: str, data: dict) -> Path:
    """Write *data* as pretty-printed UTF-8 JSON under ANCHORS_DIR; return the path."""
    out_file = ANCHORS_DIR / f"{page_name}.json"
    with open(out_file, "w", encoding="utf-8") as fh:
        json.dump(data, fh, ensure_ascii=False, indent=2)
    print(f" 💾 锚点数据已保存: {out_file.relative_to(ROOT)}")
    return out_file
def resize_to_width(img: Image.Image, target_w: int) -> Image.Image:
    """Scale *img* to width *target_w*, keeping the aspect ratio (LANCZOS resample).

    Returns a copy when the image is already at the target width, so callers
    always own an independent image.
    """
    if img.width == target_w:
        return img.copy()
    scaled_h = int(img.height * (target_w / img.width))
    return img.resize((target_w, scaled_h), Image.LANCZOS)
# Sort dimensions rendered by the board-coach page (one card list per dim).
BOARD_COACH_DIMS = ["perf", "salary", "sv", "task"]
# Customer dimensions rendered by the board-customer page.
BOARD_CUSTOMER_DIMS = ["recall", "potential", "balance", "recharge",
                       "recent", "spend60", "freq60", "loyal"]
# ═══════════════════════════════════════════════════════════
# H5 端:提取锚点坐标 + 逐段截图
# 核心计算
# ═══════════════════════════════════════════════════════════
def build_h5_extract_js(anchors: list, sticky_selectors: list,
fixed_bottom_selectors: list) -> str:
"""构建提取锚点坐标的 JS 代码(仅获取坐标,不截图)"""
anchor_configs = json.dumps(anchors, ensure_ascii=False)
sticky_json = json.dumps(sticky_selectors)
fixed_json = json.dumps(fixed_bottom_selectors)
def compute_scroll_sequence(scroll_height: int, viewport_height: int = VIEWPORT_H) -> list[int]:
"""根据 scrollHeight 和 viewportHeight 计算固定步长的 scrollTop 序列。
return f"""
() => {{
const anchors = {anchor_configs};
const stickySelectors = {sticky_json};
const fixedSelectors = {fixed_json};
const pageHeight = Math.max(
document.body.scrollHeight,
document.documentElement.scrollHeight
);
const anchorPositions = [];
for (const anchor of anchors) {{
const selectors = anchor.selector.split(',').map(s => s.trim());
let el = null;
for (const sel of selectors) {{
el = document.querySelector(sel);
if (el) break;
}}
if (!el) {{
anchorPositions.push({{
name: anchor.name,
selector: anchor.selector,
found: false,
top: 0, bottom: 0, height: 0,
}});
continue;
}}
const target = anchor.use_parent ? el.parentElement : el;
const rect = target.getBoundingClientRect();
const scrollY = window.scrollY || window.pageYOffset;
anchorPositions.push({{
name: anchor.name,
selector: anchor.selector,
found: true,
top: rect.top + scrollY,
bottom: rect.bottom + scrollY,
height: rect.height,
}});
}}
let stickyTotalHeight = 0;
const stickyDetails = [];
for (const sel of stickySelectors) {{
const el = document.querySelector(sel);
if (el) {{
const rect = el.getBoundingClientRect();
stickyDetails.push({{ selector: sel, height: rect.height }});
stickyTotalHeight += rect.height;
}}
}}
let fixedBottomHeight = 0;
const fixedDetails = [];
for (const sel of fixedSelectors) {{
const el = document.querySelector(sel);
if (el) {{
const rect = el.getBoundingClientRect();
fixedDetails.push({{ selector: sel, height: rect.height }});
fixedBottomHeight += rect.height;
}}
}}
return {{
pageHeight,
viewportHeight: window.innerHeight,
anchorPositions,
stickyTotalHeight,
stickyDetails,
fixedBottomHeight,
fixedDetails,
}};
}}
规则design.md §2.2
maxScroll = scrollHeight - viewportHeight
maxScroll ≤ 10 → 单屏 [0]
N = floor(maxScroll / 600) + 1
序列0, 600, 1200, ... 最后一步 clamp 到 maxScroll
"""
max_scroll = scroll_height - viewport_height
if max_scroll <= SINGLE_SCREEN_THRESHOLD:
return [0]
sequence = []
for i in range(math.floor(max_scroll / STEP_PX) + 1):
target = i * STEP_PX
if target >= max_scroll:
target = max_scroll
sequence.append(target)
if sequence[-1] != max_scroll:
sequence.append(max_scroll)
return sequence
def build_scroll_to_anchor_js(anchor_top: float, sticky_height: float) -> str:
"""构建滚动到锚点位置的 JS 代码。
滚动目标:让 section 顶部紧贴 sticky 区域下方。
scrollTo = anchor_top - sticky_height
"""
scroll_y = max(0, anchor_top - sticky_height)
return f"""
() => {{
window.scrollTo({{ top: {scroll_y}, behavior: 'instant' }});
return {{ scrollY: window.scrollY, target: {scroll_y} }};
}}
"""
def page_output_dir(page_name: str, dimension: str | None = None) -> Path:
    """Screenshot output directory for a page.

    Multi-dimension pages get a per-dimension subdirectory when *dimension*
    is given (a non-empty string).
    """
    root = COMPARE_DIR / page_name
    return root / dimension if dimension else root
async def extract_h5_anchors(page_name: str) -> dict:
    """Extract anchor coordinates from an H5 page and capture one screenshot per section.

    Uses Playwright with a 430x752 viewport (matching the MP windowHeight) at
    DPR=3, so every screenshot is 1290x2256 physical pixels.  Each section is
    scrolled to its anchor position and captured as a single viewport shot
    (not full_page).

    Returns the result dict (anchor positions + segment metadata), which is
    also persisted via save_anchor_data; returns {} when the page has no
    anchor config or its HTML source file is missing.
    Re-raises the last error after exhausting all retries.
    """
    # Lazy import: keeps the module importable when Playwright is absent.
    from playwright.async_api import async_playwright
    config = PAGE_ANCHORS.get(page_name)
    if not config:
        print(f"❌ 页面 '{page_name}' 未配置锚点,请在 PAGE_ANCHORS 中添加")
        return {}
    url = f"{H5_BASE_URL}/{page_name}.html"
    html_path = H5_PAGES_DIR / f"{page_name}.html"
    if not html_path.exists():
        print(f"❌ H5 源文件不存在: {html_path.relative_to(ROOT)}")
        return {}
    print(f"\n{'='*60}")
    print(f"📐 提取 H5 锚点坐标 + 逐段截图: {page_name}")
    print(f" URL: {url}")
    print(f" 视口: {VIEWPORT_W}×{VIEWPORT_H}, DPR={H5_DPR}")
    print(f" 每张截图: {TARGET_W}×{TARGET_H}")
    print(f"{'='*60}")
    # The whole extract + capture pass is retried as one unit on any failure.
    max_retries = 3
    for attempt in range(1, max_retries + 1):
        try:
            async with async_playwright() as p:
                browser = await p.chromium.launch(
                    headless=True,
                    args=["--hide-scrollbars"],
                )
                context = await browser.new_context(
                    viewport={"width": VIEWPORT_W, "height": VIEWPORT_H},
                    device_scale_factor=H5_DPR,
                )
                page = await context.new_page()
                # Sanity-check the effective device pixel ratio.
                dpr = await page.evaluate("() => window.devicePixelRatio")
                assert dpr == H5_DPR, f"DPR 应为 {H5_DPR},实际为 {dpr}"
                # Navigate, then wait for rendering (Tailwind) to settle.
                await page.goto(url, wait_until="load", timeout=15000)
                await page.wait_for_timeout(TAILWIND_WAIT_MS)
                await page.evaluate(SCROLLBAR_HIDE_JS)
                await page.wait_for_timeout(300)
                # Evaluate the anchor-measurement JS inside the page.
                js_code = build_h5_extract_js(
                    config["anchors"],
                    config.get("sticky_selectors", []),
                    config.get("fixed_bottom_selectors", []),
                )
                result = await page.evaluate(js_code)
                # Report anchors whose selectors matched nothing.
                not_found = [a for a in result["anchorPositions"] if not a["found"]]
                if not_found:
                    print(f"\n⚠️ 以下锚点未找到:")
                    for a in not_found:
                        print(f" - {a['name']}: {a['selector']}")
                # Log the measured geometry.
                print(f"\n📊 页面总高度: {result['pageHeight']:.0f}px (逻辑)")
                print(f" 视口高度: {result['viewportHeight']}px")
                print(f" Sticky 总高度: {result['stickyTotalHeight']:.0f}px")
                for a in result["anchorPositions"]:
                    status = "" if a["found"] else ""
                    print(f" {status} {a['name']}: top={a['top']:.0f}px")
                # Per-section capture: scroll to each anchor, shoot one viewport.
                sticky_h = result["stickyTotalHeight"]
                segments = []
                for i, anchor in enumerate(result["anchorPositions"]):
                    if not anchor["found"]:
                        print(f"\n ⏭️ 跳过未找到的锚点: {anchor['name']}")
                        continue
                    # Scroll so the section top sits right below the sticky area.
                    scroll_js = build_scroll_to_anchor_js(anchor["top"], sticky_h)
                    scroll_result = await page.evaluate(scroll_js)
                    await page.wait_for_timeout(500)  # let scroll + render settle
                    # One viewport screenshot (not full_page).
                    seg_path = SCREENSHOTS_DIR / f"h5-{page_name}--seg-{i}.png"
                    await page.screenshot(path=str(seg_path), full_page=False)
                    # Verify the physical screenshot dimensions.
                    img = Image.open(seg_path)
                    assert img.width == TARGET_W, (
                        f"截图宽度应为 {TARGET_W},实际为 {img.width}"
                    )
                    assert img.height == TARGET_H, (
                        f"截图高度应为 {TARGET_H},实际为 {img.height}"
                    )
                    segments.append({
                        "index": i,
                        "name": anchor["name"],
                        "scroll_y": scroll_result.get("scrollY", 0),
                        "screenshot": str(seg_path.relative_to(ROOT)),
                        "width": img.width,
                        "height": img.height,
                    })
                    print(f"\n 📸 段 {i} [{anchor['name']}]: "
                          f"scrollY={scroll_result.get('scrollY', 0):.0f}"
                          f"{seg_path.name} ({img.width}×{img.height})")
                await browser.close()
                # Assemble and persist the result payload.
                result["segments"] = segments
                result["page_name"] = page_name
                result["viewport"] = {"width": VIEWPORT_W, "height": VIEWPORT_H}
                result["dpr"] = H5_DPR
                result["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
                save_anchor_data(page_name, result)
                print(f"\n✅ 完成: {len(segments)} 段截图")
                return result
        except Exception as e:
            # Any failure (launch, goto, evaluate, screenshot) triggers a retry
            # with linear backoff; the last attempt re-raises.
            print(f"\n⚠️ 第 {attempt}/{max_retries} 次尝试失败: {e}")
            if attempt < max_retries:
                wait_sec = attempt * 3
                print(f" 等待 {wait_sec}s 后重试...")
                await asyncio.sleep(wait_sec)
            else:
                print(f"❌ 提取失败,已重试 {max_retries} 次")
                raise
    # Unreachable in practice (loop either returns or raises); kept as a guard.
    return {}
def h5_url(page_name: str) -> str:
    """Return the Live Server URL of an H5 page."""
    return "{}/{}.html".format(H5_BASE_URL, page_name)
# ═══════════════════════════════════════════════════════════
# MP 端辅助:生成 MP 截图指令
# CLI 命令
# ═══════════════════════════════════════════════════════════
def generate_mp_capture_instructions(page_name: str) -> list[dict]:
"""
根据已保存的锚点数据,生成 MP 端的截图指令列表。
AI 通过 MCP 工具按指令逐步执行。
def cmd_calc(scroll_height: int):
    """Print the scrollTop sequence derived from a page's scrollHeight."""
    remaining = scroll_height - VIEWPORT_H
    steps = compute_scroll_sequence(scroll_height)
    for line in (
        f"scrollHeight: {scroll_height}px",
        f"viewportHeight: {VIEWPORT_H}px",
        f"maxScroll: {remaining}px",
        f"步数: {len(steps)}",
        f"序列: {steps}",
    ):
        print(line)
v2 方案不再裁剪MP 截图即为一屏完整内容430×752 逻辑 = 645×1128 物理)。
两端截图从顶部自然对齐:
H5: safe-area-top(44.67) + filterBar(70) ≈ 114.67px
MP: board-tabs(45) + filter-bar(70) = 115px
差异 ~0.33px 可忽略。
返回格式:
[
{
"region_name": "经营一览",
"scroll_mode": "scroll_into_view",
"scroll_into_view_id": "section-overview",
"wait_ms": 800,
"screenshot_name": "mp-board-finance--seg-0.png",
"notes": "scroll-into-view → section-overview"
},
...
]
"""
data = load_anchor_data(page_name)
if not data:
print(f"❌ 未找到 {page_name} 的锚点数据,请先运行 extract-h5")
return []
def cmd_status(page_name: str | None = None):
"""查看截图状态"""
pages = [page_name] if page_name else list(PAGE_DATA.keys())
anchors = data.get("anchorPositions", [])
found_anchors = [a for a in anchors if a.get("found", False)]
print(f"\n{'页面':<30} {'步数':>4} {'维度':>4} {'H5':>4} {'MP':>4} {'diff':>4}")
print("-" * 60)
# 从 PAGE_ANCHORS 获取滚动模式和 scroll_into_view_id
page_config = PAGE_ANCHORS.get(page_name, {})
scroll_mode = page_config.get("mp_scroll_mode", "page_scroll")
anchors_config = page_config.get("anchors", [])
sticky_height = data.get("stickyTotalHeight", 0)
for name in pages:
data = PAGE_DATA.get(name)
if not data:
print(f" 未知页面: {name}")
continue
instructions = []
for i, anchor in enumerate(found_anchors):
inst = {
"region_index": i,
"region_name": anchor["name"],
"scroll_mode": scroll_mode,
"wait_ms": 800 if i == 0 else 1000,
"screenshot_name": f"mp-{page_name}--seg-{i}.png",
}
dims = data["dims"]
steps = data["steps"]
if scroll_mode == "scroll_into_view" and i < len(anchors_config):
view_id = anchors_config[i].get("scroll_into_view_id", "")
inst["scroll_into_view_id"] = view_id
inst["notes"] = f"scroll-into-view → {view_id}"
if dims > 1:
# 多维度页面:检查每个维度子目录
dim_list = BOARD_COACH_DIMS if name == "board-coach" else BOARD_CUSTOMER_DIMS
for dim in dim_list:
out_dir = page_output_dir(name, dim)
h5_count = len(list(out_dir.glob("h5--step-*.png"))) if out_dir.exists() else 0
mp_count = len(list(out_dir.glob("mp--step-*.png"))) if out_dir.exists() else 0
diff_count = len(list(out_dir.glob("diff--step-*.png"))) if out_dir.exists() else 0
print(f" {name}/{dim:<22} {steps:>4} {1:>4} {h5_count:>4} {mp_count:>4} {diff_count:>4}")
else:
# page_scroll 模式:滚动到 section 顶部 - sticky 高度
scroll_top = max(0, anchor["top"] - sticky_height)
inst["scroll_top"] = int(scroll_top)
inst["notes"] = f"scrollTop → {int(scroll_top)}px"
instructions.append(inst)
# 打印指令
print(f"\n{'='*60}")
print(f"📋 MP 截图指令: {page_name} ({len(instructions)} 段)")
print(f" 滚动模式: {scroll_mode}")
print(f" 每张截图: 645×1128 物理 (430×752 逻辑)")
print(f"{'='*60}")
for inst in instructions:
print(f"\n{inst['region_index']}: {inst['region_name']}")
if "scroll_into_view_id" in inst:
print(f" → scrollIntoView: {inst['scroll_into_view_id']}")
elif "scroll_top" in inst:
print(f" → scrollTop: {inst['scroll_top']}px")
print(f" → 等待: {inst['wait_ms']}ms")
print(f" → 截图: {inst['screenshot_name']}")
print(f"{inst['notes']}")
# 保存指令
inst_path = ANCHORS_DIR / f"{page_name}-mp-instructions.json"
with open(inst_path, "w", encoding="utf-8") as f:
json.dump(instructions, f, ensure_ascii=False, indent=2)
print(f"\n💾 指令已保存: {inst_path.relative_to(ROOT)}")
return instructions
out_dir = page_output_dir(name)
h5_count = len(list(out_dir.glob("h5--step-*.png"))) if out_dir.exists() else 0
mp_count = len(list(out_dir.glob("mp--step-*.png"))) if out_dir.exists() else 0
diff_count = len(list(out_dir.glob("diff--step-*.png"))) if out_dir.exists() else 0
print(f" {name:<30} {steps:>4} {dims:>4} {h5_count:>4} {mp_count:>4} {diff_count:>4}")
# ═══════════════════════════════════════════════════════════
# 对比:逐段截图 1:1 对比
# ═══════════════════════════════════════════════════════════
def compare_regions(page_name: str) -> list[dict]:
    """Pair up per-section H5 and MP screenshots for pixel comparison.

    H5 shots are already 1290x2256 (DPR=3); MP shots are 645x1128 and are
    upscaled x2 to 1290 wide.  Both ends start at the top of the section,
    so the resulting pair can be fed to pixelmatch directly.

    Emits, per segment i:
      seg-h5-<page>-<i>.png — H5 crop (1290 wide, copied through)
      seg-mp-<page>-<i>.png — MP crop (scaled to 1290 wide)

    Returns one result dict per segment; segments with a missing screenshot
    get an "error" key instead of output paths.
    """
    data = load_anchor_data(page_name)
    if not data:
        print(f"❌ 未找到 {page_name} 的锚点数据")
        return []
    segments = data.get("segments", [])
    if not segments:
        print(f"⚠ {page_name} 无有效段(可能需要重新运行 extract-h5")
        return []
    results = []
    print(f"\n{'='*60}")
    print(f"📊 逐段对比: {page_name} ({len(segments)} 段)")
    print(f"{'='*60}")
    for seg in segments:
        i = seg["index"]
        name = seg["name"]
        # H5 side must exist (produced by extract_h5_anchors).
        h5_seg_path = SCREENSHOTS_DIR / f"h5-{page_name}--seg-{i}.png"
        if not h5_seg_path.exists():
            print(f"\n ❌ H5 段截图不存在: {h5_seg_path.name}")
            results.append({"region": name, "index": i, "error": "H5 截图缺失"})
            continue
        # MP side is captured manually via the MCP tooling.
        mp_seg_path = SCREENSHOTS_DIR / f"mp-{page_name}--seg-{i}.png"
        if not mp_seg_path.exists():
            print(f"\n ❌ MP 段截图不存在: {mp_seg_path.name}")
            print(f" 请通过 MCP 工具执行截图指令后重试")
            results.append({"region": name, "index": i, "error": "MP 截图缺失"})
            continue
        h5_img = Image.open(h5_seg_path)
        mp_img = Image.open(mp_seg_path)
        print(f"\n{''*40}")
        print(f"📦 段 {i}: {name}")
        print(f" H5: {h5_img.width}×{h5_img.height}")
        print(f" MP: {mp_img.width}×{mp_img.height}")
        # H5 should already be TARGET_W wide; warn (don't fail) otherwise.
        if h5_img.width != TARGET_W:
            print(f" ⚠️ H5 宽度异常 {h5_img.width},期望 {TARGET_W}")
        # Upscale MP to the shared comparison width.
        mp_scaled = resize_to_width(mp_img, TARGET_W)
        print(f" MP 缩放后: {mp_scaled.width}×{mp_scaled.height}")
        # Compare at the smaller of the two heights (normally they match).
        compare_h = min(h5_img.height, mp_scaled.height)
        if h5_img.height != mp_scaled.height:
            print(f" ⚠️ 高度不一致: H5={h5_img.height}, MP={mp_scaled.height}")
            print(f" 取较小值 {compare_h} 进行对比")
        # Crop both to the unified size.
        h5_final = h5_img.crop((0, 0, TARGET_W, compare_h))
        mp_final = mp_scaled.crop((0, 0, TARGET_W, compare_h))
        # Write the aligned pair to disk.
        h5_out = SCREENSHOTS_DIR / f"seg-h5-{page_name}-{i}.png"
        mp_out = SCREENSHOTS_DIR / f"seg-mp-{page_name}-{i}.png"
        h5_final.save(h5_out)
        mp_final.save(mp_out)
        print(f" ✅ 对比尺寸: {TARGET_W}×{compare_h}")
        print(f"{h5_out.name}")
        print(f"{mp_out.name}")
        results.append({
            "region": name,
            "index": i,
            "h5_path": str(h5_out.relative_to(ROOT)),
            "mp_path": str(mp_out.relative_to(ROOT)),
            "width": TARGET_W,
            "height": compare_h,
            "h5_original": f"{h5_img.width}×{h5_img.height}",
            "mp_original": f"{mp_img.width}×{mp_img.height}",
            "mp_scaled": f"{mp_scaled.width}×{mp_scaled.height}",
        })
    # Summary plus hints for the MCP-driven pixel diff step.
    ok_count = sum(1 for r in results if "error" not in r)
    err_count = sum(1 for r in results if "error" in r)
    print(f"\n{'='*60}")
    print(f"📊 对比准备完成: {page_name}")
    print(f" ✅ 成功: {ok_count}")
    if err_count:
        print(f" ❌ 失败: {err_count}")
    print(f"\n 使用 mcp_image_compare_compare_images 逐段对比:")
    for r in results:
        if "error" not in r:
            print(f" → seg-h5-{page_name}-{r['index']}.png vs "
                  f"seg-mp-{page_name}-{r['index']}.png")
    return results
def cmd_list():
    """List every page with its scroll parameters and capture-unit count.

    NOTE(review): this definition is shadowed by a second `def cmd_list()`
    later in the module (merge leftover), so as the file stands this version
    is dead code — only the later definition is bound at import time.
    """
    total_units = 0
    print(f"\n{'#':>2} {'页面':<30} {'scrollH':>7} {'maxScr':>7} {'步数':>4} {'维度':>4} {'单元':>4}")
    print("-" * 70)
    for i, (name, data) in enumerate(PAGE_DATA.items(), 1):
        units = data["steps"] * data["dims"]
        total_units += units
        # NOTE(review): seq is computed but never used in this function.
        seq = compute_scroll_sequence(data["scrollHeight"])
        print(f"{i:>2} {name:<30} {data['scrollHeight']:>7} {data['maxScroll']:>7} "
              f"{data['steps']:>4} {data['dims']:>4} {units:>4}")
    print("-" * 70)
    print(f" {'合计':<30} {'':>7} {'':>7} {'':>4} {'':>4} {total_units:>4}")
# ═══════════════════════════════════════════════════════════
# CLI 入口
# ═══════════════════════════════════════════════════════════
def cmd_report(page_name: str):
    """Print a skeleton diff-rate report for a page's existing diff images.

    The actual diff percentages come from the image_compare MCP output and
    must be filled into report.md by hand.
    """
    target_dir = page_output_dir(page_name)
    if not target_dir.exists():
        print(f"目录不存在: {target_dir.relative_to(ROOT)}")
        return
    diff_images = sorted(target_dir.glob("diff--step-*.png"))
    if not diff_images:
        print(f"未找到 diff 图: {target_dir.relative_to(ROOT)}/diff--step-*.png")
        return
    report_file = target_dir / "report.md"
    print(f"\n差异率报告: {page_name}")
    print(f"diff 图数量: {len(diff_images)}")
    print(f"报告路径: {report_file.relative_to(ROOT)}")
    print(f"\n注意:差异率数据需要从 image_compare MCP 的输出中手动填入")
def print_usage():
    """Print the CLI usage text.

    Fix: the usage text listed the `list` command twice with contradictory
    descriptions (merge leftover); the stale entry ("列出所有页面及参数") is
    removed — the kept one matches the active cmd_list() implementation.
    """
    print("""
用法:
  python scripts/ops/anchor_compare.py <command> <page_name>
  python scripts/ops/anchor_compare.py <command> [args]
命令:
  extract-h5 <page>    提取 H5 锚点坐标 + 逐段截图(需 Go Live 5500
  mp-inst <page>       生成 MP 截图指令(基于已有锚点数据
  compare <page>       逐段对比,输出配对图片
  full <page>          一键执行 extract-h5 + mp-inst + compare
  list                 列出所有已配置锚点的页面
  calc <scrollHeight>  计算 scrollTop 序列
  status [<page>]      查看截图状态(不指定页面则显示全部
  report <page>        生成差异率报告框架
v2 方案:两端都按 section 逐段截图(视口 430×752不再使用长截图裁剪。
截图和对比通过 MCP 工具在对话中执行:
  H5 截图 → Playwright MCP
  MP 截图 → 微信开发者工具 MCP
  像素对比 → image_compare MCP
示例:
  python scripts/ops/anchor_compare.py extract-h5 board-finance
  python scripts/ops/anchor_compare.py mp-inst board-finance
  python scripts/ops/anchor_compare.py compare board-finance
  python scripts/ops/anchor_compare.py calc 5600
  python scripts/ops/anchor_compare.py status board-finance
  python scripts/ops/anchor_compare.py list
""")
def cmd_list():
    """List every page that has anchors configured, with a data-file marker."""
    print("\n已配置锚点的页面:")
    for name, cfg in PAGE_ANCHORS.items():
        marker = "" if (ANCHORS_DIR / f"{name}.json").exists() else " "
        anchor_count = len(cfg["anchors"])
        sticky_count = len(cfg.get("sticky_selectors", []))
        fixed_count = len(cfg.get("fixed_bottom_selectors", []))
        mode = cfg.get("mp_scroll_mode", "page_scroll")
        print(
            f" {marker} {name}: {anchor_count} 锚点, "
            f"{sticky_count} sticky, {fixed_count} fixed, {mode}"
        )
def main():
    # NOTE(review): dead code — shadowed by the second `def main()` defined
    # after async_main() (merge leftover); only the later definition runs.
    _ensure_utf8_stdio()
async def async_main():
    """CLI dispatcher for anchor_compare.

    Fix: the original body was a botched merge of two dispatch chains —
    `list` fell through into the page_name check and exited 1 after listing;
    every handled page command (extract-h5/mp-inst/compare/full) then hit the
    second chain's `else` and exited 1 with a doubled "未知命令" message; and
    calc/status/report were unreachable because page-name validation against
    PAGE_ANCHORS ran first.  This version dispatches each command exactly once:
    page-less commands (list/calc/status/report) are handled before the
    page_name validation, page commands after it.
    """
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)
    command = sys.argv[1]
    if command in ("--help", "-h"):
        print_usage()
        return
    # ── 无需 page_name 的命令 ──
    if command == "list":
        cmd_list()
        return
    if command == "calc":
        if len(sys.argv) < 3:
            print("缺少 scrollHeight 参数")
            sys.exit(1)
        cmd_calc(int(sys.argv[2]))
        return
    if command == "status":
        # Page argument is optional for status.
        page = sys.argv[2] if len(sys.argv) >= 3 else None
        cmd_status(page)
        return
    if command == "report":
        if len(sys.argv) < 3:
            print("缺少 page 参数")
            sys.exit(1)
        cmd_report(sys.argv[2])
        return
    # ── 需要 page_name 的命令 ──
    if len(sys.argv) < 3:
        print(f"❌ 缺少 page_name 参数")
        print_usage()
        sys.exit(1)
    page_name = sys.argv[2]
    if page_name not in PAGE_ANCHORS:
        print(f"❌ 页面 '{page_name}' 未配置锚点")
        print(f" 已配置的页面: {', '.join(PAGE_ANCHORS.keys())}")
        sys.exit(1)
    if command == "extract-h5":
        await extract_h5_anchors(page_name)
    elif command == "mp-inst":
        generate_mp_capture_instructions(page_name)
    elif command == "compare":
        compare_regions(page_name)
    elif command == "full":
        print(f"\n🚀 一键执行: {page_name}")
        print(f" Step 1/3: 提取 H5 锚点 + 逐段截图...")
        await extract_h5_anchors(page_name)
        print(f"\n Step 2/3: 生成 MP 截图指令...")
        generate_mp_capture_instructions(page_name)
        print(f"\n Step 3/3: 逐段对比...")
        compare_regions(page_name)
    else:
        print(f"未知命令: {command}")
        print_usage()
        sys.exit(1)
def main():
    """Entry point: force UTF-8 stdio, then run the async CLI dispatcher."""
    _ensure_utf8_stdio()
    asyncio.run(async_main())
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -51,7 +51,7 @@ cash_online AS (
FROM day_label dl
LEFT JOIN dwd.dwd_settlement_head h
ON h.create_time >= dl.day_start AND h.create_time < dl.day_end
AND h.settle_type = 1
AND h.settle_type IN (1, 3)
GROUP BY dl.day_date
),
recharge AS (
@@ -79,7 +79,7 @@ member_guest AS (
FROM day_label dl
LEFT JOIN dwd.dwd_settlement_head h
ON h.create_time >= dl.day_start AND h.create_time < dl.day_end
AND h.settle_type = 1
AND h.settle_type IN (1, 3)
GROUP BY dl.day_date
),
new_member AS (

View File

@@ -140,6 +140,20 @@ def trunc(s, n=3000) -> str:
return str(s)
return s if len(s) <= n else s[:n] + f"\n... [截断,原文共 {len(s)} 字符]"
import re as _re
_SURROGATE_RE = _re.compile(r'[\ud800-\udfff]')
def _sanitize_surrogates(obj):
"""递归清洗 dict/list/str 中的 surrogate 字符,替换为 U+FFFD。"""
if isinstance(obj, str):
return _SURROGATE_RE.sub('\ufffd', obj)
if isinstance(obj, dict):
return {_sanitize_surrogates(k): _sanitize_surrogates(v) for k, v in obj.items()}
if isinstance(obj, list):
return [_sanitize_surrogates(i) for i in obj]
return obj
def safe_json(obj, n=5000) -> str:
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
@@ -1624,8 +1638,11 @@ def extract_single_execution(
return None
try:
with open(log_path, "r", encoding="utf-8") as f:
log = json.load(f)
with open(log_path, "rb") as f:
raw = f.read()
# 清洗 surrogate 字符Kiro log 中 emoji 可能被存为 surrogate pair
text = raw.decode("utf-8", errors="surrogatepass").encode("utf-8", errors="replace").decode("utf-8")
log = _sanitize_surrogates(json.loads(text))
except Exception:
return None
@@ -1860,6 +1877,9 @@ def extract_latest(global_storage: Optional[str] = None, workspace_path: Optiona
force=True, # 强制覆盖
)
if result:
# 逐条持久化,避免中途超时导致下次重复处理
save_index(index)
save_full_index(full_index)
print(f"[session-extract] completed partial: {result}")
# ── 步骤 2提取未索引的终态 execution ──
@@ -1888,9 +1908,6 @@ def extract_latest(global_storage: Optional[str] = None, workspace_path: Optiona
ready.append(r_exec)
if not ready:
if partial_eids:
save_index(index)
save_full_index(full_index)
return
# agent_on_stop 场景下限制单次处理量,避免处理数千个历史 execution
@@ -1912,39 +1929,66 @@ def extract_latest(global_storage: Optional[str] = None, workspace_path: Optiona
for cid in chat_groups:
chat_groups[cid].sort(key=lambda x: x.get("startTime", 0))
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
lock = threading.Lock()
extracted_count = 0
tombstone_count = 0
for cid, group_execs in chat_groups.items():
def _extract_group(group_execs: list[dict]) -> tuple[list[str], dict, dict]:
"""串行提取同一 chatSession 的所有 execution返回 (results, idx_entries, full_entries)。"""
local_index: dict = {"version": 2, "entries": {}}
local_full: dict = {"version": 2, "entries": {}}
results: list[str] = []
for execution in group_execs:
eid = execution.get("executionId", "")
result = extract_single_execution(
agent_dir=agent_dir,
hex_dir=execution["_hex_dir"],
execution=execution,
session_dir=session_dir,
index=index,
full_index=full_index,
)
if result:
# 如果是 partial 提取,在索引中标记 status
if execution.get("_is_partial"):
if eid in index.get("entries", {}):
index["entries"][eid]["status"] = "partial"
if full_index and eid in full_index.get("entries", {}):
full_index["entries"][eid]["status"] = "partial"
extracted_count += 1
print(f"[session-extract] extracted: {result}")
elif eid and index.get("entries", {}).get(eid, {}).get("no_log"):
# tombstone 被写入,需要保存索引
tombstone_count += 1
try:
result = extract_single_execution(
agent_dir=agent_dir,
hex_dir=execution["_hex_dir"],
execution=execution,
session_dir=session_dir,
index=local_index,
full_index=local_full,
)
if result:
if execution.get("_is_partial"):
if eid in local_index.get("entries", {}):
local_index["entries"][eid]["status"] = "partial"
if eid in local_full.get("entries", {}):
local_full["entries"][eid]["status"] = "partial"
results.append(result)
except Exception as exc:
print(f"[session-extract] ✗ {eid[:8]}: {exc}")
return results, local_index.get("entries", {}), local_full.get("entries", {})
if extracted_count > 0 or partial_eids or tombstone_count > 0:
save_index(index)
save_full_index(full_index)
if extracted_count > 1:
print(f"[session-extract] total: {extracted_count} executions")
if tombstone_count > 0:
print(f"[session-extract] tombstoned: {tombstone_count} (no log found)")
workers = min(4, len(chat_groups))
with ThreadPoolExecutor(max_workers=workers) as pool:
futures = {pool.submit(_extract_group, execs): cid
for cid, execs in chat_groups.items()}
for future in as_completed(futures):
results, idx_entries, full_entries = future.result()
with lock:
# 合并到主索引
index.setdefault("entries", {}).update(idx_entries)
if full_index is not None:
full_index.setdefault("entries", {}).update(full_entries)
extracted_count += len(results)
tombstone_count += sum(
1 for ent in idx_entries.values() if ent.get("no_log")
)
# 逐组持久化,避免中途超时导致下次重复处理
if idx_entries:
save_index(index)
save_full_index(full_index)
for r in results:
print(f"[session-extract] extracted: {r}")
if extracted_count > 1:
print(f"[session-extract] total: {extracted_count} executions")
if tombstone_count > 0:
print(f"[session-extract] tombstoned: {tombstone_count} (no log found)")
def extract_all_unindexed(

View File

@@ -0,0 +1,189 @@
"""
字段消失扫描器:检测 DWD 表中字段值从某天起突然全部为空的异常
判定条件:连续 ≥3 天 且 连续空记录 ≥20 条
报告类型:
- ONGOING从某天起至今持续为空如 DQ-6 member_phone
- RECOVERED中途消失后又恢复
输出:终端 + CSV → export/SYSTEM/REPORTS/field_scan/
"""
import os
import csv
from datetime import date, timedelta
from dataclasses import dataclass
from dotenv import load_dotenv
load_dotenv()
PG_DSN = os.environ.get("TEST_DB_DSN") or os.environ.get("PG_DSN")
if not PG_DSN:
raise RuntimeError("TEST_DB_DSN / PG_DSN 未配置")
SYSTEM_ANALYZE_ROOT = os.environ.get("SYSTEM_ANALYZE_ROOT")
if not SYSTEM_ANALYZE_ROOT:
raise RuntimeError("SYSTEM_ANALYZE_ROOT 未配置")
import psycopg2
# ── 扫描配置 ──────────────────────────────────────────────
# (schema.table, time_column, field, filter_sql)
# filter_sql 用于限定有意义的行(如只看会员订单)
SCAN_TARGETS = [
("dwd.dwd_settlement_head", "pay_time", "member_phone",
"settle_type IN (1,3) AND member_id IS NOT NULL AND member_id != 0"),
("dwd.dwd_settlement_head", "pay_time", "member_name",
"settle_type IN (1,3) AND member_id IS NOT NULL AND member_id != 0"),
("dwd.dwd_settlement_head", "pay_time", "member_card_type_name",
"settle_type IN (1,3) AND member_id IS NOT NULL AND member_id != 0"),
("dwd.dwd_settlement_head", "pay_time", "is_bind_member",
"settle_type IN (1,3) AND member_id IS NOT NULL AND member_id != 0"),
]
# 阈值
MIN_CONSECUTIVE_DAYS = 3
MIN_CONSECUTIVE_ROWS = 20
@dataclass
class Gap:
    """One contiguous run of days on which a field's values were all empty."""
    table: str        # qualified table name, e.g. "dwd.dwd_settlement_head"
    field: str        # column being scanned
    start_date: date  # first all-empty day of the run
    end_date: date    # last all-empty day of the run
    days: int         # all-empty days observed in the run (days without rows are absent)
    total_rows: int   # total rows inside the interval
    null_rows: int    # empty rows inside the interval
    recovered: bool   # True if non-empty values reappeared after the run
def build_daily_sql(table: str, time_col: str, field: str, filter_sql: str) -> str:
    """Render the per-day non-empty-rate SQL (plain GROUP BY, no generate_series).

    A value counts as present only when it is non-NULL and its text form is
    neither '' nor '0'.  `filter_sql` (may be empty) narrows the scanned rows.
    """
    where = ""
    if filter_sql:
        where = f"WHERE {filter_sql}"
    return f"""
    SELECT
        {time_col}::date AS day,
        COUNT(*) AS total,
        COUNT(CASE
            WHEN {field} IS NOT NULL
            AND {field}::text != ''
            AND {field}::text != '0'
            THEN 1
        END) AS non_null
    FROM {table}
    {where}
    GROUP BY {time_col}::date
    HAVING COUNT(*) > 0
    ORDER BY day
    """
def detect_gaps(daily_stats: list[tuple[date, int, int]],
                table: str, field: str) -> list[Gap]:
    """Detect runs of consecutive all-empty days in per-day stats.

    daily_stats: ordered (day, total_rows, non_null_rows) tuples; days with
    zero rows are absent entirely (the SQL has HAVING COUNT(*) > 0).
    A run is reported only when it spans >= MIN_CONSECUTIVE_DAYS observed
    days and >= MIN_CONSECUTIVE_ROWS empty rows.

    Fix: end_date was previously computed as `day - timedelta(days=1)` when a
    run closed, which is wrong whenever the stats skip calendar days (a day
    with no rows at all would be reported as the run's end).  We now remember
    the last all-empty day actually observed and use it for end_date in both
    the RECOVERED and ONGOING cases.
    """
    gaps: list[Gap] = []
    in_gap = False
    gap_start = None
    gap_end = None   # last all-empty day actually observed in the run
    gap_rows = 0
    gap_null = 0
    gap_days = 0
    for day, total, non_null in daily_stats:
        is_empty = (non_null == 0)
        if is_empty:
            if not in_gap:
                in_gap = True
                gap_start = day
                gap_rows = 0
                gap_null = 0
                gap_days = 0
            gap_days += 1
            gap_rows += total
            gap_null += total
            gap_end = day
        else:
            if in_gap:
                # Run ended: report it if it clears both thresholds.
                if gap_days >= MIN_CONSECUTIVE_DAYS and gap_null >= MIN_CONSECUTIVE_ROWS:
                    gaps.append(Gap(
                        table=table, field=field,
                        start_date=gap_start,
                        end_date=gap_end,
                        days=gap_days, total_rows=gap_rows,
                        null_rows=gap_null, recovered=True
                    ))
                in_gap = False
    # Still inside a run at the end of the data: the field has not recovered.
    if in_gap and gap_days >= MIN_CONSECUTIVE_DAYS and gap_null >= MIN_CONSECUTIVE_ROWS:
        gaps.append(Gap(
            table=table, field=field,
            start_date=gap_start,
            end_date=gap_end,
            days=gap_days, total_rows=gap_rows,
            null_rows=gap_null, recovered=False
        ))
    return gaps
def run_scan():
    """Scan all SCAN_TARGETS for field-disappearance runs and report them.

    Prints per-target progress to the terminal; when any run is found, also
    writes a CSV report under <dirname(SYSTEM_ANALYZE_ROOT)>/field_scan/ and
    prints a summary table.
    """
    all_gaps: list[Gap] = []
    # One connection for the whole scan; statement_timeout caps each query
    # at 120s so a slow table cannot hang the scanner.
    with psycopg2.connect(PG_DSN, connect_timeout=15,
                          options="-c statement_timeout=120000") as conn:
        with conn.cursor() as cur:
            for table, time_col, field, filter_sql in SCAN_TARGETS:
                print(f"扫描 {table}.{field} ...")
                sql = build_daily_sql(table, time_col, field, filter_sql)
                cur.execute(sql)
                rows = cur.fetchall()
                if not rows:
                    print(f" ⏭️ 无数据")
                    continue
                gaps = detect_gaps(rows, table, field)
                if gaps:
                    for g in gaps:
                        # ONGOING = still empty at the newest data point.
                        status = "🔴 ONGOING" if not g.recovered else "🟡 RECOVERED"
                        print(f" {status} {g.field}: {g.start_date}{g.end_date} "
                              f"({g.days}天, {g.null_rows}条全空)")
                    all_gaps.extend(gaps)
                else:
                    print(f" ✅ 无异常")
    # ── Report output ──
    if not all_gaps:
        print("\n✅ 所有字段正常,未发现消失段")
        return
    # CSV lands next to SYSTEM_ANALYZE_ROOT, in a sibling "field_scan" dir.
    report_dir = os.path.join(os.path.dirname(SYSTEM_ANALYZE_ROOT), "field_scan")
    os.makedirs(report_dir, exist_ok=True)
    csv_path = os.path.join(report_dir, "field_disappearance_report.csv")
    # utf-8-sig so Excel opens the Chinese headers correctly.
    with open(csv_path, "w", newline="", encoding="utf-8-sig") as f:
        writer = csv.writer(f)
        writer.writerow(["", "字段", "状态", "消失起始日", "消失结束日",
                         "持续天数", "区间总行数", "空行数"])
        for g in all_gaps:
            writer.writerow([
                g.table, g.field,
                "ONGOING" if not g.recovered else "RECOVERED",
                g.start_date, g.end_date,
                g.days, g.total_rows, g.null_rows
            ])
    print(f"\n📊 发现 {len(all_gaps)} 个字段消失段")
    print(f" 报告已生成: {csv_path}")
    # Terminal summary table.
    print(f"\n{'='*90}")
    print(f"{'':<35} {'字段':<20} {'状态':<12} {'起始':<12} {'结束':<12} {'天数':>5} {'空行':>6}")
    print(f"{'='*90}")
    for g in all_gaps:
        status = "ONGOING" if not g.recovered else "RECOVERED"
        print(f"{g.table:<35} {g.field:<20} {status:<12} "
              f"{str(g.start_date):<12} {str(g.end_date):<12} {g.days:>5} {g.null_rows:>6}")

View File

@@ -0,0 +1,189 @@
"""
board-finance WXSS rpx 修正脚本
基于 H5 vs MP 逐选择器审计结果,精确替换 23 处差异。
审计报告docs/reports/board-finance-h5-mp-audit.md
转换公式rpx = CSS_px × (750/412) ≈ px × 1.8204
注意2026-03-14 的 23 处修正已通过 IDE strReplace 直接应用。
本脚本保留作为修正记录和回滚参考。再次运行会因 old_text 不匹配而跳过。
"""
import pathlib
import sys
import os
def ensure_repo_root() -> None:
    """Abort (exit 1) unless cwd is the repo root, identified by pyproject.toml."""
    if pathlib.Path("pyproject.toml").exists():
        return
    print(f"❌ 当前目录 {os.getcwd()} 不是仓库根目录,请在 C:\\NeoZQYY 下运行")
    sys.exit(1)
ensure_repo_root()
WXSS_PATH = pathlib.Path(
"apps/miniprogram/miniprogram/pages/board-finance/board-finance.wxss"
)
# (old_text, new_text, description)
REPLACEMENTS: list[tuple[str, str, str]] = [
# --- #1 .filter-bar top: 78rpx → 80rpx (44px × 1.8204 = 80.10) ---
(
"top: 78rpx;",
"top: 80rpx;",
"#1 .filter-bar top: 44px → 80rpx",
),
# --- #2 .card-section border-radius: 30rpx → 14rpx (8px rounded-lg) ---
# card-section 有 margin 和 border-radius 在同一块
(
" border-radius: 30rpx;\n overflow: visible;\n border: 2rpx solid #eeeeee;",
" border-radius: 14rpx;\n overflow: visible;\n border: 2rpx solid #eeeeee;",
"#2 .card-section border-radius: 8px → 14rpx",
),
# --- #3 .card-header-dark padding: 24rpx → 26rpx (14px × 1.8204 = 25.49 → 26) ---
(
" gap: 22rpx;\n padding: 24rpx 30rpx;\n background: #1a1a1a;",
" gap: 22rpx;\n padding: 26rpx 30rpx;\n background: #1a1a1a;",
"#3 .card-header-dark padding-TB: 14px → 26rpx",
),
# --- #4 .card-header-emoji font-size: 32rpx → 33rpx (18px × 1.8204 = 32.77 → 33) ---
(
".card-header-emoji {\n font-size: 32rpx;",
".card-header-emoji {\n font-size: 33rpx;",
"#4 .card-header-emoji font-size: 18px → 33rpx",
),
# --- #5 .ai-insight-section margin: -2rpx → -30rpx (16px × 1.8204 = 29.13 → 30) ---
(
" margin: 30rpx -2rpx -2rpx -2rpx;",
" margin: 30rpx -30rpx -30rpx -30rpx;",
"#5 .ai-insight-section margin: 16px -16px → 30rpx -30rpx",
),
# --- #6 .ai-insight-icon width/height: 42rpx → 44rpx (24px × 1.8204 = 43.69 → 44) ---
(
".ai-insight-icon {\n width: 42rpx;\n height: 42rpx;",
".ai-insight-icon {\n width: 44rpx;\n height: 44rpx;",
"#6 .ai-insight-icon w/h: 24px → 44rpx",
),
# --- #7 .ai-insight-icon-img width/height: 32rpx → 33rpx (18px SVG) ---
(
".ai-insight-icon-img {\n width: 32rpx;\n height: 32rpx;",
".ai-insight-icon-img {\n width: 33rpx;\n height: 33rpx;",
"#7 .ai-insight-icon-img w/h: 18px → 33rpx",
),
# --- #8 .ai-insight-title font-size: 22rpx → 24rpx (13px × 1.8204 = 23.67 → 24) ---
(
".ai-insight-title {\n font-size: 22rpx;\n line-height: 29rpx;",
".ai-insight-title {\n font-size: 24rpx;\n line-height: 29rpx;",
"#8 .ai-insight-title font-size: 13px → 24rpx",
),
# --- #9 .card-section-title font-size: 22rpx → 24rpx (13px × 1.8204 = 23.67 → 24) ---
(
".card-section-title {\n font-size: 22rpx;\n line-height: 29rpx;\n font-weight: 600;",
".card-section-title {\n font-size: 24rpx;\n line-height: 29rpx;\n font-weight: 600;",
"#9 .card-section-title font-size: 13px → 24rpx",
),
# --- #10 .card-header-light border-radius: 30rpx 30rpx 0 0 → 14rpx 14rpx 0 0 ---
(
" border-radius: 30rpx 30rpx 0 0;",
" border-radius: 14rpx 14rpx 0 0;",
"#10 .card-header-light border-radius: 8px top → 14rpx",
),
# --- #11 .table-bordered border-radius: 30rpx → 14rpx ---
(
".table-bordered {\n border: 2rpx solid #e7e7e7;\n border-radius: 30rpx;",
".table-bordered {\n border: 2rpx solid #e7e7e7;\n border-radius: 14rpx;",
"#11 .table-bordered border-radius: 8px → 14rpx",
),
# --- #12 .total-balance-row border-radius: 30rpx → 14rpx ---
(
" background: #f0f0f0;\n border-radius: 30rpx;\n border: 2rpx solid #e7e7e7;\n}\n\n.total-balance-left",
" background: #f0f0f0;\n border-radius: 14rpx;\n border: 2rpx solid #e7e7e7;\n}\n\n.total-balance-left",
"#12 .total-balance-row border-radius → 14rpx",
),
# --- #13 .flow-item padding: 14rpx → 18rpx 0 (10px × 1.8204 = 18.20 → 18) ---
(
".flow-item {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 14rpx;",
".flow-item {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 18rpx 0;",
"#13 .flow-item padding: 10px 0 → 18rpx 0",
),
# --- #14 .flow-sum-row border-radius: 30rpx → 14rpx ---
(
" border-radius: 30rpx;\n border-top: 2rpx solid #e7e7e7;\n}\n\n/* CHANGE 2026-03-13 | intent: 校对 H5 flow-sum-label",
" border-radius: 14rpx;\n border-top: 2rpx solid #e7e7e7;\n}\n\n/* CHANGE 2026-03-13 | intent: 校对 H5 flow-sum-label",
"#14 .flow-sum-row border-radius → 14rpx",
),
# --- #15 .expense-cell border-radius: 30rpx → 14rpx ---
(
".expense-cell {\n background: #fafafa;\n border: 2rpx solid #e7e7e7;\n border-radius: 30rpx;",
".expense-cell {\n background: #fafafa;\n border: 2rpx solid #e7e7e7;\n border-radius: 14rpx;",
"#15 .expense-cell border-radius → 14rpx",
),
# --- #16 .sticky-section-header border-radius: 0 10rpx 10rpx 0 → 0 11rpx 11rpx 0 (6px × 1.8204 = 10.92 → 11) ---
(
" border-radius: 0 10rpx 10rpx 0;",
" border-radius: 0 11rpx 11rpx 0;",
"#16 .sticky-section-header border-radius: 6px → 11rpx",
),
# --- #17 .sticky-header-emoji font-size: 32rpx → 33rpx (18px) ---
(
".sticky-header-emoji {\n font-size: 32rpx;",
".sticky-header-emoji {\n font-size: 33rpx;",
"#17 .sticky-header-emoji font-size: 18px → 33rpx",
),
# --- #18 .sticky-header-tag border-radius: 10rpx → 11rpx (6px) ---
(
" border-radius: 10rpx;\n white-space: nowrap;\n}\n\n/* ===== 卡片底部锯齿",
" border-radius: 11rpx;\n white-space: nowrap;\n}\n\n/* ===== 卡片底部锯齿",
"#18 .sticky-header-tag border-radius: 6px → 11rpx",
),
# --- #19 & #20 .toc-item gap: 20rpx → 22rpx, padding: 20rpx → 22rpx (12px × 1.8204 = 21.84 → 22) ---
(
".toc-item {\n display: flex;\n align-items: center;\n gap: 20rpx;\n padding: 20rpx 30rpx;",
".toc-item {\n display: flex;\n align-items: center;\n gap: 22rpx;\n padding: 22rpx 30rpx;",
"#19-20 .toc-item gap+padding: 12px → 22rpx",
),
# --- #21 .toc-item-emoji font-size: 32rpx → 33rpx (18px) ---
(
".toc-item-emoji {\n font-size: 32rpx;",
".toc-item-emoji {\n font-size: 33rpx;",
"#21 .toc-item-emoji font-size: 18px → 33rpx",
),
# --- #22 & #23 .tip-toast-content font-size: 22rpx → 24rpx, line-height: 29rpx → 38rpx ---
(
".tip-toast-content {\n font-size: 22rpx;\n color: #4b4b4b;\n line-height: 29rpx;",
".tip-toast-content {\n font-size: 24rpx;\n color: #4b4b4b;\n line-height: 38rpx;",
"#22-23 .tip-toast-content font-size: 13px → 24rpx, line-height: 1.6 → 38rpx",
),
]
def main() -> None:
    """Apply the audited rpx corrections to the board-finance WXSS in place.

    Each replacement is applied only on an exact single match; unmatched or
    ambiguous (multi-match) entries are reported and skipped.
    """
    if not WXSS_PATH.exists():
        print(f"❌ 文件不存在: {WXSS_PATH}")
        sys.exit(1)
    text = WXSS_PATH.read_text(encoding="utf-8")
    fixed = 0
    passed_over = 0
    for old_text, new_text, label in REPLACEMENTS:
        hits = text.count(old_text)
        if hits == 1:
            text = text.replace(old_text, new_text, 1)
            print(f"✅ 已修正: {label}")
            fixed += 1
        elif hits == 0:
            print(f"⚠️ 跳过(未匹配): {label}")
            passed_over += 1
        else:
            print(f"⚠️ 跳过(多次匹配 ×{hits}: {label}")
            passed_over += 1
    WXSS_PATH.write_text(text, encoding="utf-8")
    print(f"\n完成: {fixed} 处修正, {passed_over} 处跳过")
if __name__ == "__main__":
main()

293
scripts/ops/measure_gaps.py Normal file
View File

@@ -0,0 +1,293 @@
"""
measure_gaps.py - 通用元素间距测量工具
功能:
1. 对任意 H5 页面任意 CSS 选择器,通过 getBoundingClientRect 测量
元素位置、大小、computed style输出 px 和 rpx 换算表
2. 对一组选择器对计算元素间的边界间距bottom-to-top 或 right-to-left
3. 支持 scrollTop 偏移(当元素在页面中下方时先滚动到目标屏)
4. 输出 JSON + 终端表格,可直接填入 audit.md
用法:
uv run python scripts/ops/measure_gaps.py <page_name> [options]
示例:
# 测量 task-list 页面中所有 .task-card 元素的间距
uv run python scripts/ops/measure_gaps.py task-list --selectors ".task-card"
# 测量 board-finance 页面应将 inner 到 section 的 padding
uv run python scripts/ops/measure_gaps.py board-finance --selectors "#section-overview" ".summary-content"
# 测量并输出比较表格(用于 audit.md F 项)
uv run python scripts/ops/measure_gaps.py task-list --pairs ".filter-bar" ".task-card:first-child"
"""
import argparse
import asyncio
import io
import json
import math
import sys
from pathlib import Path
ROOT = Path(__file__).resolve().parents[2]
H5_PAGES = ROOT / "docs" / "h5_ui" / "pages"
OUT_DIR = ROOT / "docs" / "h5_ui" / "measure"
OUT_DIR.mkdir(parents=True, exist_ok=True)
VIEWPORT_W = 430
VIEWPORT_H = 752
DPR = 1.5
RPX_FACTOR = 750 / VIEWPORT_W # = 1.7442...
H5_BASE = "file:///" + str(ROOT).replace("\\", "/") + "/docs/h5_ui/pages"
def px_to_rpx(px: float) -> int:
    """Convert an H5 pixel value to mini-program rpx, rounded up to an even int."""
    scaled = px * RPX_FACTOR
    half_steps = math.ceil(scaled / 2)
    return int(half_steps * 2)
def px_val(s: str) -> float:
    """Parse a CSS length string such as '16px' into a float.

    CSS keywords ('none', 'normal', 'auto') and empty/unparsable strings
    map to 0.0; a bare number is accepted as-is.
    """
    token = s.strip()
    if token.endswith("px"):
        return float(token[:-2])
    if token in ("", "none", "normal", "auto"):
        return 0.0
    try:
        return float(token)
    except ValueError:
        return 0.0
MEASURE_JS = """
(selectors, scrollTop) => {
// scroll to position first
window.scrollTo(0, scrollTop || 0);
const results = [];
for (const sel of selectors) {
const els = document.querySelectorAll(sel);
els.forEach((el, idx) => {
const r = el.getBoundingClientRect();
const cs = window.getComputedStyle(el);
results.push({
selector: sel,
index: idx,
// position relative to viewport
top: r.top,
bottom: r.bottom,
left: r.left,
right: r.right,
width: r.width,
height: r.height,
// absolute position on page
pageTop: r.top + (scrollTop || 0),
pageBottom: r.bottom + (scrollTop || 0),
// computed style key spacing props
paddingTop: cs.paddingTop,
paddingBottom: cs.paddingBottom,
paddingLeft: cs.paddingLeft,
paddingRight: cs.paddingRight,
marginTop: cs.marginTop,
marginBottom: cs.marginBottom,
marginLeft: cs.marginLeft,
marginRight: cs.marginRight,
gap: cs.gap,
rowGap: cs.rowGap,
columnGap: cs.columnGap,
fontSize: cs.fontSize,
lineHeight: cs.lineHeight,
fontWeight: cs.fontWeight,
borderTopWidth: cs.borderTopWidth,
borderBottomWidth: cs.borderBottomWidth,
borderRadius: cs.borderRadius,
text: (el.textContent || '').trim().substring(0, 40),
tagName: el.tagName,
className: (el.className || '').substring(0, 100),
});
});
}
return results;
}
"""
async def measure_page(page_name: str, selectors: list[str], scroll_top: int = 0,
                       pair_selectors: list[str] | None = None) -> dict:
    """Open docs/h5_ui/pages/<page_name>.html headlessly and measure elements.

    Returns a dict with:
      - "items": one record per matched element (rect + computed style),
      - "consecutive_gaps": vertical gaps between successive items (when >1),
      - "pair_gap": gap between the first matches of two selectors (when given).

    Exits the process with an error message if playwright is not installed.
    """
    try:
        from playwright.async_api import async_playwright
    except ImportError:
        print("ERROR: playwright not installed. Run: uv add playwright && playwright install chromium")
        sys.exit(1)
    url = f"{H5_BASE}/{page_name}.html"
    results = {}
    # Playwright's page.evaluate() accepts exactly ONE serializable argument,
    # so bundle the selector list and the scroll offset into a single object
    # and unpack it in a small JS adapter. (The previous
    # `page.evaluate(MEASURE_JS, selectors, scroll_top)` call passed two
    # positional args and raised TypeError.)
    call_js = "args => (" + MEASURE_JS + ")(args.selectors, args.scrollTop)"
    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=True)
        ctx = await browser.new_context(
            viewport={"width": VIEWPORT_W, "height": VIEWPORT_H},
            device_scale_factor=DPR,
        )
        page = await ctx.new_page()
        await page.goto(url, wait_until="load", timeout=20000)
        await page.wait_for_timeout(3000)  # wait for Tailwind JIT
        items = await page.evaluate(call_js, {"selectors": selectors, "scrollTop": scroll_top})
        results["items"] = items
        # compute gaps between consecutive items (pageBottom[i] -> pageTop[i+1])
        if len(items) > 1:
            gaps = []
            for i in range(len(items) - 1):
                a, b = items[i], items[i + 1]
                gap_px = b["pageTop"] - a["pageBottom"]
                gaps.append({
                    "from": f"{a['selector']}[{a['index']}]",
                    "to": f"{b['selector']}[{b['index']}]",
                    "gap_px": round(gap_px, 2),
                    "gap_rpx": px_to_rpx(gap_px),
                    "from_bottom_px": round(a["pageBottom"], 2),
                    "to_top_px": round(b["pageTop"], 2),
                })
            results["consecutive_gaps"] = gaps
        # compute pair gaps if specified (first match of each selector only)
        if pair_selectors and len(pair_selectors) >= 2:
            pair_items = await page.evaluate(call_js, {"selectors": pair_selectors, "scrollTop": scroll_top})
            by_sel = {}
            for it in pair_items:
                by_sel.setdefault(it["selector"], []).append(it)
            pair_a = by_sel.get(pair_selectors[0], [{}])[0]
            pair_b = by_sel.get(pair_selectors[1], [{}])[0]
            if pair_a and pair_b:
                gap_px = pair_b.get("pageTop", 0) - pair_a.get("pageBottom", 0)
                results["pair_gap"] = {
                    "a": pair_selectors[0],
                    "b": pair_selectors[1],
                    "gap_px": round(gap_px, 2),
                    "gap_rpx": px_to_rpx(gap_px),
                }
        await browser.close()
    return results
def print_table(items: list[dict]):
    """Print a fixed-width terminal table of the measured elements."""
    print(f"\n{'selector':<35} {'idx':>3} {'top_px':>8} {'h_px':>8} {'pt':>8} {'pb':>8} {'mt':>8} {'mb':>8} {'gap':>8} {'fs':>8} {'lh':>8} {'h_rpx':>8}")
    print("-" * 130)
    for row in items:
        # lineHeight may be the CSS keyword 'normal'; render that as 0.0
        raw_lh = row['lineHeight']
        lh = px_val(raw_lh) if raw_lh not in ('normal', '') else 0
        print(
            f"{row['selector']:<35} {row['index']:>3} "
            f"{row['pageTop']:>8.1f} {row['height']:>8.1f} "
            f"{px_val(row['paddingTop']):>8.1f} {px_val(row['paddingBottom']):>8.1f} "
            f"{px_val(row['marginTop']):>8.1f} {px_val(row['marginBottom']):>8.1f} "
            f"{px_val(row.get('gap', '0px')):>8.1f} "
            f"{px_val(row['fontSize']):>8.1f} "
            f"{lh:>8.1f} "
            f"{px_to_rpx(row['height']):>8}"
        )
def print_gaps(gaps: list[dict]):
    """Print the consecutive-gap table; silently does nothing for an empty list."""
    if not gaps:
        return
    print(f"\n{'from':<40} {'to':<40} {'gap_px':>8} {'gap_rpx':>8}")
    print("-" * 100)
    for entry in gaps:
        print(f"{entry['from']:<40} {entry['to']:<40} {entry['gap_px']:>8.1f} {entry['gap_rpx']:>8}")
def spacing_audit_table(items: list[dict]) -> str:
    """Generate a markdown table for the audit.md spacing section.

    Each measured element becomes one row: selector[index], page-top/height
    in px, height converted to rpx, then the raw computed-style spacing
    strings (emitted verbatim, e.g. "16px" — only height is converted).
    """
    lines = [
        "| 元素选择器 | 页面Top(px) | 高度(px) | 高度(rpx) | paddingT | paddingB | marginT | marginB | gap | fontSize | lineHeight |",
        "|---|---|---|---|---|---|---|---|---|---|---|",
    ]
    for it in items:
        h_rpx = px_to_rpx(it['height'])
        lines.append(
            f"| {it['selector']}[{it['index']}] "
            f"| {it['pageTop']:.1f} | {it['height']:.1f} | {h_rpx} "
            f"| {it['paddingTop']} | {it['paddingBottom']} "
            f"| {it['marginTop']} | {it['marginBottom']} "
            f"| {it.get('gap','0px')} | {it['fontSize']} | {it['lineHeight']} |"
        )
    return "\n".join(lines)
def main():
    """CLI entry: measure selectors on one H5 page, print tables, save JSON."""
    # Windows consoles default to a legacy codepage; force UTF-8 so the
    # Chinese labels and arrow characters below print without crashing.
    if sys.platform == 'win32':
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
    parser = argparse.ArgumentParser(description='测量 H5 页面元素间距,输出 px 和 rpx')
    parser.add_argument('page', help='页面名,如 task-list')
    parser.add_argument('--selectors', nargs='+', default=[], help='CSS 选择器列表')
    parser.add_argument('--scroll', type=int, default=0, help='scrollTop 位置(默认 0')
    parser.add_argument('--pairs', nargs=2, metavar=('A', 'B'), help='计算两个选择器间的间距')
    parser.add_argument('--out', help='输出 JSON 路径(默认自动命名)')
    args = parser.parse_args()
    if not args.selectors and not args.pairs:
        print('请指定 --selectors 或 --pairs')
        parser.print_help()
        sys.exit(1)
    # --pairs alone still needs element measurements, so fall back to it.
    selectors = args.selectors or list(args.pairs)
    results = asyncio.run(measure_page(args.page, selectors, args.scroll, args.pairs))
    # save JSON (default path: OUT_DIR/<page>-gaps.json)
    out_path = Path(args.out) if args.out else OUT_DIR / f"{args.page}-gaps.json"
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    # print table
    items = results.get('items', [])
    if items:
        print(f'\n页面: {args.page} scrollTop={args.scroll} 元素数: {len(items)}')
        print_table(items)
        # spacing props summary: only non-zero values are reported per element
        print('\n关键间距汇总px → rpx:')
        for it in items:
            pt = px_val(it['paddingTop'])
            pb = px_val(it['paddingBottom'])
            mt = px_val(it['marginTop'])
            mb = px_val(it['marginBottom'])
            g = px_val(it.get('gap', '0px'))
            fs = px_val(it['fontSize'])
            lh_raw = it['lineHeight']
            # lineHeight may be the keyword 'normal', which px_val can't parse
            lh = px_val(lh_raw) if lh_raw not in ('normal', '') else 0
            parts = []
            if pt: parts.append(f'paddingTop={pt:.1f}px→{px_to_rpx(pt)}rpx')
            if pb: parts.append(f'paddingBot={pb:.1f}px→{px_to_rpx(pb)}rpx')
            if mt: parts.append(f'marginTop={mt:.1f}px→{px_to_rpx(mt)}rpx')
            if mb: parts.append(f'marginBot={mb:.1f}px→{px_to_rpx(mb)}rpx')
            if g: parts.append(f'gap={g:.1f}px→{px_to_rpx(g)}rpx')
            if fs: parts.append(f'fontSize={fs:.1f}px→{px_to_rpx(fs)}rpx')
            if lh: parts.append(f'lineHeight={lh:.1f}px→{px_to_rpx(lh)}rpx')
            if parts:
                print(f' {it["selector"]}[{it["index"]}]: {" ".join(parts)}')
    # consecutive gaps
    gaps = results.get('consecutive_gaps', [])
    if gaps:
        print('\n\u76f8邻元素垂直间距:')
        print_gaps(gaps)
    # pair gap
    pg = results.get('pair_gap')
    if pg:
        print(f'\n指定对间距: {pg["a"]}{pg["b"]}: {pg["gap_px"]:.1f}px = {pg["gap_rpx"]}rpx')
    # markdown table (printed even when no items matched: header rows only)
    print('\n--- audit.md 间距表格 ---')
    print(spacing_audit_table(items))
    print(f'\n详细数据已保存: {out_path}')
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,89 @@
section"""
lines = [
"| 元素选择器 | 页面Top(px) | 高度(px) | 高度(rpx) | paddingT | paddingB | marginT | marginB | gap | fontSize | lineHeight |",
"|---|---|---|---|---|---|---|---|---|---|---|",
]
for it in items:
h_rpx = px_to_rpx(it['height'])
lh = px_val(it['lineHeight']) if it['lineHeight'] not in ('normal', '') else 0
lines.append(
f"| {it['selector']}[{it['index']}] "
f"| {it['pageTop']:.1f} | {it['height']:.1f} | {h_rpx} "
f"| {it['paddingTop']} | {it['paddingBottom']} "
f"| {it['marginTop']} | {it['marginBottom']} "
f"| {it.get('gap','0px')} | {it['fontSize']} | {it['lineHeight']} |"
)
return "\n".join(lines)
def main():
    """CLI entry: measure selectors on one H5 page, print tables, save JSON."""
    # Windows consoles default to a legacy codepage; force UTF-8 so the
    # Chinese labels and arrow characters below print without crashing.
    if sys.platform == 'win32':
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
    parser = argparse.ArgumentParser(description='测量 H5 页面元素间距,输出 px 和 rpx')
    parser.add_argument('page', help='页面名,如 task-list')
    parser.add_argument('--selectors', nargs='+', default=[], help='CSS 选择器列表')
    parser.add_argument('--scroll', type=int, default=0, help='scrollTop 位置(默认 0')
    parser.add_argument('--pairs', nargs=2, metavar=('A', 'B'), help='计算两个选择器间的间距')
    parser.add_argument('--out', help='输出 JSON 路径(默认自动命名)')
    args = parser.parse_args()
    if not args.selectors and not args.pairs:
        print('请指定 --selectors 或 --pairs')
        parser.print_help()
        sys.exit(1)
    # --pairs alone still needs element measurements, so fall back to it.
    selectors = args.selectors or list(args.pairs)
    results = asyncio.run(measure_page(args.page, selectors, args.scroll, args.pairs))
    # save JSON (default path: OUT_DIR/<page>-gaps.json)
    out_path = Path(args.out) if args.out else OUT_DIR / f"{args.page}-gaps.json"
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    # print table
    items = results.get('items', [])
    if items:
        print(f'\n页面: {args.page} scrollTop={args.scroll} 元素数: {len(items)}')
        print_table(items)
        # spacing props summary: only non-zero values are reported per element
        print('\n关键间距汇总px → rpx:')
        for it in items:
            pt = px_val(it['paddingTop'])
            pb = px_val(it['paddingBottom'])
            mt = px_val(it['marginTop'])
            mb = px_val(it['marginBottom'])
            g = px_val(it.get('gap', '0px'))
            fs = px_val(it['fontSize'])
            lh_raw = it['lineHeight']
            # lineHeight may be the keyword 'normal', which px_val can't parse
            lh = px_val(lh_raw) if lh_raw not in ('normal', '') else 0
            parts = []
            if pt: parts.append(f'paddingTop={pt:.1f}px→{px_to_rpx(pt)}rpx')
            if pb: parts.append(f'paddingBot={pb:.1f}px→{px_to_rpx(pb)}rpx')
            if mt: parts.append(f'marginTop={mt:.1f}px→{px_to_rpx(mt)}rpx')
            if mb: parts.append(f'marginBot={mb:.1f}px→{px_to_rpx(mb)}rpx')
            if g: parts.append(f'gap={g:.1f}px→{px_to_rpx(g)}rpx')
            if fs: parts.append(f'fontSize={fs:.1f}px→{px_to_rpx(fs)}rpx')
            if lh: parts.append(f'lineHeight={lh:.1f}px→{px_to_rpx(lh)}rpx')
            if parts:
                print(f' {it["selector"]}[{it["index"]}]: {" ".join(parts)}')
    # consecutive gaps
    gaps = results.get('consecutive_gaps', [])
    if gaps:
        print('\n\u76f8邻元素垂直间距:')
        print_gaps(gaps)
    # pair gap
    pg = results.get('pair_gap')
    if pg:
        print(f'\n指定对间距: {pg["a"]}{pg["b"]}: {pg["gap_px"]:.1f}px = {pg["gap_rpx"]}rpx')
    # markdown table (printed even when no items matched: header rows only)
    print('\n--- audit.md 间距表格 ---')
    print(spacing_audit_table(items))
    print(f'\n详细数据已保存: {out_path}')
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,222 @@
"""
会员消费报表2025-11-01 至今,按手机号归总,消费金额从高到低
口径items_sum = table_charge_money + goods_money + assistant_pd_money + assistant_cx_money + electricity_money
台费拆分:通过 dws.cfg_area_category 配置category_code='KTV' 为 K包其余为一般包厢台费
会员信息:通过 member_id LEFT JOIN dim_member 获取 mobile/nickname
12月起 settlement_head.member_phone 为空,必须走 dim_member
输出CSV → export/SYSTEM/REPORTS/member_reports/
"""
import os
import csv
from dotenv import load_dotenv
# Load .env so the DSN/path settings below are available.
load_dotenv()
# Prefer the test-database DSN; fall back to PG_DSN.
PG_DSN = os.environ.get("TEST_DB_DSN") or os.environ.get("PG_DSN")
if not PG_DSN:
    raise RuntimeError("TEST_DB_DSN / PG_DSN 未配置")
# CSV output goes under dirname(SYSTEM_ANALYZE_ROOT)/member_reports (see run_report).
SYSTEM_ANALYZE_ROOT = os.environ.get("SYSTEM_ANALYZE_ROOT")
if not SYSTEM_ANALYZE_ROOT:
    raise RuntimeError("SYSTEM_ANALYZE_ROOT 未配置")
# Imported after the config checks so a missing setting fails fast with a clear error.
import psycopg2
SQL = r"""
WITH date_range AS (
SELECT '2025-11-01'::date AS start_date, CURRENT_DATE AS end_date
),
area_mapping AS (
SELECT source_area_name, source_table_name, category_code
FROM dws.cfg_area_category
WHERE is_active = true AND match_type = 'EXACT'
),
-- 先筛出有会员的结算单member_id 有值,或 member_phone 有值)
settle_ids AS (
SELECT sh.order_settle_id
FROM dwd.dwd_settlement_head sh
CROSS JOIN date_range dr
WHERE sh.settle_type IN (1, 3)
AND sh.pay_time >= dr.start_date
AND sh.pay_time < dr.end_date + 1
AND (
(sh.member_id IS NOT NULL AND sh.member_id != 0)
OR (sh.member_phone IS NOT NULL AND sh.member_phone != '')
)
),
table_fee_split AS (
SELECT
tfl.order_settle_id,
SUM(CASE WHEN COALESCE(am.category_code, 'OTHER') = 'KTV'
THEN tfl.ledger_amount ELSE 0 END) AS k_room_fee,
SUM(CASE WHEN COALESCE(am.category_code, 'OTHER') != 'KTV'
THEN tfl.ledger_amount ELSE 0 END) AS normal_table_fee
FROM dwd.dwd_table_fee_log tfl
INNER JOIN settle_ids si ON tfl.order_settle_id = si.order_settle_id
LEFT JOIN dwd.dim_table dt
ON dt.table_id = tfl.site_table_id AND dt.scd2_is_current = 1
LEFT JOIN area_mapping am
ON tfl.site_table_area_name = am.source_area_name
AND dt.table_name = am.source_table_name
WHERE tfl.is_delete = 0
GROUP BY tfl.order_settle_id
),
-- 关联 dim_member 获取手机号,优先 dim_member.mobile回退 settlement_head.member_phone
consumption AS (
SELECT
sh.order_settle_id,
COALESCE(NULLIF(dm.mobile, ''), NULLIF(sh.member_phone, '')) AS phone,
sh.pay_time,
COALESCE(sh.table_charge_money, 0)
+ COALESCE(sh.goods_money, 0)
+ COALESCE(sh.assistant_pd_money, 0)
+ COALESCE(sh.assistant_cx_money, 0)
+ COALESCE(sh.electricity_money, 0) AS items_sum,
COALESCE(sh.goods_money, 0) AS goods_money,
COALESCE(sh.assistant_pd_money, 0) + COALESCE(sh.assistant_cx_money, 0) AS assistant_money
FROM dwd.dwd_settlement_head sh
CROSS JOIN date_range dr
LEFT JOIN dwd.dim_member dm
ON dm.member_id = sh.member_id AND dm.scd2_is_current = 1
WHERE sh.settle_type IN (1, 3)
AND sh.pay_time >= dr.start_date
AND sh.pay_time < dr.end_date + 1
AND (
(sh.member_id IS NOT NULL AND sh.member_id != 0)
OR (sh.member_phone IS NOT NULL AND sh.member_phone != '')
)
),
-- 过滤掉最终仍无手机号的记录
consumption_with_phone AS (
SELECT * FROM consumption WHERE phone IS NOT NULL
),
monthly AS (
SELECT
c.phone,
EXTRACT(YEAR FROM c.pay_time)::int AS yr,
EXTRACT(MONTH FROM c.pay_time)::int AS mo,
SUM(c.items_sum) AS month_total,
SUM(c.goods_money) AS month_goods,
SUM(c.assistant_money) AS month_assistant,
SUM(COALESCE(tf.k_room_fee, 0)) AS month_k_room,
SUM(COALESCE(tf.normal_table_fee, 0)) AS month_normal_table
FROM consumption_with_phone c
LEFT JOIN table_fee_split tf ON c.order_settle_id = tf.order_settle_id
GROUP BY c.phone, yr, mo
),
member_agg AS (
SELECT
phone,
SUM(month_total) AS total_consumption,
SUM(CASE WHEN yr = 2025 AND mo = 11 THEN month_total ELSE 0 END) AS m11,
SUM(CASE WHEN yr = 2025 AND mo = 12 THEN month_total ELSE 0 END) AS m12,
SUM(CASE WHEN yr = 2026 AND mo = 1 THEN month_total ELSE 0 END) AS m01,
SUM(CASE WHEN yr = 2026 AND mo = 2 THEN month_total ELSE 0 END) AS m02,
SUM(CASE WHEN yr = 2026 AND mo = 3 THEN month_total ELSE 0 END) AS m03,
SUM(month_k_room) AS k_room_total,
SUM(month_normal_table) AS normal_table_total,
SUM(month_assistant) AS assistant_total,
SUM(month_goods) AS goods_total
FROM monthly
GROUP BY phone
),
-- 昵称:合并 dim_member.nickname 和 settlement_head.member_name
member_names AS (
SELECT
COALESCE(NULLIF(dm.mobile, ''), NULLIF(sh.member_phone, '')) AS phone,
STRING_AGG(
DISTINCT COALESCE(NULLIF(dm.nickname, ''), NULLIF(sh.member_name, '')),
' | '
ORDER BY COALESCE(NULLIF(dm.nickname, ''), NULLIF(sh.member_name, ''))
) FILTER (
WHERE COALESCE(NULLIF(dm.nickname, ''), NULLIF(sh.member_name, '')) IS NOT NULL
) AS names
FROM dwd.dwd_settlement_head sh
CROSS JOIN date_range dr
LEFT JOIN dwd.dim_member dm
ON dm.member_id = sh.member_id AND dm.scd2_is_current = 1
WHERE sh.settle_type IN (1, 3)
AND sh.pay_time >= dr.start_date
AND sh.pay_time < dr.end_date + 1
AND (
(sh.member_id IS NOT NULL AND sh.member_id != 0)
OR (sh.member_phone IS NOT NULL AND sh.member_phone != '')
)
AND COALESCE(NULLIF(dm.mobile, ''), NULLIF(sh.member_phone, '')) IS NOT NULL
GROUP BY phone
)
SELECT
COALESCE(mn.names, '') AS "会员昵称",
ma.phone AS "手机号",
ROUND(ma.total_consumption, 2) AS "11月至今共消费",
ROUND(ma.m11, 2) AS "11月消费共计",
ROUND(ma.m12, 2) AS "12月消费共计",
ROUND(ma.m01, 2) AS "1月消费共计",
ROUND(ma.m02, 2) AS "2月消费共计",
ROUND(ma.m03, 2) AS "3月消费共计",
ROUND(ma.k_room_total, 2) AS "K包",
CASE WHEN ma.total_consumption > 0
THEN ROUND(ma.k_room_total / ma.total_consumption * 100, 1)
ELSE 0 END AS "K包占比%",
ROUND(ma.normal_table_total, 2) AS "一般包厢台费",
CASE WHEN ma.total_consumption > 0
THEN ROUND(ma.normal_table_total / ma.total_consumption * 100, 1)
ELSE 0 END AS "一般包厢台费占比%",
ROUND(ma.assistant_total, 2) AS "助教费",
CASE WHEN ma.total_consumption > 0
THEN ROUND(ma.assistant_total / ma.total_consumption * 100, 1)
ELSE 0 END AS "助教费占比%",
ROUND(ma.goods_total, 2) AS "商品费",
CASE WHEN ma.total_consumption > 0
THEN ROUND(ma.goods_total / ma.total_consumption * 100, 1)
ELSE 0 END AS "商品费占比%"
FROM member_agg ma
LEFT JOIN member_names mn ON ma.phone = mn.phone
ORDER BY ma.total_consumption DESC
"""
def run_report():
    """Run the member-consumption SQL, export it as CSV, and echo a preview."""
    # Fetch: one statement, bounded by a 5-minute server-side timeout.
    with psycopg2.connect(PG_DSN, connect_timeout=15, options="-c statement_timeout=300000") as conn:
        with conn.cursor() as cur:
            cur.execute(SQL)
            columns = [desc[0] for desc in cur.description]
            rows = cur.fetchall()
    # Export next to SYSTEM_ANALYZE_ROOT; BOM (utf-8-sig) so Excel reads UTF-8.
    report_dir = os.path.join(os.path.dirname(SYSTEM_ANALYZE_ROOT), "member_reports")
    os.makedirs(report_dir, exist_ok=True)
    csv_path = os.path.join(report_dir, "member_consumption_202511_to_now.csv")
    with open(csv_path, "w", newline="", encoding="utf-8-sig") as out:
        csv_writer = csv.writer(out)
        csv_writer.writerow(columns)
        csv_writer.writerows(rows)
    print(f"✅ 报表已生成: {csv_path}")
    print(f"共 {len(rows)} 位会员")
    # Terminal preview: first two columns left-aligned, the rest right-aligned.
    print(f"\n{'='*200}")
    header_cells = [f"{c:<20}" if i <= 1 else f"{c:>14}" for i, c in enumerate(columns)]
    print(" | ".join(header_cells))
    print(f"{'='*200}")
    for row in rows[:20]:
        cells = [f"{str(v):<20}" if i <= 1 else f"{str(v):>14}" for i, v in enumerate(row)]
        print(" | ".join(cells))
    if len(rows) > 20:
        print(f"... 还有 {len(rows) - 20} 行(见 CSV 文件)")
if __name__ == "__main__":
    run_report()

View File

@@ -0,0 +1,377 @@
"""
会员消费报表对比:数据库 vs 飞球导出 CSV
对比范围2025-12-09 ~ 2026-03-10
飞球 CSVtmp/结账记录_朗朗桌球_20251209_20260310.csv
通过小票号order_settle_id反查数据库获取会员信息
"""
import os
import csv
from decimal import Decimal
from collections import defaultdict
from dotenv import load_dotenv
# Load .env so the DSN/path settings below are available.
load_dotenv()
# Prefer the test-database DSN; fall back to PG_DSN.
PG_DSN = os.environ.get("TEST_DB_DSN") or os.environ.get("PG_DSN")
if not PG_DSN:
    raise RuntimeError("TEST_DB_DSN / PG_DSN 未配置")
# CSV reports go under dirname(SYSTEM_ANALYZE_ROOT)/member_reports (see run_compare).
SYSTEM_ANALYZE_ROOT = os.environ.get("SYSTEM_ANALYZE_ROOT")
if not SYSTEM_ANALYZE_ROOT:
    raise RuntimeError("SYSTEM_ANALYZE_ROOT 未配置")
# Imported after the config checks so a missing setting fails fast with a clear error.
import psycopg2
# Feiqiu settlement export: <repo_root>/tmp/结账记录_朗朗桌球_20251209_20260310.csv
# (three dirname() hops — assumes this file lives at scripts/<sub>/<name>.py)
FEIQIU_CSV = os.path.join(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
    "tmp", "结账记录_朗朗桌球_20251209_20260310.csv"
)
# ── SQL: 按小票号批量查数据库记录 + 会员信息 ──
SQL_BY_TICKET = r"""
SELECT
sh.order_settle_id,
sh.settle_type,
sh.pay_time,
COALESCE(sh.table_charge_money, 0) AS table_charge_money,
COALESCE(sh.goods_money, 0) AS goods_money,
COALESCE(sh.assistant_pd_money, 0) AS assistant_pd_money,
COALESCE(sh.assistant_cx_money, 0) AS assistant_cx_money,
COALESCE(sh.electricity_money, 0) AS electricity_money,
COALESCE(sh.table_charge_money, 0)
+ COALESCE(sh.goods_money, 0)
+ COALESCE(sh.assistant_pd_money, 0)
+ COALESCE(sh.assistant_cx_money, 0)
+ COALESCE(sh.electricity_money, 0) AS items_sum,
sh.consume_money,
sh.member_id,
COALESCE(NULLIF(dm.mobile, ''), NULLIF(sh.member_phone, '')) AS phone,
COALESCE(NULLIF(dm.nickname, ''), NULLIF(sh.member_name, '')) AS member_name
FROM dwd.dwd_settlement_head sh
LEFT JOIN dwd.dim_member dm
ON dm.member_id = sh.member_id AND dm.scd2_is_current = 1
WHERE sh.order_settle_id = ANY(%s)
"""
# ── SQL: 数据库全量汇总(同期) ──
SQL_DB_TOTAL = r"""
SELECT
COUNT(*) AS total_orders,
ROUND(SUM(COALESCE(table_charge_money,0) + COALESCE(goods_money,0)
+ COALESCE(assistant_pd_money,0) + COALESCE(assistant_cx_money,0)
+ COALESCE(electricity_money,0)), 2) AS total_items_sum,
ROUND(SUM(COALESCE(table_charge_money,0)), 2) AS total_table_charge,
ROUND(SUM(COALESCE(goods_money,0)), 2) AS total_goods,
ROUND(SUM(COALESCE(assistant_pd_money,0) + COALESCE(assistant_cx_money,0)), 2) AS total_assistant,
ROUND(SUM(COALESCE(consume_money,0)), 2) AS total_consume_money,
COUNT(CASE WHEN member_id IS NOT NULL AND member_id != 0 THEN 1 END) AS member_orders,
COUNT(CASE WHEN member_id IS NULL OR member_id = 0 THEN 1 END) AS non_member_orders
FROM dwd.dwd_settlement_head
WHERE settle_type IN (1, 3)
AND pay_time >= '2025-12-09'
AND pay_time < '2026-03-11'
"""
def d(val: str) -> Decimal:
    """Safely convert a CSV cell to Decimal; None/blank becomes Decimal('0')."""
    cleaned = val.strip() if val else ""
    return Decimal(cleaned or "0")
def parse_feiqiu_csv(path: str) -> tuple[dict, dict]:
    """Parse the Feiqiu settlement CSV.

    Returns ``(tickets, totals)`` — the original annotation claimed ``dict``
    but the function has always returned this pair:
      - tickets: {小票号: per-order amounts (Decimal) and metadata}
      - totals:  aggregate Decimal sums per fee column, an order count, and
        a bill-type histogram (defaultdict).

    Rows whose ticket number is empty or '-' are skipped entirely and do not
    count toward any total.
    """
    tickets = {}
    totals = {
        "orders": 0, "consume": Decimal("0"), "table_fee": Decimal("0"),
        "goods": Decimal("0"), "service": Decimal("0"), "course": Decimal("0"),
        "incentive": Decimal("0"), "bill_types": defaultdict(int),
    }
    # utf-8-sig transparently strips the BOM that Excel-produced CSVs carry.
    with open(path, "r", encoding="utf-8-sig") as f:
        reader = csv.DictReader(f)
        for row in reader:
            ticket = row.get("小票号", "").strip()
            if not ticket or ticket == "-":
                continue
            totals["orders"] += 1
            consume = d(row.get("消费金额", "0"))
            table_fee = d(row.get("台费", "0"))
            goods = d(row.get("商品费", "0"))
            service = d(row.get("服务加收", "0"))
            course = d(row.get("课程费", "0"))
            incentive = d(row.get("激励费", "0"))
            totals["consume"] += consume
            totals["table_fee"] += table_fee
            totals["goods"] += goods
            totals["service"] += service
            totals["course"] += course
            totals["incentive"] += incentive
            bt = row.get("账单类型", "未知")
            totals["bill_types"][bt] += 1
            tickets[ticket] = {
                "consume": consume, "table_fee": table_fee,
                "goods": goods, "service": service, "course": course,
                "incentive": incentive, "bill_type": bt,
                "table_no": row.get("台桌号", ""),
                "pay_time": row.get("结账时间", ""),
            }
    return tickets, totals
def run_compare():
    """Cross-check the Feiqiu CSV export against dwd.dwd_settlement_head.

    Steps:
      1. Parse the Feiqiu CSV (per-ticket rows + aggregate totals).
      2. Batch-look-up every ticket (order_settle_id) in the DB, incl. member info.
      3. Compare aggregate totals, then per-order amounts (±0.02 tolerance).
      4. Aggregate per member and export two CSV reports under
         dirname(SYSTEM_ANALYZE_ROOT)/member_reports/.
    """
    print("=" * 90, flush=True)
    print("会员消费报表对比:数据库 vs 飞球导出", flush=True)
    print("对比范围2025-12-09 ~ 2026-03-10", flush=True)
    print("=" * 90, flush=True)
    # ── 1. 解析飞球 CSV ──
    print("\n📄 解析飞球 CSV ...", flush=True)
    if not os.path.exists(FEIQIU_CSV):
        print(f" ❌ 文件不存在: {FEIQIU_CSV}")
        return
    fq_tickets, fq_totals = parse_feiqiu_csv(FEIQIU_CSV)
    print(f" 订单数: {fq_totals['orders']}", flush=True)
    print(f" 消费金额合计: {fq_totals['consume']}", flush=True)
    print(f" 台费: {fq_totals['table_fee']}", flush=True)
    print(f" 商品费: {fq_totals['goods']}", flush=True)
    print(f" 服务加收(陪打): {fq_totals['service']}", flush=True)
    print(f" 课程费(超休): {fq_totals['course']}", flush=True)
    print(f" 激励费: {fq_totals['incentive']}", flush=True)
    print(f" 助教合计: {fq_totals['service'] + fq_totals['course']}", flush=True)
    print(f" 账单类型: {dict(fq_totals['bill_types'])}", flush=True)
    # ── 2. 用小票号反查数据库 ──
    print("\n🗄️ 用小票号反查数据库 ...", flush=True)
    ticket_ids = [int(t) for t in fq_tickets.keys() if t.isdigit()]
    print(f" 待查小票数: {len(ticket_ids)}", flush=True)
    db_records = {}
    with psycopg2.connect(PG_DSN, connect_timeout=15,
                          options="-c statement_timeout=300000") as conn:
        with conn.cursor() as cur:
            # 分批查(每批 500
            for i in range(0, len(ticket_ids), 500):
                batch = ticket_ids[i:i+500]
                cur.execute(SQL_BY_TICKET, (batch,))
                for row in cur.fetchall():
                    db_records[str(row[0])] = {
                        "order_settle_id": row[0],
                        "settle_type": row[1],
                        "pay_time": row[2],
                        "table_charge": float(row[3]),
                        "goods": float(row[4]),
                        "assistant_pd": float(row[5]),
                        "assistant_cx": float(row[6]),
                        "electricity": float(row[7]),
                        "items_sum": float(row[8]),
                        "consume_money": float(row[9]) if row[9] else 0,
                        "member_id": row[10],
                        "phone": row[11],
                        "member_name": row[12],
                    }
            # 全量汇总
            cur.execute(SQL_DB_TOTAL)
            db_total = cur.fetchone()
    print(f" 数据库匹配到: {len(db_records)}", flush=True)
    print(f" 飞球有但数据库无: {len(fq_tickets) - len(db_records)}", flush=True)
    # ── 3. 数据库全量 ──
    print(f"\n🗄️ 数据库全量(同期 settle_type IN 1,3:", flush=True)
    print(f" 订单数: {db_total[0]} (会员: {db_total[6]}, 非会员: {db_total[7]})", flush=True)
    print(f" items_sum: {db_total[1]}", flush=True)
    print(f" 台费: {db_total[2]}", flush=True)
    print(f" 商品费: {db_total[3]}", flush=True)
    print(f" 助教费: {db_total[4]}", flush=True)
    print(f" consume_money: {db_total[5]}", flush=True)
    # ── 4. 总量对比 ──
    print("\n" + "=" * 90, flush=True)
    print("📊 总量对比(飞球 CSV vs 数据库全量)", flush=True)
    print("=" * 90, flush=True)
    fq_assistant = float(fq_totals["service"] + fq_totals["course"])
    comparisons = [
        ("订单数", float(fq_totals["orders"]), float(db_total[0])),
        ("消费金额 vs items_sum", float(fq_totals["consume"]), float(db_total[1])),
        ("消费金额 vs consume_money", float(fq_totals["consume"]), float(db_total[5])),
        ("台费", float(fq_totals["table_fee"]), float(db_total[2])),
        ("商品费", float(fq_totals["goods"]), float(db_total[3])),
        ("助教费(服务+课程)", fq_assistant, float(db_total[4])),
    ]
    print(f"{'指标':<28} {'飞球CSV':>14} {'数据库':>14} {'差额':>14} {'差异%':>9}", flush=True)
    print("-" * 90, flush=True)
    for label, fq_val, db_val in comparisons:
        diff = db_val - fq_val
        pct = (diff / fq_val * 100) if fq_val else 0
        print(f"{label:<28} {fq_val:>14,.2f} {db_val:>14,.2f} {diff:>+14,.2f} {pct:>+8.2f}%", flush=True)
    # ── 5. 逐单对比:找差异订单 ──
    print("\n" + "=" * 90, flush=True)
    print("🔍 逐单对比(飞球消费金额 vs 数据库 items_sum", flush=True)
    print("=" * 90, flush=True)
    # 提前定义输出目录
    report_dir = os.path.join(os.path.dirname(SYSTEM_ANALYZE_ROOT), "member_reports")
    os.makedirs(report_dir, exist_ok=True)
    diffs = []
    only_fq = []
    for ticket, fq_row in fq_tickets.items():
        db_row = db_records.get(ticket)
        if not db_row:
            only_fq.append(ticket)
            continue
        fq_consume = float(fq_row["consume"])
        db_items = db_row["items_sum"]
        diff = abs(db_items - fq_consume)
        if diff > 0.02:  # 容差 2 分钱
            diffs.append({
                "ticket": ticket,
                "fq_pay_time": fq_row["pay_time"],
                "db_pay_time": str(db_row["pay_time"]) if db_row["pay_time"] else "",
                "fq_consume": fq_consume,
                "db_items_sum": db_items,
                "db_consume_money": db_row["consume_money"],
                "diff": db_items - fq_consume,
                "fq_table": float(fq_row["table_fee"]),
                "db_table": db_row["table_charge"],
                "fq_goods": float(fq_row["goods"]),
                "db_goods": db_row["goods"],
                "fq_assistant": float(fq_row["service"] + fq_row["course"]),
                "db_assistant": db_row["assistant_pd"] + db_row["assistant_cx"],
                "member_name": db_row["member_name"] or "",
                "phone": db_row["phone"] or "",
                "bill_type": fq_row["bill_type"],
                "table_no": fq_row["table_no"],
                "settle_type": db_row["settle_type"],
            })
    print(f" 总对比: {len(fq_tickets)}", flush=True)
    print(f" 完全匹配差额≤0.02: {len(fq_tickets) - len(only_fq) - len(diffs)}", flush=True)
    print(f" 有差异: {len(diffs)}", flush=True)
    print(f" 飞球有但数据库无: {len(only_fq)}", flush=True)
    if only_fq:
        print(f"\n 飞球有但数据库无的小票号前20:", flush=True)
        for t in only_fq[:20]:
            fq_row = fq_tickets[t]
            print(f" {t} 消费={fq_row['consume']} 类型={fq_row['bill_type']} 台桌={fq_row['table_no']}", flush=True)
    if diffs:
        # 导出完整差异明细 CSV
        diff_csv_path = os.path.join(report_dir, "order_diff_detail_20251209.csv")
        sorted_diffs = sorted(diffs, key=lambda x: abs(x["diff"]), reverse=True)
        with open(diff_csv_path, "w", newline="", encoding="utf-8-sig") as f:
            writer = csv.writer(f)
            writer.writerow(["小票号", "飞球结账时间", "DB结账时间", "账单类型", "台桌",
                             "settle_type", "飞球消费", "DB_items_sum", "DB_consume_money",
                             "差额", "飞球台费", "DB台费", "飞球商品", "DB商品",
                             "飞球助教", "DB助教", "会员昵称", "手机号"])
            for d_row in sorted_diffs:
                writer.writerow([
                    d_row["ticket"], d_row["fq_pay_time"], d_row["db_pay_time"],
                    d_row["bill_type"], d_row["table_no"], d_row["settle_type"],
                    f"{d_row['fq_consume']:.2f}", f"{d_row['db_items_sum']:.2f}",
                    f"{d_row['db_consume_money']:.2f}", f"{d_row['diff']:+.2f}",
                    f"{d_row['fq_table']:.2f}", f"{d_row['db_table']:.2f}",
                    f"{d_row['fq_goods']:.2f}", f"{d_row['db_goods']:.2f}",
                    f"{d_row['fq_assistant']:.2f}", f"{d_row['db_assistant']:.2f}",
                    d_row["member_name"], d_row["phone"],
                ])
        print(f"\n ✅ 差异明细已导出: {diff_csv_path}", flush=True)
        print(f"\n 差异订单前30按差额绝对值排序:", flush=True)
        print(f" {'小票号':<20} {'结账时间':<20} {'飞球消费':>10} {'DB items':>10} {'差额':>10} {'台桌':<10} {'会员':>8}", flush=True)
        print(f" {'-'*100}", flush=True)
        for d_row in sorted_diffs[:30]:
            print(f" {d_row['ticket']:<20} {d_row['fq_pay_time']:<20} {d_row['fq_consume']:>10,.2f} {d_row['db_items_sum']:>10,.2f} "
                  f"{d_row['diff']:>+10,.2f} {d_row['table_no']:<10} {d_row['member_name']:>8}", flush=True)
    # ── 6. 按会员归总(飞球 CSV 通过小票号反查会员) ──
    print("\n" + "=" * 90, flush=True)
    print("👤 按会员归总(飞球订单通过小票号反查数据库会员信息)", flush=True)
    print("=" * 90, flush=True)
    member_agg = defaultdict(lambda: {
        "name": "", "fq_consume": Decimal("0"), "db_items": 0.0,
        "fq_table": Decimal("0"), "fq_goods": Decimal("0"),
        "fq_assistant": Decimal("0"), "orders": 0,
    })
    no_member_count = 0
    no_db_match = 0
    for ticket, fq_row in fq_tickets.items():
        db_row = db_records.get(ticket)
        if not db_row:
            no_db_match += 1
            continue
        phone = db_row.get("phone") or ""
        if not phone:
            # 无会员有 member_id 但查不到手机号时用合成键归组
            if db_row.get("member_id") and db_row["member_id"] != 0:
                phone = f"member_{db_row['member_id']}"
            else:
                no_member_count += 1
                continue
        agg = member_agg[phone]
        agg["name"] = db_row.get("member_name") or agg["name"]
        agg["fq_consume"] += fq_row["consume"]
        agg["db_items"] += db_row["items_sum"]
        agg["fq_table"] += fq_row["table_fee"]
        agg["fq_goods"] += fq_row["goods"]
        agg["fq_assistant"] += fq_row["service"] + fq_row["course"]
        agg["orders"] += 1
    print(f" 有会员的订单: {sum(a['orders'] for a in member_agg.values())}", flush=True)
    print(f" 非会员订单: {no_member_count}", flush=True)
    print(f" 数据库无匹配: {no_db_match}", flush=True)
    print(f" 会员数: {len(member_agg)}", flush=True)
    # 导出 CSV
    csv_path = os.path.join(report_dir, "member_consumption_20251209_compare.csv")
    sorted_members = sorted(member_agg.items(), key=lambda x: float(x[1]["fq_consume"]), reverse=True)
    with open(csv_path, "w", newline="", encoding="utf-8-sig") as f:
        writer = csv.writer(f)
        writer.writerow(["会员昵称", "手机号", "飞球消费合计", "DB items_sum合计",
                         "差额", "飞球台费", "飞球商品费", "飞球助教费", "订单数"])
        for phone, agg in sorted_members:
            diff = agg["db_items"] - float(agg["fq_consume"])
            writer.writerow([
                agg["name"], phone,
                f"{float(agg['fq_consume']):.2f}",
                f"{agg['db_items']:.2f}",
                f"{diff:+.2f}",
                f"{float(agg['fq_table']):.2f}",
                f"{float(agg['fq_goods']):.2f}",
                f"{float(agg['fq_assistant']):.2f}",
                agg["orders"],
            ])
    print(f"\n ✅ 会员对比报表已导出: {csv_path}", flush=True)
    # 终端显示前 20
    print(f"\n {'会员昵称':<12} {'手机号':<14} {'飞球消费':>10} {'DB items':>10} {'差额':>10} {'单数':>5}", flush=True)
    print(f" {'-'*70}", flush=True)
    for phone, agg in sorted_members[:20]:
        diff = agg["db_items"] - float(agg["fq_consume"])
        print(f" {agg['name']:<12} {phone:<14} {float(agg['fq_consume']):>10,.2f} "
              f"{agg['db_items']:>10,.2f} {diff:>+10,.2f} {agg['orders']:>5}", flush=True)
    if len(sorted_members) > 20:
        print(f" ... 还有 {len(sorted_members) - 20} 位(见 CSV", flush=True)
if __name__ == "__main__":
    run_compare()

View File

@@ -0,0 +1,107 @@
"""
一次性脚本:将 docs/database/ 下的 ETL 专属 BD_Manual 文件迁移到
apps/etl/connectors/feiqiu/docs/database/ 对应子目录,
将迁移变更记录归档到 docs/database/_archived/。
"""
import shutil
from pathlib import Path
# Repo root: three levels up from this script (assumes scripts/<sub>/<name>.py).
ROOT = Path(__file__).resolve().parent.parent.parent
SRC = ROOT / "docs" / "database"
ETL_DB = ROOT / "apps" / "etl" / "connectors" / "feiqiu" / "docs" / "database"
# Destination for migration-change records; created lazily by move_file().
ARCHIVED = SRC / "_archived"
# 新建 cross_layer 目录ODS→DWD 跨层映射文档)
CROSS_LAYER = ETL_DB / "cross_layer"
CROSS_LAYER.mkdir(exist_ok=True)
# ── A. ODS→DWD 跨层映射 → ETL cross_layer/ ──
cross_layer_files = [
    "BD_Manual_assistant_accounts_master.md",
    "BD_Manual_assistant_service_records.md",
    "BD_Manual_goods_stock_movements.md",
    "BD_Manual_goods_stock_summary.md",
    "BD_Manual_member_balance_changes.md",
    "BD_Manual_recharge_settlements.md",
    "BD_Manual_site_tables_master.md",
    "BD_Manual_store_goods_master.md",
    "BD_Manual_store_goods_sales_records.md",
    "BD_Manual_tenant_goods_master.md",
    "BD_Manual_group_buy_package_details.md",
    "BD_Manual_goods_stock_warning_info.md",  # ODS→DWD 加列变更
]
# ── B. DWD/DWS 层文档 → ETL 对应子目录 ──
dwd_files = [
    "BD_Manual_dim_groupbuy_package_ex_detail_fields.md",
]
dws_files = [
    "BD_Manual_dws_goods_stock_summary.md",
    "BD_Manual_dws_project_tags.md",
    "BD_Manual_dws_assistant_order_contribution.md",
    "BD_Manual_dws_member_spending_power_index.md",
]
# ── C. 迁移变更记录 → _archived/ ──
archive_files = [
    "BD_Manual_20260301_cleanup_and_fixes.md",
    "BD_Manual_biz_date_function_and_mv_rebuild.md",
    "BD_Manual_fix_dim_staff_ex_rankname.md",
    "BD_Manual_fix_dws_assistant_daily_table_area.md",
    "BD_Manual_tenant_id_int_to_bigint.md",
]
# Accumulators for the summary printed at the end of the script.
moved = []
archived = []
skipped = []
def move_file(src_path: Path, dst_path: Path, label: str) -> bool:
    """Move *src_path* to *dst_path*, creating destination directories on demand.

    Args:
        src_path: File to move; a missing source is recorded in the
            module-level ``skipped`` list instead of raising.
        dst_path: Full destination path (its parent directory is created).
        label: Destination group name, included in the skip diagnostic.
            (Previously accepted but never used.)

    Returns:
        True when the file was moved, False when the source was missing
        (the original returned an implicit None here; False keeps the same
        truthiness for ``if move_file(...)`` callers but is explicit).
    """
    if not src_path.exists():
        # Record and continue: one missing doc must not abort the whole migration.
        skipped.append(f"[跳过] {src_path.name}{label})— 文件不存在")
        return False
    dst_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.move(str(src_path), str(dst_path))
    return True
# Execute the move plan group by group, preserving the original A → B → C order.
# The three near-identical loops are collapsed into one data-driven table:
# (file list, destination directory, label used in diagnostics and summaries).
_MOVE_PLAN = [
    (cross_layer_files, CROSS_LAYER, "cross_layer"),
    (dwd_files, ETL_DB / "DWD" / "changes", "DWD/changes"),
    (dws_files, ETL_DB / "DWS" / "changes", "DWS/changes"),
]
for files, dest_dir, label in _MOVE_PLAN:
    for f in files:
        if move_file(SRC / f, dest_dir / f, label):
            moved.append(f" {f}{label}/")
# C: archive migration change-records under docs/database/_archived/.
for f in archive_files:
    if move_file(SRC / f, ARCHIVED / f, "_archived"):
        archived.append(f" {f}")

# Summary report.
print("=== BD_Manual 文档整理完成 ===\n")
print(f"迁移到 ETL 模块 ({len(moved)} 个):")
for m in moved:
    print(m)
print(f"\n归档到 _archived/ ({len(archived)} 个):")
for a in archived:
    print(a)
if skipped:
    print(f"\n跳过 ({len(skipped)} 个):")
    for s in skipped:
        print(s)
# f-prefix dropped: this line has no placeholders (ruff F541).
print("\n保留在 docs/database/ 的文件:")
remaining = [p.name for p in SRC.glob("BD_Manual_*.md") if p.is_file()]
for r in sorted(remaining):
    print(f" {r}")

View File

@@ -1,6 +1,13 @@
"""
H5 原型页面批量截图脚本
iPhone 15 Pro Max: 430×932, DPR:3 → 输出 1290×N 像素 PNG
[已废弃 — 本 spec 不使用]
H5 原型页面批量截图脚本全页面截图430×932 视口)。
已被 anchor_compare.py 的逐段截图方案430×752 视口)取代。
保留原因:仍可用于快速获取全页面参考截图,但不作为像素对比的输入源。
替代工具scripts/ops/anchor_compare.py extract-h5 <page>
原始参数iPhone 15 Pro Max: 430×932, DPR:3 → 输出 1290×N 像素 PNG
"""
import asyncio
import sys

View File

@@ -0,0 +1,2 @@
> 主代理按本计划逐单元调度 4 种专职子代理(截图→审计→修正⇆验证)完成每屏还原。
> 所有操作在单元内闭环,不存在跨单元批量截图或批量对比阶段。

View File

@@ -0,0 +1,3 @@
> 主代理按本计划逐单元下发任务给子代理。
> 每个子代理接收一个单元后,独立完成截图→对比→审计→修正→验证的完整闭环。
> 不存在“批量截图”或“批量对比”阶段——所有操作在单元内闭环。

View File

@@ -0,0 +1,49 @@
# H5 → 微信小程序视觉还原 — 进度跟踪
> **主代理必读**:每次会话开始时先读本文件,确认当前状态后再下发任务。
> 每完成一个处理单元后立即更新本文件。
---
## 当前状态(会话开始时填写)
| 项目 | 内容 |
|---|---|
| **当前处理单元** | 未开始 |
| **下一个单元** | #1 board-finance/default/step-0 |
| **本次会话目标** | 未设定 |
| **MCP 状态** | 未检查 |
| **最后更新** | 2026-03-10 |
### MCP 就绪检查清单(每次会话开始时执行)
```
[ ] mcp_weixin_devtools_mcp_get_connection_status → 已连接
[ ] mcp_image_compare → 可用(测试 compare_images
[ ] Playwright MCP → 可用(测试 browser_run_code
[ ] 微信开发者工具已开启并显示目标页面
```
---
## 总览
| 指标 | 值 |
|------|-----|
| 总单元数 | 89 |
| 已完成 | 0 |
| 跳过 | 0 |
| 进行中 | 0 |
| 未开始 | 89 |
| 整体进度 | 0% |
---
## 前置任务
| # | 任务 | 状态 | 完成日期 | 备注 |
|---|------|------|----------|------|
| P0 | TS 零诊断基线检查 | ✅ 完成 | 2026-03-10 | 17 页面全部通过 |
| P1 | 跨页面共性偏差批量修复 | ✅ 完成 | 2026-03-10 | board-finance/coach/customer 三页 |
| P2 | 截图技术验证 | ✅ 完成 | 2026-03-10 | DPR=1.5 双端 645×1128 已验证 |
| P3 | AGENT-PLAYBOOK.md v4.2 更新 | ✅ 完成 | 2026-03-11 | 4种专职子代理、间距测量代理、裁剪修正 |

View File

@@ -0,0 +1,88 @@
## A 批次board-finance/default10 单元)
> H5 scrollHeight=5600maxScroll=484810 步
> 序列0, 600, 1200, 1800, 2400, 3000, 3600, 4200, 4800, 4848
| # | 单元 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|-----------|----------|-----------|------|------|
| 1 | default/step-0 | — | — | — | 未开始 | |
| 2 | default/step-600 | — | — | — | 未开始 | |
| 3 | default/step-1200 | — | — | — | 未开始 | |
| 4 | default/step-1800 | — | — | — | 未开始 | |
| 5 | default/step-2400 | — | — | — | 未开始 | |
| 6 | default/step-3000 | — | — | — | 未开始 | |
| 7 | default/step-3600 | — | — | — | 未开始 | |
| 8 | default/step-4200 | — | — | — | 未开始 | |
| 9 | default/step-4800 | — | — | — | 未开始 | |
| 10 | default/step-4848 | — | — | — | 未开始 | |
## A 批次board-finance/compare10 单元)
> 环比开启后页面高度可能变化scrollTop 序列需实测确认
| # | 单元 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|-----------|----------|-----------|------|------|
| 11 | compare/step-0 | — | — | — | 未开始 | |
| 12 | compare/step-600 | — | — | — | 未开始 | |
| 13 | compare/step-1200 | — | — | — | 未开始 | |
| 14 | compare/step-1800 | — | — | — | 未开始 | |
| 15 | compare/step-2400 | — | — | — | 未开始 | |
| 16 | compare/step-3000 | — | — | — | 未开始 | |
| 17 | compare/step-3600 | — | — | — | 未开始 | |
| 18 | compare/step-4200 | — | — | — | 未开始 | |
| 19 | compare/step-4800 | — | — | — | 未开始 | |
| 20 | compare/step-4827 | — | — | — | 未开始 | |
## A 批次board-coach4 单元单屏×4 维度)
| # | 单元 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|-----------|----------|-----------|------|------|
| 21 | perf/step-0 | — | — | — | 未开始 | |
| 22 | salary/step-0 | — | — | — | 未开始 | |
| 23 | sv/step-0 | — | — | — | 未开始 | |
| 24 | task/step-0 | — | — | — | 未开始 | |
## A 批次board-customer8 单元单屏×8 维度)
| # | 单元 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|-----------|----------|-----------|------|------|
| 25 | recall/step-0 | — | — | — | 未开始 | |
| 26 | potential/step-0 | — | — | — | 未开始 | |
| 27 | balance/step-0 | — | — | — | 未开始 | |
| 28 | recharge/step-0 | — | — | — | 未开始 | |
| 29 | recent/step-0 | — | — | — | 未开始 | |
| 30 | spend60/step-0 | — | — | — | 未开始 | |
| 31 | freq60/step-0 | — | — | — | 未开始 | |
| 32 | loyal/step-0 | — | — | — | 未开始 | |
## B 批次task-list + my-profile4 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 33 | step-0 | task-list | — | — | — | 未开始 | |
| 34 | step-600 | task-list | — | — | — | 未开始 | |
| 35 | step-676 | task-list | — | — | — | 未开始 | |
| 36 | step-0 | my-profile | — | — | — | 未开始 | |
## C 批次task-detail 系列17 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 37 | step-0 | task-detail | — | — | — | 未开始 | |
| 38 | step-600 | task-detail | — | — | — | 未开始 | |
| 39 | step-1200 | task-detail | — | — | — | 未开始 | |
| 40 | step-1800 | task-detail | — | — | — | 未开始 | |
| 41 | step-2243 | task-detail | — | — | — | 未开始 | |
| 42 | step-0 | task-detail-callback | — | — | — | 未开始 | |
| 43 | step-600 | task-detail-callback | — | — | — | 未开始 | |
| 44 | step-1200 | task-detail-callback | — | — | — | 未开始 | |
| 45 | step-1645 | task-detail-callback | — | — | — | 未开始 | |
| 46 | step-0 | task-detail-priority | — | — | — | 未开始 | |
| 47 | step-600 | task-detail-priority | — | — | — | 未开始 | |
| 48 | step-1200 | task-detail-priority | — | — | — | 未开始 | |
| 49 | step-1637 | task-detail-priority | — | — | — | 未开始 | |
| 50 | step-0 | task-detail-relationship | — | — | — | 未开始 | |
| 51 | step-600 | task-detail-relationship | — | — | — | 未开始 | |
| 52 | step-1200 | task-detail-relationship | — | — | — | 未开始 | |
| 53 | step-1523 | task-detail-relationship | — | — | — | 未开始 | |

View File

@@ -0,0 +1,98 @@
## D 批次详情页12 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 54 | step-0 | coach-detail | — | — | — | 未开始 | |
| 55 | step-600 | coach-detail | — | — | — | 未开始 | |
| 56 | step-1200 | coach-detail | — | — | — | 未开始 | |
| 57 | step-1800 | coach-detail | — | — | — | 未开始 | |
| 58 | step-2166 | coach-detail | — | — | — | 未开始 | |
| 59 | step-0 | customer-detail | — | — | — | 未开始 | |
| 60 | step-600 | customer-detail | — | — | — | 未开始 | |
| 61 | step-1200 | customer-detail | — | — | — | 未开始 | |
| 62 | step-1800 | customer-detail | — | — | — | 未开始 | |
| 63 | step-2318 | customer-detail | — | — | — | 未开始 | |
| 64 | step-0 | customer-service-records | — | — | — | 未开始 | |
| 65 | step-209 | customer-service-records | — | — | — | 未开始 | |
## E 批次绩效页面18 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 66 | step-0 | performance | — | — | — | 未开始 | |
| 67 | step-600 | performance | — | — | — | 未开始 | |
| 68 | step-1200 | performance | — | — | — | 未开始 | |
| 69 | step-1800 | performance | — | — | — | 未开始 | |
| 70 | step-2400 | performance | — | — | — | 未开始 | |
| 71 | step-3000 | performance | — | — | — | 未开始 | |
| 72 | step-3600 | performance | — | — | — | 未开始 | |
| 73 | step-4200 | performance | — | — | — | 未开始 | |
| 74 | step-4800 | performance | — | — | — | 未开始 | |
| 75 | step-5400 | performance | — | — | — | 未开始 | |
| 76 | step-6000 | performance | — | — | — | 未开始 | |
| 77 | step-6600 | performance | — | — | — | 未开始 | |
| 78 | step-6953 | performance | — | — | — | 未开始 | |
| 79 | step-0 | performance-records | — | — | — | 未开始 | |
| 80 | step-600 | performance-records | — | — | — | 未开始 | |
| 81 | step-1200 | performance-records | — | — | — | 未开始 | |
| 82 | step-1800 | performance-records | — | — | — | 未开始 | |
| 83 | step-1925 | performance-records | — | — | — | 未开始 | |
## F 批次对话页面3 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 84 | step-0 | chat | — | — | — | 未开始 | |
| 85 | step-309 | chat | — | — | — | 未开始 | |
| 86 | step-0 | chat-history | — | — | — | 未开始 | |
## G 批次其他3 单元)
| # | 单元 | 页面 | 初始差异率 | 修正轮次 | 最终差异率 | 状态 | 备注 |
|---|------|------|-----------|----------|-----------|------|------|
| 87 | step-0 | notes | — | — | — | 未开始 | |
| 88 | step-600 | notes | — | — | — | 未开始 | |
| 89 | step-957 | notes | — | — | — | 未开始 | |
---
## 主代理会话恢复流程
**每次新会话开始时,主代理必须执行以下步骤:**
```
步骤1读取本文件PROGRESS.md
→ 找到「当前状态」区块,确认「下一个单元」
→ 扫描单元表,找到第一个状态为「进行中」或「未开始」的行
步骤2MCP 就绪检查
→ mcp_weixin_devtools_mcp_get_connection_status
→ 若未连接等待10秒后重试最多3次
→ 3次失败后mcp_weixin_devtools_mcp_recompile 重新编译,再重试
步骤3更新「当前状态」区块
→ 填写「当前处理单元」和「本次会话目标」
步骤4从「下一个单元」开始下发任务
→ 严格按编号顺序,单元完成后立即更新本文件对应行
```
**单元状态说明:**
| 状态 | 含义 |
|------|------|
| 未开始 | 尚未处理 |
| 进行中 | 当前会话正在处理 |
| ✅ 通过 | 差异率 <5%,已收敛 |
| ⚠️ 跳过 | 5轮未收敛已记录差异继续下一单元 |
| 🔁 重写中 | 差异率 >20%,触发结构重写流程 |
---
## 变更日志
| 日期 | 变更 |
|------|------|
| 2026-03-11 | 新增会话恢复机制、MCP就绪检查、重写状态标记 |
| 2026-03-10 | 全部 89 单元初始化,前置任务 P0-P3 完成 |

View File

@@ -0,0 +1,51 @@
---
name: spacing-agent
description: >
间距测量专用子代理。用于 H5 原型与小程序页面之间精确间距/内边距/外边距的测量与 rpx 转换。
当审计代理发现偏差根因为间距不准确,或修正代理需要确认某元素精确 rpx 尺寸时调用。
---
# 间距测量子代理Spacing Agent
## 职责
精确测量 H5 页面任意元素的间距,输出 rpx 对比表,为修正代理提供可信赖的定量依据。
---
## 一、核心方法论
### 1.1 为什么不能只看 Tailwind 类名
Tailwind 类名提供理论尺寸,但实际渲染受嵌套、布局算法影响。必须用 getBoundingClientRect() 测量实际位置的场景:
| 场景 | 原因 |
|---|---|
| 嵌套层内边距 | 外层 padding + 内层 margin 叠加效果不直观 |
| 边框到边框间距 | 参考线不确定,只有 rect.top/bottom 可信 |
| flex/grid 元素 | gap 属性是容器的,不反映在子元素 rect 里 |
| 断行文本 | lineHeight computed 值可能是 normal需测 rect.height |
| 页面下方元素 | 需先 scrollTo 再测 |
### 1.2 换算公式
```
小程序 viewport 宽 = 750rpx = 430px
rpx = px × 1.7442(取偶数)
常用对照表:
2px → 4rpx 4px → 8rpx 6px → 10rpx
8px → 14rpx 10px → 18rpx 12px → 20rpx
14px → 24rpx 16px → 28rpx 20px → 34rpx
24px → 42rpx 28px → 50rpx 32px → 56rpx
```
### 1.3 五种间距类型与测量方法
| 间距类型 | 测量方法 | 小程序对应 |
|---|---|---|
| **内容容器 padding** | computedStyle.paddingTop/Bottom/Left/Right | padding: Xrpx |
| **相邻元素垂直间距** | B.rect.top - A.rect.bottom | margin-top 或父容器 gap |
| **边框内文字到边框** | text.rect.top - border.rect.top - paddingTop | 子元素 padding |
| **边框到边框(平行)** | B.rect.left - A.rect.right | gap 或 margin-left |
| **嵌套边框内边距** | inner.rect.top - outer.rect.top - borderTopWidth | 内层 margin/padding |

View File

@@ -0,0 +1,101 @@
## 二、测量工具measure_gaps.py
路径:`scripts/ops/measure_gaps.py`
### 2.1 基本用法
```bash
# 测量页面内所有 .task-card 元素的尺寸和间距
uv run python scripts/ops/measure_gaps.py task-list --selectors ".task-card"
# 测量多个选择器(按 DOM 顺序,计算相邻间距)
uv run python scripts/ops/measure_gaps.py board-finance --selectors ".summary-header" ".summary-content" ".grid-cols-3"
# 指定两个元素的直接间距
uv run python scripts/ops/measure_gaps.py task-list --pairs ".sticky-header" ".task-card:first-child"
# 页面中下方元素(需 scrollTop
uv run python scripts/ops/measure_gaps.py performance --selectors ".perf-section" --scroll 1200
```
### 2.2 输出解读
输出包含:
- 元素尺寸表top_px, h_px, paddingT/B, marginT/B, gap, fontSize, lineHeight, h_rpx
- 相邻元素垂直间距表gap_px 和 gap_rpx
- audit.md 可直接粘贴的 Markdown 表格
### 2.3 常用 CSS 选择器快查
| 元素类型 | 选择器示例 |
|---|---|
| 页面内边距容器 | `.px-4`, `.px-6`, `[class*="px-"]` |
| 卡片 | `.task-card`, `[class*="card"]` |
| 列表项间距 | `.list-item`, `li`, `[class*="item"]` |
| Sticky 头部 | `.sticky`, `.filter-bar`, `[class*="sticky"]` |
| Banner | `.banner-bg`, `[class*="banner"]` |
| 标签/徽章 | `.tag`, `.badge`, `[class*="tag"]` |
---
## 三、测量步骤(每页标准流程)
### 3.1 H5 侧测量
1. 确定要测量的元素(审计代理提供偏差元素列表)
2. 确定 CSS 选择器(参照 2.3 快查表)
3. 运行 measure_gaps.py记录输出的 px 和 rpx 尺寸
4. 如果元素在页面中下方,加上 `--scroll 目标scrollTop`
### 3.2 MP 侧反向验证
1. 在 MP 页面 WXML 中定位对应元素
2. 使用 `SelectorQuery`
```js
const query = wx.createSelectorQuery();
query.select('.task-card').boundingClientRect(res => {
console.log('top:', res.top, 'height:', res.height);
}).exec();
```
3. 比较 MP 实测高度px与理论 rpx 下的预期实际渲染尺寸
4. 差异 > 4rpx 则标记为 P3 偏差,需要修正
### 3.3 图像反推验证(处理截图偏差时使用)
当 diff 图显示某个元素位置偏移时,用截图像素反推实际间距:
```
实际间距(px) = diff 图中偏移像素数 ÷ DPR
DPR = 1.5
示例diff 图中元素 A 比 H5 偏下 9 像素
实际偏差 = 9 / 1.5 = 6px = 10rpx
应缩小 WXSS 中对应 margin/padding 10rpx
```
---
## 四、输出格式audit.md 间距表
将 measure_gaps.py 输出的 Markdown 表格直接填入 audit.md 的 G 项:
```markdown
## G. 间距测量表
| 元素 | H5 top_px | H5 高度px | 预期 rpx | MP 实测 rpx | 差异 | 处理 |
|---|---|---|---|---|---|---|
| .task-card | 88.0 | 84.0 | 146rpx | 140rpx | -6rpx | 将 height 改为 146rpx |
| .task-card 间距 | - | - | 14rpx | 12rpx | -2rpx | margin-bottom 改为 14rpx |
| .sticky-header | 0.0 | 44.0 | 76rpx | 76rpx | 0 | 匹配 ✓ |
```
---
## 五、调用时机
| 场景 | 调用方 | 输出用途 |
|---|---|---|
| §0.8 迁移前预计算 | 主代理在 step-0 审计前主动调用 | 填入 WXSS 的初始内边距/外边距/gap 尺寸 |
| 审计代理发现间距偏差 | 审计代理调用 | 补充 audit.md G 项 |
| 修正代理需要确认尺寸 | 修正代理调用 | 确认正确 rpx 尺寸再写入 WXSS |
| 差异率无法下降 | 修正代理调用 | 精确定位剩余偏差来源 |