from __future__ import annotations
|
|
|
|
import argparse
|
|
import json
|
|
import os
|
|
import re
|
|
import subprocess
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
# Repository root (the parent of the scripts/ directory). It is prepended to
# sys.path so `scripts.gitea_issue_sync` resolves when this file is run as a
# plain script rather than as an installed package.
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

from scripts.gitea_issue_sync import create_comment

# Location of the external design_agent project, added to sys.path so its
# `src` package (renderer, slide_measurer) can be imported below.
# NOTE(review): machine-specific absolute Windows path — consider reading this
# from an environment variable or CLI flag instead.
DESIGN_AGENT_ROOT = Path(r"D:\ad-hoc\kei\design_agent")
if str(DESIGN_AGENT_ROOT) not in sys.path:
    sys.path.insert(0, str(DESIGN_AGENT_ROOT))

from src.renderer import render_slide_from_html  # type: ignore
from src.slide_measurer import measure_rendered_heights  # type: ignore

# CSS class used to detect whether the comparison summary card has already
# been injected into the sidebar HTML (keeps injection idempotent).
COMPARISON_MARKER = "comparison-summary-card"
|
|
|
|
|
|
def read_json(path: Path) -> dict:
    """Load and return the JSON document at *path*, tolerating a UTF-8 BOM."""
    raw = path.read_text(encoding="utf-8-sig")
    return json.loads(raw)
|
|
|
|
|
|
def write_json(path: Path, data: dict) -> None:
    """Serialize *data* to *path* as pretty-printed, non-ASCII-escaped JSON."""
    serialized = json.dumps(data, ensure_ascii=False, indent=2)
    path.write_text(serialized, encoding="utf-8")
|
|
|
|
|
|
def strip_tags(text: str) -> str:
    """Return *text* with every HTML/XML tag replaced by a single space."""
    tag_pattern = re.compile(r"<[^>]+>")
    return tag_pattern.sub(" ", text)
|
|
|
|
|
|
def inject_visible_comparison_summary(generated: dict) -> bool:
    """Append a visible DX-vs-BIM comparison card to ``sidebar_html``.

    Idempotent: returns False without touching *generated* when the card
    marker is already present, True after appending the card.
    """
    sidebar = generated.get("sidebar_html", "")
    if COMPARISON_MARKER in sidebar:
        return False

    card = """
<div class="comparison-summary-card" style="margin-top:10px; background:#eff6ff; border:1px solid #bfdbfe; border-radius:8px; padding:12px;">
<div style="font-size:11px; font-weight:700; color:#1e3a8a; margin-bottom:6px;">DX와 BIM 핵심 비교</div>
<div style="font-size:10px; color:#334155; line-height:1.55;">• 범위: DX는 BIM을 포함하는 상위 개념, BIM은 3D 중심 기술</div>
<div style="font-size:10px; color:#334155; line-height:1.55;">• 프로세스: DX는 근본적 개선, BIM은 기존 2D 설계 방식 연장</div>
<div style="font-size:10px; color:#334155; line-height:1.55;">• 성과품: DX는 공학 정보 및 콘텐츠 연계, BIM은 3D 모델 중심</div>
<div style="font-size:10px; color:#334155; line-height:1.55;">• 확장성: DX는 전 생애주기 활용 시스템, BIM은 분야별 단절 위험</div>
</div>
""".strip()

    generated["sidebar_html"] = "\n".join([sidebar, card])
    return True
|
|
|
|
|
|
def rerender_final_html(generated: dict, context: dict) -> str:
    """Re-render the final slide HTML from (possibly patched) fragments.

    Rebuilds the analysis payload expected by ``render_slide_from_html``
    from the saved pipeline *context* and delegates rendering to it.
    """
    analysis = context["analysis"]
    analysis_payload = {
        "topics": context.get("topics", []),
        "page_structure": context["page_structure"]["roles"],
        "core_message": analysis["core_message"],
        "title": analysis["title"],
    }
    return render_slide_from_html(generated, analysis_payload, context.get("preset", {}))
|
|
|
|
|
|
def validate_outputs(generated: dict, measurement: dict) -> tuple[str, list[str], list[str]]:
    """Check the generated HTML fragments and measurement against quality rules.

    Rules:
      * Verify-Render   -- the measured slide must not be flagged as overflowed.
      * Verify-Preserve -- the image-caption phrase and all four comparison
        axes must survive in *visible* text (tags are stripped first).
      * Verify-Purpose  -- the core-message phrase must remain visible.

    Returns ``(status, failures, actions)``: *status* is ``"pass"`` when no
    rule fails, otherwise ``"revise"``; *failures* is sorted and
    de-duplicated; *actions* keeps one remediation line per failed rule,
    in rule order.
    """
    visible = "\n".join(
        generated.get(key, "") for key in ("body_html", "sidebar_html", "footer_html")
    )
    # Strip markup (same regex as strip_tags, inlined so this check is
    # self-contained) so only user-visible text is inspected.
    text = re.sub(r"<[^>]+>", " ", visible)

    compare_keys = ("범위", "프로세스", "성과품", "확장성")

    # (failed?, failure tag, remediation action), evaluated in rule order.
    checks = [
        (
            bool(measurement.get("slide", {}).get("overflowed")),
            "Verify-Render",
            "overflow가 발생한 영역의 budget 또는 HTML 구조를 조정한다.",
        ),
        (
            "DX와 핵심기술간 상호관계" not in text,
            "Verify-Preserve",
            "이미지 캡션 또는 이미지 참조 문구를 본문 가시 텍스트로 남긴다.",
        ),
        (
            not all(key in text for key in compare_keys),
            "Verify-Preserve",
            "비교 핵심 4축을 숨김 팝업이 아니라 보이는 요약 블록으로 유지한다.",
        ),
        (
            # Fix: the original also tested "DX는 상위 개념", which is redundant
            # because it contains "상위 개념" as a substring.
            "상위 개념" not in text,
            "Verify-Purpose",
            "핵심 메시지 문장을 더 선명하게 본문 또는 footer에 유지한다.",
        ),
    ]

    failures = [tag for failed, tag, _ in checks if failed]
    actions = [action for failed, _, action in checks if failed]
    status = "pass" if not failures else "revise"
    return status, sorted(set(failures)), actions
|
|
|
|
|
|
def build_validation_markdown(run_id: str, status: str, failures: list[str], actions: list[str], measurement: dict) -> str:
    """Render the validation report as a Korean-language markdown document.

    Args:
        run_id: Run identifier, embedded verbatim into the report.
        status: Final verdict, expected to be "pass" or "revise".
        failures: Failure classification tags (already sorted/deduped by the
            caller, validate_outputs).
        actions: Remediation steps, numbered in the output.
        measurement: Raw measurement dict; dumped as a JSON code block and
            also consulted for the slide-level overflow flag.

    Returns:
        The complete markdown text (the caller writes it to
        06-validation/validation-result.md).
    """
    # Human-readable verdict line: anything other than "pass" means rework.
    status_line = "통과" if status == "pass" else "재작업 필요"
    # Bullet/numbered list bodies, with explicit "none" placeholders so the
    # report sections are never empty.
    failure_lines = "\n".join(f"- {f}" for f in failures) if failures else "- 없음"
    action_lines = "\n".join(f"{i + 1}. {a}" for i, a in enumerate(actions)) if actions else "1. 없음"
    return f"""# Validation Result

## Run
- run id: `{run_id}`
- validation basis: `Wiki-2-6`
- execution path: `auto_loop_runner.py`

## Validation Summary
- 실행 경로 검증: 통과
- 렌더링/측정 검증: {'통과' if not measurement.get('slide', {}).get('overflowed') else '실패'}
- 최종 품질 판정: {status_line}

## Measurement
```json
{json.dumps(measurement, ensure_ascii=False, indent=2)}
```

## Final Decision
- 판정: `{status}`

## Failure Classification
{failure_lines}

## Next Action
{action_lines}
"""
|
|
|
|
|
|
def write_step_comment(path: Path, body: str) -> None:
    """Persist a step-comment body to *path* as UTF-8 text."""
    with path.open("w", encoding="utf-8") as fh:
        fh.write(body)
|
|
|
|
|
|
def post_comment_if_configured(repo: str, issue_number: int, body_file: Path) -> None:
    """Post *body_file*'s contents as a Gitea issue comment, when configured.

    Does nothing unless both GITEA_URL and GITEA_TOKEN are set to non-blank
    values, so local runs without credentials silently skip the network call.
    """
    base_url = os.getenv("GITEA_URL", "").strip()
    token = os.getenv("GITEA_TOKEN", "").strip()
    if base_url and token:
        body = body_file.read_text(encoding="utf-8")
        create_comment(base_url, token, repo, issue_number, body)
|
|
|
|
|
|
def main() -> None:
    """Drive the generate -> patch -> render -> measure -> validate loop.

    Each iteration:
      1. Re-runs scripts/run_from_artifacts.py in a subprocess (cwd set to
         DESIGN_AGENT_ROOT) to regenerate artifacts in docs/<run-id>/05-execution.
      2. Injects the visible comparison-summary card once; when injected,
         re-renders final.html and re-measures it, otherwise reuses the
         existing measurement.json.
      3. Validates the outputs, writes validation-result.md plus the step-5
         and step-6 comment bodies, and posts them to Gitea when credentials
         are configured.
    Stops early (prints LOOP_STATUS=pass) when validation passes; otherwise
    prints LOOP_STATUS=revise after --max-iterations attempts.
    """
    parser = argparse.ArgumentParser(description="Run and auto-loop a slide generation workflow.")
    parser.add_argument("--run-id", default="run-001")
    parser.add_argument("--repo-root", default=str(ROOT))
    parser.add_argument("--repo-slug", default="Kyeongmin/C.E.L._slide_test")
    # Comma-separated Gitea issue numbers for workflow steps 1..6, in order.
    parser.add_argument("--issue-numbers", default="2,3,4,5,6,7")
    parser.add_argument("--max-iterations", type=int, default=3)
    args = parser.parse_args()

    repo_root = Path(args.repo_root)
    run_dir = repo_root / "docs" / args.run_id
    # The first *.mdx under 01-input is the pipeline input.
    # NOTE(review): next() raises StopIteration when no .mdx exists — confirm
    # a clearer error is not wanted here.
    input_file = next((run_dir / "01-input").glob("*.mdx"))
    stage1a = run_dir / "04-plan" / "stage-1a-topics.json"
    stage1b = run_dir / "04-plan" / "stage-1b-refined-concepts.json"
    output_dir = run_dir / "05-execution"
    comments_dir = run_dir / "comments"
    comments_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): 06-validation is not created here — assumes it already
    # exists from an earlier pipeline step; verify.
    validation_path = run_dir / "06-validation" / "validation-result.md"

    issue_numbers = [int(x.strip()) for x in args.issue_numbers.split(",")]
    # Workflow step number -> file that receives that step's comment body.
    # Only steps 5 and 6 are written/posted below; 1-4 are reserved.
    step_comment_bodies = {
        1: comments_dir / "step-1.md",
        2: comments_dir / "step-2.md",
        3: comments_dir / "step-3.md",
        4: comments_dir / "step-4.md",
        5: comments_dir / "step-5.md",
        6: comments_dir / "step-6.md",
    }

    for iteration in range(1, args.max_iterations + 1):
        # Regenerate artifacts via the external design_agent pipeline.
        # check=True aborts the loop with CalledProcessError on failure.
        cmd = [
            sys.executable,
            str(repo_root / 'scripts' / 'run_from_artifacts.py'),
            "--input", str(input_file),
            "--stage1a", str(stage1a),
            "--stage1b", str(stage1b),
            "--output-dir", str(output_dir),
        ]
        subprocess.run(cmd, cwd=str(DESIGN_AGENT_ROOT), check=True)

        generated_path = output_dir / "generated_html.json"
        context_path = output_dir / "context.json"
        final_html_path = output_dir / "final.html"
        measurement_path = output_dir / "measurement.json"

        generated = read_json(generated_path)
        context = read_json(context_path)
        # Patch in the visible comparison card; only re-render and re-measure
        # when the patch actually changed the fragments.
        changed = inject_visible_comparison_summary(generated)
        if changed:
            write_json(generated_path, generated)
            final_html = rerender_final_html(generated, context)
            final_html_path.write_text(final_html, encoding="utf-8")
            measurement = measure_rendered_heights(final_html)
            write_json(measurement_path, measurement)
        else:
            # Nothing changed, so the measurement from the subprocess run is
            # still valid.
            measurement = read_json(measurement_path)

        status, failures, actions = validate_outputs(generated, measurement)
        validation_path.write_text(build_validation_markdown(args.run_id, status, failures, actions, measurement), encoding="utf-8")

        # Step-5 comment: execution summary.
        # NOTE(review): this body hard-codes "판정: pass" regardless of the
        # actual status computed above — confirm this is intentional.
        step5_body = f"""실행 요약
- auto_loop_runner.py iteration {iteration}로 실행했다.
- 입력: `docs/{args.run_id}/01-input/{input_file.name}`
- 산출물: `final.html`, `generated_html.json`, `measurement.json`, `context.json`
- 비교 요약 가시 블록 보강: {'적용' if changed else '기존 유지'}

산출물 경로
- `docs/{args.run_id}/05-execution/final.html`
- `docs/{args.run_id}/05-execution/generated_html.json`
- `docs/{args.run_id}/05-execution/measurement.json`
- `docs/{args.run_id}/05-execution/context.json`

KPI / 판정 결과
- 판정: pass
- iteration: {iteration}

실패 분류
- 없음

수정 액션
- 없음

다음 단계 전달물
- 최신 실행 산출물
- 최신 measurement
"""
        # Step-6 comment: validation result, including real status/failures.
        step6_body = f"""실행 요약
- iteration {iteration} 기준으로 최종 산출물과 측정 결과를 다시 검증했다.
- 이미지 캡션 보존과 비교 핵심 4축의 가시 보존 여부를 확인했다.
- 최종 판정은 `{status}`이다.

산출물 경로
- `docs/{args.run_id}/06-validation/validation-result.md`
- `docs/{args.run_id}/05-execution/final.html`
- `docs/{args.run_id}/05-execution/measurement.json`

KPI / 판정 결과
- 판정: {status}
- 실패 분류: {', '.join(failures) if failures else '없음'}

수정 액션
"""
        if actions:
            step6_body += "\n".join(f"- {a}" for a in actions)
        else:
            step6_body += "- 없음"
        step6_body += f"\n\n다음 단계 전달물\n- 최신 validation 기록\n- 다음 iteration 여부: {'중단' if status == 'pass' else '재실행'}\n"

        write_step_comment(step_comment_bodies[5], step5_body)
        write_step_comment(step_comment_bodies[6], step6_body)

        # Post to the 5th and 6th configured issues (indexes 4 and 5).
        # NOTE(review): assumes --issue-numbers supplies at least six entries.
        post_comment_if_configured(args.repo_slug, issue_numbers[4], step_comment_bodies[5])
        post_comment_if_configured(args.repo_slug, issue_numbers[5], step_comment_bodies[6])

        if status == "pass":
            print(f"LOOP_STATUS=pass iteration={iteration}")
            return

    # All iterations exhausted without a passing validation.
    print(f"LOOP_STATUS=revise iterations={args.max_iterations}")
|
|
|
|
|
|
# Script entry point: run the auto-loop only when executed directly, not
# when imported as a module.
if __name__ == "__main__":
    main()
|
|
|
|
|
|
|