# File: C.E.L._slide_test/scripts/auto_loop_runner.py
# (671 lines, 28 KiB, Python — file-listing header converted to a comment so the module parses)
from __future__ import annotations
import argparse
import json
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import Any
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
sys.path.insert(0, str(ROOT))
from scripts.gitea_issue_sync import create_comment
from scripts.raw_bootstrap import rebuild_run_from_raw
DESIGN_AGENT_ROOT = Path(r"D:\ad-hoc\kei\design_agent")
if str(DESIGN_AGENT_ROOT) not in sys.path:
sys.path.insert(0, str(DESIGN_AGENT_ROOT))
from src.renderer import render_slide_from_html # type: ignore
from src.slide_measurer import measure_rendered_heights, capture_slide_screenshot, settings # type: ignore
from selenium import webdriver # type: ignore
from selenium.webdriver.chrome.options import Options # type: ignore
from selenium.webdriver.common.by import By # type: ignore
# CSS class markers that validate_outputs() requires in the generated body HTML
# (run001 mode): a visible comparison-summary card and a relation diagram.
COMPARISON_MARKER = "comparison-summary-card"
RELATION_MARKER = "relation-diagram-card"
# The four comparison-axis labels (scope / process / deliverables / scalability)
# that must all appear in the slide's visible text.
COMPARE_KEYS = ["범위", "프로세스", "성과품", "확장성"]
# Core-message phrases; CORE_MESSAGE_MARKERS lists accepted visible-text
# spelling variants per message — any one variant satisfies the check.
# NOTE(review): CORE_MESSAGE_KEYS appears unused in this file — confirm before removing.
CORE_MESSAGE_KEYS = ["DX는 상위 개념", "BIM은 핵심 기술"]
CORE_MESSAGE_MARKERS = [("상위 개념", "상위개념"), ("핵심 기술", "핵심 인프라 기술")]
# Phrase that must reference the DX/core-technology relationship image.
IMAGE_REFERENCE_KEY = "DX와 핵심기술간 상호관계"
def read_json(path: Path) -> dict:
    """Load a JSON document from *path*, tolerating a leading UTF-8 BOM."""
    raw = path.read_text(encoding="utf-8-sig")
    return json.loads(raw)
def write_json(path: Path, data: dict) -> None:
    """Write *data* to *path* as pretty-printed UTF-8 JSON, keeping non-ASCII literal."""
    serialized = json.dumps(data, ensure_ascii=False, indent=2)
    path.write_text(serialized, encoding="utf-8")
def strip_tags(text: str) -> str:
    """Replace every HTML/XML tag in *text* with a single space."""
    tag_pattern = re.compile(r"<[^>]+>")
    return tag_pattern.sub(" ", text)
def zone_overflow_names(measurement: dict) -> list[str]:
    """Return, in fixed order, the zone names whose measurement reports an overflow."""
    zones = measurement.get("zones", {})
    return [
        zone_name
        for zone_name in ("body", "sidebar", "footer")
        if zones.get(zone_name, {}).get("overflowed")
    ]
def validate_outputs(
    generated: dict,
    measurement: dict,
    required_titles: list[str],
    run_mode: str,
    layout_template: str = "",
) -> tuple[str, list[str], list[str]]:
    """Gate generated slide HTML against render and content-quality checks.

    Returns (status, failures, actions): status is "pass" or "revise";
    failures is a sorted, de-duplicated list of failure tags; actions keeps
    first-seen order of suggested fixes.

    NOTE(review): several action strings below contain runs of `?` that look
    like mojibake from a lost encoding — confirm against the original text.
    """
    body_html = generated.get("body_html", "")
    sidebar_html = generated.get("sidebar_html", "")
    footer_html = generated.get("footer_html", "")
    # Flatten all three zones to plain text for keyword/visibility checks.
    visible_text = strip_tags("\n".join([body_html, sidebar_html, footer_html]))
    failures: list[str] = []
    actions: list[str] = []
    slide_info = measurement.get("slide", {})
    zones_info = measurement.get("zones", {})
    slide_overflow = slide_info.get("overflowed")
    zone_overflows = zone_overflow_names(measurement)
    # Empty slide or zone data means the measurement stages produced nothing.
    measurement_missing = not slide_info or not zones_info
    if measurement_missing:
        failures.append("Verify-Measurement")
        actions.append("?? ??? ?? ?? stage 3/4 ???? ?? ???? ??.")
    if slide_overflow:
        failures.append("Verify-RenderSlide")
        actions.append("slide ?? overflow? ????? layout budget ?? ?? ???? ??? ????.")
    if zone_overflows:
        failures.append("Verify-RenderZone")
        actions.append(f"overflow? ??? zone({', '.join(zone_overflows)})? content budget, block ?, typography? ?????.")
    # Placeholder-leakage check on the visible text.
    if '???' in visible_text or '?? ??' in visible_text:
        failures.append("Verify-Placeholder")
        actions.append("placeholder? ?? ??? ????, ?? ??/???? ?? ???.")
    matched_titles = sum(1 for title in required_titles if title and title in visible_text)
    # Length of the whitespace-normalized visible text (density proxy).
    visible_len = len(re.sub(r'\s+', ' ', visible_text).strip())
    # Baseline: at least 2 titles must match (3 when three or more are required).
    if matched_titles < max(2, min(len(required_titles), 3)):
        failures.append("Verify-SectionTitles")
        actions.append("?? ?? ??? ?? ???? ? ????? ????.")
    if run_mode == 'run001':
        # run001 mode: every core message must be visible via at least one
        # accepted spelling variant.
        core_message_ok = all(any(marker in visible_text for marker in variants) for variants in CORE_MESSAGE_MARKERS)
        if not core_message_ok:
            failures.append("Verify-CoreMessage")
            actions.append("?? ??? ???? `?? ??`? `?? ??/?? ??? ??` ??? ?? ???? ??? ????? ????.")
        if IMAGE_REFERENCE_KEY not in visible_text:
            failures.append("Verify-ImageRef")
            actions.append("???/?? ?? ?? `DX? ????? ????`? ?? ??? ?? ?? ???? ????.")
        # Comparison card must exist in the DOM AND all four axis labels be visible.
        comparison_visible = (COMPARISON_MARKER in body_html) and all(key in visible_text for key in COMPARE_KEYS)
        if not comparison_visible:
            failures.append("Verify-ComparisonVisible")
            actions.append("?? ?? 4?(??, ????, ???, ???)? ??? ?? ??? ?? ???? ????.")
        if RELATION_MARKER not in body_html:
            failures.append("Verify-DesignStructure")
            actions.append("?? ??? ???? ??? ??? ??? ?? ?? ??? ????.")
    else:
        # Generic mode: density/structure thresholds instead of fixed markers.
        if visible_len < 420:
            failures.append("Verify-ContentDensity")
            actions.append("??? ?? ??? ?? ?? ???? ?? ?? ??? ????.")
        if not body_html:
            failures.append("Verify-DesignStructure")
            actions.append("body ??? ?? ??? ?? ?? ?? ??? ???.")
        # Stricter coverage: all but one of the non-empty titles must match.
        if matched_titles < max(3, len([title for title in required_titles if title]) - 1):
            failures.append("Verify-SectionTitles")
            actions.append("?? ?? ??? ? ?? ?? ?? ???? ????.")
        if layout_template == "B_GOAL":
            # B_GOAL template requires three named detail sections in the body.
            for marker, reason in [
                ("Goal details", "?? ?? ?? ?? ?? ???? ????."),
                ("Process details", "?? ?? ?? ?? ???? ????."),
                ("Stakeholder details", "??? ???? ?? ?? ???? ????."),
            ]:
                if marker not in body_html:
                    failures.append("Verify-DesignStructure")
                    actions.append(reason)
            # Bullet count floor (counts "<li" prefixes, so both <li> and <li ...>).
            if body_html.count("<li") < 10:
                failures.append("Verify-ContentDensity")
                actions.append("??/??/?? ??? ?? bullet ?? ?? ?? ??? ???.")
        elif layout_template == "B_RPP":
            # B_RPP: the first three section titles must appear in body HTML.
            for title in required_titles[:3]:
                if title and title not in body_html:
                    failures.append("Verify-SectionTitles")
                    actions.append("??/??/??? ?? ?? ??? ??? ?? ????.")
            if body_html.count("<li") < 14:
                failures.append("Verify-ContentDensity")
                actions.append("??/??/?? ??? ?? bullet ?? ??? ????? ??? ???.")
    if failures:
        # Failures are de-duplicated and sorted; actions keep insertion order.
        return "revise", sorted(set(failures)), list(dict.fromkeys(actions))
    return "pass", [], []
def build_validation_markdown(run_id: str, status: str, failures: list[str], actions: list[str], measurement: dict, retry_plan: dict | None = None) -> str:
    """Render the validation-result markdown report for one loop iteration.

    The report embeds the raw measurement JSON, render-gate results per zone,
    and (optionally) the retry plan. Markdown body text is Korean by design.
    """
    # Missing slide measurement defaults to overflowed=True (fail-safe).
    slide_overflow = measurement.get("slide", {}).get("overflowed", True)
    zones = measurement.get("zones", {})
    zone_lines = []
    for name in ("body", "sidebar", "footer"):
        zone = zones.get(name, {})
        zone_lines.append(
            f"- {name}: overflowed={zone.get('overflowed')} excess_px={zone.get('excess_px')} block_count={zone.get('block_count')}"
        )
    # Render gate passes only when neither the slide nor any zone overflowed.
    render_ok = (not slide_overflow) and (not zone_overflow_names(measurement))
    status_line = "통과" if status == "pass" else "재작업 필요"
    failure_lines = "\n".join(f"- {f}" for f in failures) if failures else "- 없음"
    action_lines = "\n".join(f"{i + 1}. {a}" for i, a in enumerate(actions)) if actions else "1. 없음"
    retry_section = ""
    if retry_plan:
        retry_section = f"""
## Retry Plan
```json
{json.dumps(retry_plan, ensure_ascii=False, indent=2)}
```
"""
    # chr(10) is '\n' — f-string expressions cannot contain a raw backslash
    # on older Python versions, hence the indirection.
    return f"""# Validation Result
## Run
- run id: `{run_id}`
- validation basis: `Wiki-2-6`
- execution path: `auto_loop_runner.py`
## Validation Summary
- 실행 경로 검증: 통과
- 렌더링/측정 검증: {'통과' if render_ok else '실패'}
- 최종 품질 판정: {status_line}
## Render Gates
- slide overflow: {slide_overflow}
{chr(10).join(zone_lines)}
## Measurement
```json
{json.dumps(measurement, ensure_ascii=False, indent=2)}
```
{retry_section}
## Final Decision
- 판정: `{status}`
## Failure Classification
{failure_lines}
## Next Action
{action_lines}
"""
def write_step_comment(path: Path, body: str) -> None:
    """Persist a step-comment body to *path* as UTF-8 text."""
    with path.open("w", encoding="utf-8") as handle:
        handle.write(body)
def read_text_if_exists(path: Path) -> str:
    """Return the text content of *path* (BOM-tolerant), or "" when the file is missing.

    Uses EAFP (try/except FileNotFoundError) instead of an ``exists()``
    pre-check: the original LBYL form had a TOCTOU window in which the file
    could disappear between the check and the read, raising unexpectedly.
    Other read errors (e.g. permission problems) still propagate, as before.
    """
    try:
        return path.read_text(encoding="utf-8-sig")
    except FileNotFoundError:
        return ""
def summarize_markdown_lines(text: str, limit: int = 8) -> list[str]:
    """Extract up to *limit* content lines from markdown text.

    Headings and blank lines are dropped; leading bullet ("-"/"*") and
    ordered-list ("1.") markers are stripped; overly long lines are compacted.
    """
    bullet_marker = re.compile(r'^[-*]\s*')
    number_marker = re.compile(r'^\d+\.\s*')
    collected: list[str] = []
    for raw in text.splitlines():
        candidate = raw.strip()
        if not candidate or candidate.startswith('#'):
            continue
        candidate = number_marker.sub('', bullet_marker.sub('', candidate))
        if not candidate:
            continue
        if len(candidate) > 120:
            # Shrink long lines to ~80% of their length (never below 96 chars).
            candidate = compact_text(candidate, max(96, int(len(candidate) * 0.8)))
        collected.append(candidate)
        if len(collected) >= limit:
            break
    return collected
def build_step_comment(title: str, artifact_path: Path, lines: list[str], verdict: str = "pass") -> str:
    """Render the markdown body for one workflow-step comment.

    NOTE(review): the template's literal text contains `?` runs that appear
    to be mojibake from a lost encoding — preserved verbatim here.
    """
    if lines:
        bullet_text = "\n".join(f"- {line}" for line in lines)
    else:
        bullet_text = "- ??? ??? ????."
    posix_path = artifact_path.as_posix()
    return f"""?? ??
- {title} ?? ???? ?? ???? ?? run ???? ????.
- ?? ??: `{posix_path}`
?? ??
{bullet_text}
KPI / ?? ??
- ??: {verdict}
?? ?? ???
- `{posix_path}`
"""
def post_comment_if_configured(repo: str, issue_number: int, body_file: Path) -> None:
    """Post *body_file*'s content as a Gitea issue comment.

    Silently does nothing unless both GITEA_URL and GITEA_TOKEN environment
    variables are set to non-blank values.
    """
    base_url = os.getenv("GITEA_URL", "").strip()
    token = os.getenv("GITEA_TOKEN", "").strip()
    if base_url and token:
        body = body_file.read_text(encoding="utf-8")
        create_comment(base_url, token, repo, issue_number, body)
def refresh_final_screenshot(final_html_path: Path, output_dir: Path) -> None:
    """Capture a PNG screenshot of the rendered slide into *output_dir*.

    Primary path: drive headless Chrome directly and screenshot the `.slide`
    element. Fallback path (any Selenium failure): delegate to
    capture_slide_screenshot(), which returns base64 PNG data. Both paths
    write final-screenshot-current.png and final-screenshot.png. No-op when
    the HTML file does not exist.
    """
    if not final_html_path.exists():
        return
    driver = None
    try:
        options = Options()
        options.add_argument("--headless=new")
        options.add_argument("--disable-gpu")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        # Scale factor 1 keeps screenshot pixels aligned with CSS pixels.
        options.add_argument("--force-device-scale-factor=1")
        # Extra 200px height leaves room for browser chrome/scrollbars.
        options.add_argument(f"--window-size={settings.slide_width},{settings.slide_height + 200}")
        driver = webdriver.Chrome(options=options)
        driver.get(final_html_path.resolve().as_uri())
        slide = driver.find_element(By.CSS_SELECTOR, ".slide")
        screenshot_bytes = slide.screenshot_as_png
        (output_dir / "final-screenshot-current.png").write_bytes(screenshot_bytes)
        (output_dir / "final-screenshot.png").write_bytes(screenshot_bytes)
    except Exception:
        # Best-effort fallback: render via the measurer's own capture helper.
        # NOTE(review): the broad except deliberately swallows the Selenium
        # error; consider logging it so Chrome setup failures are diagnosable.
        html = final_html_path.read_text(encoding="utf-8")
        screenshot_b64 = capture_slide_screenshot(html)
        if not screenshot_b64:
            return
        import base64
        screenshot_bytes = base64.b64decode(screenshot_b64)
        (output_dir / "final-screenshot-current.png").write_bytes(screenshot_bytes)
        (output_dir / "final-screenshot.png").write_bytes(screenshot_bytes)
    finally:
        # Always release the browser process, even when capture failed.
        if driver:
            try:
                driver.quit()
            except Exception:
                pass
def compact_text(text: str, max_len: int) -> str:
    """Collapse whitespace and truncate *text* to *max_len*, preferring a word boundary.

    Truncated results have trailing punctuation stripped and "..." appended.
    """
    normalized = re.sub(r"\s+", " ", text).strip()
    if len(normalized) <= max_len:
        return normalized
    truncated = normalized[:max_len]
    word_cut = truncated.rsplit(" ", 1)[0].strip()
    kept = word_cut or truncated
    return kept.rstrip(" ,.;:") + "..."
def preserve_80_percent(text: str, floor: int = 80, ceiling: int = 180) -> int:
    """Return ~80% of the whitespace-normalized length of *text*, clamped to [floor, ceiling].

    Empty (or all-whitespace) input yields *floor*.
    """
    normalized = re.sub(r"\s+", " ", text).strip()
    if not normalized:
        return floor
    target = int(len(normalized) * 0.8)
    return max(floor, min(ceiling, target))
def ensure_phrase(base: str, phrase: str) -> str:
    """Append *phrase* to *base* unless already contained; the result is stripped."""
    return base if phrase in base else f"{base} {phrase}".strip()
def load_stage_artifacts(output_dir: Path) -> dict[str, Any]:
    """Load whichever stage-context JSON files exist under *output_dir*, keyed by filename."""
    stage_files = (
        "stage_1b_context.json",
        "stage_1_5b_context.json",
        "stage_2_context.json",
        "stage_2_verification.json",
    )
    loaded: dict[str, Any] = {}
    for filename in stage_files:
        candidate = output_dir / filename
        if candidate.exists():
            loaded[filename] = read_json(candidate)
    return loaded
def derive_retry_plan(failures: list[str], artifacts: dict[str, Any], stage1b_data: dict[str, Any], run_mode: str) -> dict[str, Any]:
    """Decide which pipeline stage to roll back to and which concept mutations to apply.

    Content-quality failures roll back to stage_1b (with topic-level summary /
    expression-hint mutations); pure overflow failures roll back to stage_1_5b
    (budget compression). Returns a plan dict with rollback_stage, failures,
    reasons and mutations.
    """
    stage_1_5b = artifacts.get("stage_1_5b_context.json", {})
    stage_2v = artifacts.get("stage_2_verification.json", {})
    rollback_stage = "stage_2"
    reasons: list[str] = []
    mutations: list[dict[str, Any]] = []
    concepts = stage1b_data.get("concepts", [])
    topic_ids = [c.get("topic_id") for c in concepts if c.get("topic_id")]
    failure_set = set(failures)
    if run_mode == 'run001':
        run001_triggers = {
            "Verify-CoreMessage", "Verify-ImageRef", "Verify-ComparisonVisible",
            "Verify-DesignStructure", "Verify-SectionTitles", "Verify-Placeholder",
        }
        if failure_set & run001_triggers:
            rollback_stage = "stage_1b"
            reasons.append("가시 메시지/관계도/비교 요약이 부족하여 topic 표현 지시를 다시 강화해야 함")
            # run001 topic ids are fixed by convention (2/3/5/6).
            mutations.extend([
                {"topic_id": 2, "change": "summary", "strategy": "core_message_strengthen"},
                {"topic_id": 3, "change": "expression_hint", "strategy": "force_relation_diagram_visible"},
                {"topic_id": 5, "change": "expression_hint", "strategy": "force_visible_comparison_summary"},
                {"topic_id": 6, "change": "summary", "strategy": "strong_footer_conclusion"},
            ])
    else:
        generic_triggers = {"Verify-Placeholder", "Verify-SectionTitles", "Verify-ContentDensity", "Verify-DesignStructure"}
        if failure_set & generic_triggers:
            rollback_stage = "stage_1b"
            reasons.append("원문 섹션 제목과 내용 밀도를 더 직접적으로 살리도록 generic topic 표현을 강화해야 함")
            # First three topics get role-specific strengthening; the last topic
            # (if a fourth exists) gets the footer-conclusion strategy.
            section_strategies = (
                "strengthen_intro_from_source",
                "strengthen_main_from_source",
                "strengthen_support_from_source",
            )
            for tid, strategy in zip(topic_ids[:3], section_strategies):
                mutations.append({"topic_id": tid, "change": "summary", "strategy": strategy})
            if len(topic_ids) >= 4:
                mutations.append({"topic_id": topic_ids[-1], "change": "summary", "strategy": "strong_footer_conclusion_generic"})
    if failure_set & {"Verify-RenderZone", "Verify-RenderSlide"}:
        # Content rollback (stage_1b) takes precedence over budget rollback.
        if rollback_stage != "stage_1b":
            rollback_stage = "stage_1_5b"
        reasons.append("overflow가 발생하여 budget/문장 길이/보조 정보 밀도를 재조정해야 함")
        for role, container in stage_1_5b.get("containers", {}).items():
            budget = container.get("design_budget", {})
            if budget and not budget.get("fits", True):
                mutations.append({"role": role, "change": "budget", "strategy": "compress_visible_copy"})
        for area, result in stage_2v.items():
            if not result.get("passed"):
                mutations.append({"area": area, "change": "verification", "strategy": "reduce_density_and_split_visibility"})
    return {
        "rollback_stage": rollback_stage,
        "failures": failures,
        "reasons": reasons,
        "mutations": mutations,
    }
def apply_retry_plan_to_stage1b(stage1b_path: Path, retry_plan: dict[str, Any], iteration: int) -> dict[str, Any]:
    """Apply *retry_plan* mutations to the stage-1b concepts file in place.

    Before mutating, the current file is backed up to a history/ folder so
    each iteration stays inspectable, and the retry plan itself is persisted
    next to the concepts file. Mutations without a matching topic_id (e.g.
    role/area-scoped ones) are skipped. Returns the mutated data.
    """
    data = read_json(stage1b_path)
    backup_dir = stage1b_path.parent / "history"
    backup_dir.mkdir(parents=True, exist_ok=True)
    # Snapshot the pre-mutation state, named by iteration number.
    backup_path = backup_dir / f"stage-1b-refined-concepts.iteration-{iteration}.json"
    write_json(backup_path, data)
    concept_map = {item["topic_id"]: item for item in data.get("concepts", [])}
    for mutation in retry_plan.get("mutations", []):
        topic_id = mutation.get("topic_id")
        strategy = mutation.get("strategy")
        # Role/area mutations have no topic_id and are ignored here.
        if topic_id not in concept_map:
            continue
        concept = concept_map[topic_id]
        summary = concept.get("summary", "")
        hint = concept.get("expression_hint", "")
        # Each strategy rewrites the concept's summary and/or expression hint.
        if strategy == "core_message_strengthen":
            concept["summary"] = compact_text(
                ensure_phrase(summary, "DX는 상위 개념이고 BIM은 핵심 기술이다."),
                80,
            )
            concept["expression_hint"] = ensure_phrase(hint, "본문 첫 블록에서 DX는 상위 개념, BIM은 핵심 기술이라는 문구를 그대로 가시 텍스트로 노출한다.")
        elif strategy == "force_relation_diagram_visible":
            concept["expression_hint"] = ensure_phrase(hint, "관계도는 팝업이나 숨김영역이 아니라 본문 중앙의 가시 다이어그램으로 렌더링한다.")
            concept["summary"] = compact_text(ensure_phrase(summary, "DX와 GIS, BIM, Digital Twin의 관계를 시각적으로 드러낸다."), 90)
        elif strategy == "force_visible_comparison_summary":
            concept["expression_hint"] = ensure_phrase(hint, "범위, 프로세스, 성과품, 확장성 4개 비교축을 sidebar의 가시 요약 카드로 직접 노출한다.")
            # Note: this strategy replaces the summary outright rather than appending.
            concept["summary"] = compact_text(
                "범위·프로세스·성과품·확장성의 4개 비교축으로 DX와 BIM 차이를 짧고 직접적으로 보여준다.",
                90,
            )
        elif strategy == "strong_footer_conclusion":
            concept["summary"] = "결론: BIM은 건설산업 DX를 수행하는 과정의 가장 기초가 되는 일부분이다."
            concept["expression_hint"] = ensure_phrase(hint, "footer 또는 결론 배너에서 문장을 축약하지 말고 그대로 강하게 노출한다.")
        elif strategy == "compress_visible_copy":
            # Shrink the summary to ~80% of its normalized length (80–180 chars).
            concept["summary"] = compact_text(summary, preserve_80_percent(summary, floor=80, ceiling=180))
            concept["expression_hint"] = ensure_phrase(hint, "문장 수를 줄이고 핵심 명사구 위주로 압축하되, 핵심 메시지는 유지한다.")
        elif strategy == "reduce_density_and_split_visibility":
            concept["summary"] = compact_text(summary, preserve_80_percent(summary, floor=90, ceiling=200))
            concept["expression_hint"] = ensure_phrase(hint, "표현 밀도를 낮추고, 장문 설명 대신 짧은 bullet/card 구조로 나눈다.")
        elif strategy == "strengthen_intro_from_source":
            concept["expression_hint"] = ensure_phrase(hint, "첫 섹션 제목과 핵심 bullet을 그대로 가시 블록으로 유지한다.")
        elif strategy == "strengthen_main_from_source":
            concept["expression_hint"] = ensure_phrase(hint, "둘째 섹션의 원문 bullet과 소제목을 직접적으로 유지한다.")
        elif strategy == "strengthen_support_from_source":
            concept["expression_hint"] = ensure_phrase(hint, "보조 섹션도 placeholder 없이 원문 bullet 중심으로 노출한다.")
        elif strategy == "strong_footer_conclusion_generic":
            concept["expression_hint"] = ensure_phrase(hint, "핵심 요약 문장을 footer에서 축약하지 말고 직접 노출한다.")
    write_json(stage1b_path, data)
    # Persist the plan alongside the concepts for traceability.
    retry_plan_path = stage1b_path.parent / "retry-plan.json"
    write_json(retry_plan_path, retry_plan)
    return data
def main() -> None:
    """Run the generate → measure → validate → retry loop for one slide run.

    Posts step comments (1–4 once, 5–6 per iteration) to Gitea when
    configured, and stops early when validation passes.

    NOTE(review): issue_numbers[4] and [5] assume at least six comma-separated
    issue numbers were supplied — confirm or guard against shorter lists.
    """
    parser = argparse.ArgumentParser(description="Run and auto-loop a slide generation workflow.")
    parser.add_argument("--run-id", default="run-001")
    parser.add_argument("--repo-root", default=str(ROOT))
    parser.add_argument("--repo-slug", default="Kyeongmin/C.E.L._slide_test")
    parser.add_argument("--issue-numbers", default="2,3,4,5,6,7")
    parser.add_argument("--max-iterations", type=int, default=3)
    args = parser.parse_args()
    repo_root = Path(args.repo_root)
    run_dir = repo_root / "docs" / args.run_id
    # Raises StopIteration if no .mdx input exists for this run.
    input_file = next((run_dir / "01-input").glob("*.mdx"))
    stage1a = run_dir / "04-plan" / "stage-1a-topics.json"
    stage1b = run_dir / "04-plan" / "stage-1b-refined-concepts.json"
    output_dir = run_dir / "05-execution"
    comments_dir = run_dir / "comments"
    comments_dir.mkdir(parents=True, exist_ok=True)
    validation_path = run_dir / "06-validation" / "validation-result.md"
    # NOTE(review): bootstrap_info is never read after this call — confirm
    # whether rebuild_run_from_raw is invoked purely for its side effects.
    bootstrap_info = rebuild_run_from_raw(repo_root, run_dir, input_file)
    issue_numbers = [int(x.strip()) for x in args.issue_numbers.split(",")]
    step_comment_bodies = {
        1: comments_dir / "step-1.md",
        2: comments_dir / "step-2.md",
        3: comments_dir / "step-3.md",
        4: comments_dir / "step-4.md",
        5: comments_dir / "step-5.md",
        6: comments_dir / "step-6.md",
    }
    step_artifacts = {
        1: run_dir / "01-input" / "input-review.md",
        2: run_dir / "02-kei-interpretation" / "kei-interpretation.md",
        3: run_dir / "03-structure" / "content-structure.md",
        4: run_dir / "04-plan" / "execution-plan.md",
    }
    # NOTE(review): step titles contain `?` runs that look like mojibake —
    # confirm against the original Korean text.
    step_titles = {
        1: "Step 1 ?? ??",
        2: "Step 2 ?? ??",
        3: "Step 3 ??? ???",
        4: "Step 4 ?? ??",
    }
    # Steps 1-4: summarize each planning artifact into a step comment once.
    for step_no in (1, 2, 3, 4):
        artifact_path = step_artifacts[step_no]
        artifact_text = read_text_if_exists(artifact_path)
        lines = summarize_markdown_lines(artifact_text, limit=8)
        body = build_step_comment(step_titles[step_no], artifact_path.relative_to(repo_root), lines, verdict="pass")
        write_step_comment(step_comment_bodies[step_no], body)
        if step_no - 1 < len(issue_numbers):
            post_comment_if_configured(args.repo_slug, issue_numbers[step_no - 1], step_comment_bodies[step_no])
    # Main generate/validate/retry loop (steps 5-6 re-posted each iteration).
    for iteration in range(1, args.max_iterations + 1):
        cmd = [
            sys.executable,
            str(repo_root / "scripts" / "run_from_artifacts.py"),
            "--input", str(input_file),
            "--stage1a", str(stage1a),
            "--stage1b", str(stage1b),
            "--base-path", str(repo_root),
            "--output-dir", str(output_dir),
        ]
        # Generation runs inside the design-agent project directory.
        completed = subprocess.run(cmd, cwd=str(DESIGN_AGENT_ROOT), capture_output=True, text=True)
        generated_path = output_dir / "generated_html.json"
        context_path = output_dir / "context.json"
        final_html_path = output_dir / "final.html"
        measurement_path = output_dir / "measurement.json"
        if completed.returncode != 0:
            # Hard execution failure: record an Exec-Exit validation result and
            # retry on the next iteration without further checks.
            actions = ["실패한 stage와 stderr를 확인하고 해당 stage부터 재실행한다."]
            validation_path.write_text(
                build_validation_markdown(args.run_id, "revise", ["Exec-Exit"], actions, {"slide": {"overflowed": True}, "zones": {}}),
                encoding="utf-8",
            )
            step5_body = f"""실행 요약
- auto_loop_runner.py iteration {iteration} 실행이 비정상 종료되었다.
- 입력: `docs/{args.run_id}/01-input/{input_file.name}`
- 종료 코드: {completed.returncode}
실행 stderr
```text
{completed.stderr.strip()}
```
산출물 경로
- `docs/{args.run_id}/05-execution/`
KPI / 판정 결과
- 판정: fail
- iteration: {iteration}
- 종료 코드: {completed.returncode}
실패 분류
- Exec-Exit
수정 액션
- 실패한 stage와 stderr를 확인하고 해당 stage부터 재실행
다음 단계 전달물
- 실행 stderr
- 실패 시점 정보
"""
            step6_body = f"""실행 요약
- iteration {iteration}에서 실행 자체가 실패하여 품질 검증을 완료할 수 없었다.
산출물 경로
- `docs/{args.run_id}/06-validation/validation-result.md`
KPI / 판정 결과
- 판정: fail
- 실패 분류: Exec-Exit
수정 액션
- 실패한 stage와 stderr를 확인하고 해당 stage부터 재실행
다음 단계 전달물
- 실행 stderr
- 실패 시점 정보
- 다음 iteration 여부: 재실행
"""
            write_step_comment(step_comment_bodies[5], step5_body)
            write_step_comment(step_comment_bodies[6], step6_body)
            post_comment_if_configured(args.repo_slug, issue_numbers[4], step_comment_bodies[5])
            post_comment_if_configured(args.repo_slug, issue_numbers[5], step_comment_bodies[6])
            continue
        # Refresh the screenshot only when missing or older than the HTML.
        screenshot_path = output_dir / "final-screenshot-current.png"
        if (not screenshot_path.exists()) or (screenshot_path.stat().st_mtime < final_html_path.stat().st_mtime):
            refresh_final_screenshot(final_html_path, output_dir)
        generated = read_json(generated_path)
        measurement = read_json(measurement_path)
        stage1a_data = read_json(stage1a)
        required_titles = [item.get("title", "") for item in stage1a_data.get("topics", [])]
        topic_count = len(required_titles)
        # Heuristic: five or more topics means the special run001 ruleset.
        run_mode = "run001" if topic_count >= 5 else "generic"
        layout_template = str(stage1a_data.get("analysis", {}).get("layout_template", "") or "")
        status, failures, actions = validate_outputs(generated, measurement, required_titles, run_mode, layout_template)
        final_html_text = final_html_path.read_text(encoding="utf-8")
        # Extra gate: this exact inline style indicates a degenerate sidebar badge.
        if 'width:100%; height:28px' in final_html_text:
            status = "revise"
            failures = sorted(set(failures + ["Verify-RenderedSidebarBadge"]))
            actions = list(dict.fromkeys(actions + ["???? ??? ????? ??? ??? ????? ?? grid/fixed-width ??? ?? ????."]))
        retry_plan = None
        # Mutate stage-1b concepts only when another iteration will follow.
        if status != "pass" and iteration < args.max_iterations:
            artifacts = load_stage_artifacts(output_dir)
            retry_plan = derive_retry_plan(failures, artifacts, read_json(stage1b), run_mode)
            apply_retry_plan_to_stage1b(stage1b, retry_plan, iteration)
        validation_path.write_text(build_validation_markdown(args.run_id, status, failures, actions, measurement, retry_plan), encoding="utf-8")
        zone_names = zone_overflow_names(measurement)
        # Step 5 passes only when all four artifacts exist and are non-empty.
        critical_outputs_ok = all(path.exists() and path.stat().st_size > 0 for path in [final_html_path, generated_path, measurement_path, context_path])
        step5_status = "pass" if completed.returncode == 0 and critical_outputs_ok else "fail"
        step5_failures = "없음" if step5_status == "pass" else "Exec-Artifact"
        step5_actions = "- 없음" if step5_status == "pass" else "- 필수 산출물 4종과 저장 경로를 재확인하고 재생성"
        step5_body = f"""실행 요약
- auto_loop_runner.py iteration {iteration}로 실행했다.
- 입력: `docs/{args.run_id}/01-input/{input_file.name}`
- 산출물: `final.html`, `generated_html.json`, `measurement.json`, `context.json`
- stage snapshot: `stage_0_context.json` ~ `final_context.json`
산출물 경로
- `docs/{args.run_id}/05-execution/final.html`
- `docs/{args.run_id}/05-execution/generated_html.json`
- `docs/{args.run_id}/05-execution/measurement.json`
- `docs/{args.run_id}/05-execution/context.json`
KPI / 판정 결과
- 판정: {step5_status}
- iteration: {iteration}
- 종료 코드: {completed.returncode}
- 필수 산출물 4종 유효 여부: {critical_outputs_ok}
실패 분류
- {step5_failures}
수정 액션
{step5_actions}
다음 단계 전달물
- 최신 실행 산출물
- 최신 measurement
- 최신 context
- stage snapshot 묶음
"""
        step6_body = f"""실행 요약
- iteration {iteration} 기준으로 최종 산출물과 측정 결과를 다시 검증했다.
- slide overflow: {measurement.get('slide', {}).get('overflowed')}
- zone overflow: {', '.join(zone_names) if zone_names else '없음'}
- 최종 판정은 `{status}`이다.
산출물 경로
- `docs/{args.run_id}/06-validation/validation-result.md`
- `docs/{args.run_id}/05-execution/final.html`
- `docs/{args.run_id}/05-execution/measurement.json`
- `docs/{args.run_id}/05-execution/stage_2_verification.json`
KPI / 판정 결과
- 판정: {status}
- 실패 분류: {', '.join(failures) if failures else '없음'}
수정 액션
"""
        if actions:
            step6_body += "\n".join(f"- {a}" for a in actions)
        else:
            step6_body += "- 없음"
        if retry_plan:
            step6_body += f"\n\nRetry Plan\n- rollback stage: {retry_plan.get('rollback_stage')}\n- reason: {'; '.join(retry_plan.get('reasons', [])) or '없음'}\n- mutation count: {len(retry_plan.get('mutations', []))}"
        step6_body += f"\n\n다음 단계 전달물\n- 최신 validation 기록\n- 다음 iteration 여부: {'중단' if status == 'pass' else '재실행'}\n"
        write_step_comment(step_comment_bodies[5], step5_body)
        write_step_comment(step_comment_bodies[6], step6_body)
        post_comment_if_configured(args.repo_slug, issue_numbers[4], step_comment_bodies[5])
        post_comment_if_configured(args.repo_slug, issue_numbers[5], step_comment_bodies[6])
        if status == "pass":
            print(f"LOOP_STATUS=pass iteration={iteration}")
            return
    # All iterations exhausted without a pass.
    print(f"LOOP_STATUS=revise iterations={args.max_iterations}")
# Script entry point: run the auto loop only when executed directly.
if __name__ == "__main__":
    main()