Files
s-canvas/gemini_renderer.py
HYUNJUNGLEE b9342f6726 Import S-CANVAS source + iter=1~7 lint cleanup
S-CANVAS (Saman Corp.) — DXF + DEM + AI 기반 3D 조감도 생성 엔진.
~24k LOC Python (scanvas_maker.py 7072 LOC GUI + 구조물 파서/빌더 다수).

이 커밋은 7-iter cleanup이 적용된 상태로 import:
- F821 8 + B023 6: 비동기 lambda + except/loop 변수 캡처 NameError
  (Py3.13에서 reproduce 확인된 진짜 버그)
- RUF012 4 + RUF013 1: ClassVar / implicit Optional 명시화
- F811/B905/B904/F401/F841/W293/F541/UP/SIM/RUF/PLR 700+ cleanup/modernization

신규 파일:
- ruff.toml: target=py313, Korean unicode/저자 스타일/도메인 복잡도 무력화
- requirements-py313.txt: pyproj>=3.7, scipy>=1.14, numpy>=2.0.2 (Py3.13 wheel)
- .gitignore: gcp-key.json, 캐시, 백업, 생성 이미지 제외

검증: ruff 0 errors, py_compile 0 errors, import 33/33 OK on Py3.13.13.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-08 10:29:08 +09:00

289 lines
13 KiB
Python

"""Gemini(Nano Banana 등) 기반 조감도 AI 렌더링 워커.
scanvas_maker.SCanvasApp의 백그라운드 스레드에서 호출되며, app 인스턴스를 통해
상태(capture_image / job_logger / log / after / dxf_path / 등)에 접근한다.
scanvas_maker로부터 ~264줄을 분리해 AI 호출 로직을 모듈 단위로 격리.
공개 API:
run_gemini_render(app, credential, prompt, use_vertex=False, location="us-central1")
"""
from __future__ import annotations
import io
import os
import time as _time
from pathlib import Path
from tkinter import messagebox
from PIL import Image
import contextlib
# Optional Harness integration — the renderer degrades gracefully without it.
try:
    from harness.seed_manager import SeedManager
    from harness.logger import get_db_session
except Exception:  # harness missing or broken at import time: disable job tracking
    SeedManager = None  # type: ignore
    get_db_session = None  # type: ignore
    _HARNESS_OK = False
else:
    _HARNESS_OK = True
def run_gemini_render(app, credential: str, prompt: str,
                      use_vertex: bool = False,
                      location: str = "us-central1") -> None:
    """Call Gemini to render a photorealistic bird's-eye view, with Harness tracking.

    Runs on a background thread of ``scanvas_maker.SCanvasApp``; every UI/log
    interaction is marshalled back to the Tk main loop via ``app.after(0, ...)``.
    The input image is taken from ``app.capture_image`` (or ``app.guide_image``
    as a fallback), saved to ``capture_for_ai.png``, and the result is written
    to ``rendered_birdseye.png`` before being shown through
    ``app._show_rendered_result``.

    Args:
        app: scanvas_maker.SCanvasApp instance (state / logging / UI scheduling).
        credential: GCP Project ID when ``use_vertex`` is True, otherwise an API key.
        prompt: User style prompt appended to the fixed render instructions.
        use_vertex: True -> Vertex AI via the google-genai SDK; False -> API-key path.
        location: Vertex AI region ("global" = gemini-3.x, "us-central1" = 2.x).
    """
    t_start = _time.time()
    job_id = None
    db = None
    dxf_hash = app._get_dxf_hash()
    prompt_hash = app._get_prompt_hash(prompt)
    prompt_ver = "v1"
    seed = 0
    # --- Harness job bookkeeping (best-effort: failures only log a warning) ---
    if app.job_logger and _HARNESS_OK:
        try:
            db = get_db_session()
            job = app.job_logger.create_job(db, app.dxf_path or "unknown", dxf_hash)
            job_id = job.id
            seed = app.seed_mgr.get_or_create_seed(db, job_id, dxf_hash)
            if app.prompt_reg:
                prompt_ver = app.prompt_reg.latest_version() or "v1"
            app.job_logger.start_job(db, job_id, seed, prompt_ver, prompt_hash)
            app.after(0, lambda: app.log(
                f" Harness: job#{job_id}, {SeedManager.describe(seed)}, prompt={prompt_ver}"))
        except Exception as e:
            # e is bound as a lambda default because the except variable is
            # cleared when the handler exits (would NameError at call time).
            app.after(0, lambda e=e: app.log(f" Harness 초기화 경고: {e}"))
    # --- SDK selection: "vertex"/"new" need google-genai; "legacy" is the old SDK ---
    try:
        from google import genai
        from google.genai import types as gtypes
        sdk_version = "vertex" if use_vertex else "new"
    except ImportError:
        if use_vertex:
            # Vertex path strictly requires google-genai; abort with install guidance.
            if app.job_logger and db and job_id:
                app.job_logger.fail_job(db, job_id, "google-genai SDK 미설치")
            app.after(0, lambda: messagebox.showerror("패키지 필요",
                "Vertex AI는 google-genai SDK가 필요합니다.\n"
                "pip install google-genai\n"
                "그리고 gcloud auth application-default login"))
            return
        try:
            import google.generativeai as genai_legacy  # type: ignore
            sdk_version = "legacy"
        except ImportError:
            if app.job_logger and db and job_id:
                app.job_logger.fail_job(db, job_id, "google-genai SDK 미설치")
            app.after(0, lambda: messagebox.showerror("패키지 필요",
                "pip install google-generativeai\n또는\npip install google-genai"))
            return
    try:
        # Prefer the user's viewport capture; fall back to the guide image.
        source_img = app.capture_image if app.capture_image else app.guide_image
        input_path = os.path.abspath("capture_for_ai.png")
        source_img.save(input_path)
        app.after(0, lambda: app.log(f" 입력 이미지: {source_img.size}"))
        with open(input_path, "rb") as f:
            img_bytes = f.read()
        # Fixed instruction block: preserve terrain layout, keep the full frame
        # (DXF center + real DEM/satellite outer ring), then append user style.
        render_prompt = (
            f"Transform this aerial/satellite terrain capture into a photorealistic "
            f"bird's-eye view architectural rendering. "
            f"CRITICAL: Preserve the EXACT terrain layout, roads, water, structures. "
            f"The scene may combine a high-detail DXF area (center) with a lower-detail "
            f"real DEM+satellite outer ring (edges). Render BOTH areas seamlessly — "
            f"the outer ring is actual surrounding terrain, NOT filler to crop out. "
            f"Keep the full image frame; do NOT trim to the central bbox. "
            f"Dark road areas = freshly paved asphalt. Cut slopes = exposed earth. "
            f"Enhance vegetation textures, water reflections, natural lighting. "
            f"Style: {prompt}"
        )
        app.after(0, lambda: app.log(
            f" Gemini API 호출 중... (SDK: {sdk_version}, "
            f"loc: {location if use_vertex else '-'})"))
        rendered = None
        if sdk_version == "vertex":
            # --- Vertex AI path: ADC / service-account auth, region-aware models ---
            try:
                client = genai.Client(
                    vertexai=True,
                    project=credential,
                    location=location,
                )
            except Exception as ce:
                err = str(ce)[:160]
                app.after(0, lambda: app.log(
                    f" Vertex AI 클라이언트 생성 실패: {err}"))
                # Tailor the hint to whether a service-account key is configured.
                has_sa = bool(os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"))
                if has_sa:
                    app.after(0, lambda: app.log(
                        " → gcp-key.json 경로 확인. 서비스 계정에 "
                        "aiplatform.user 권한이 있는지 확인하세요."))
                else:
                    app.after(0, lambda: app.log(
                        " → gcp-key.json을 프로젝트 루트에 배치하거나 "
                        "gcloud auth application-default login 실행"))
                if app.job_logger and db and job_id:
                    app.job_logger.fail_job(db, job_id, "Vertex 인증 실패")
                return
            # Newest models first; each entry pins the region it is served from.
            model_priority = [
                ("gemini-3.1-flash-image-preview", location),
                ("gemini-3-pro-image-preview", location),
                ("gemini-2.5-flash-image", "us-central1"),
                ("gemini-2.5-flash-image-preview", "us-central1"),
                ("gemini-2.0-flash-preview-image-generation", "us-central1"),
            ]
            current_loc = location
            for model_name, model_loc in model_priority:
                if model_loc != current_loc:
                    # Model lives in a different region: rebuild the client there;
                    # skip this model if the regional client cannot be created.
                    try:
                        client = genai.Client(
                            vertexai=True,
                            project=credential,
                            location=model_loc,
                        )
                        current_loc = model_loc
                    except Exception:
                        continue
                try:
                    response = client.models.generate_content(
                        model=model_name,
                        contents=[
                            gtypes.Part.from_bytes(data=img_bytes, mime_type="image/png"),
                            render_prompt
                        ],
                        config=gtypes.GenerateContentConfig(
                            response_modalities=["IMAGE"],
                        )
                    )
                    # Take the first image part of the first candidate.
                    for part in response.candidates[0].content.parts:
                        if hasattr(part, 'inline_data') and part.inline_data and \
                                part.inline_data.mime_type.startswith("image/"):
                            rendered = Image.open(io.BytesIO(part.inline_data.data))
                            break
                    if rendered:
                        # Bind loop values as lambda defaults (late-binding closure fix).
                        _m, _l = model_name, current_loc
                        app.after(0, lambda _m=_m, _l=_l: app.log(
                            f" [Vertex] 모델 {_m} @ {_l} 성공"))
                        break
                except Exception as exc:
                    _m, _e = model_name, str(exc)[:120]
                    app.after(0, lambda _m=_m, _e=_e: app.log(f" [Vertex] {_m}: {_e}"))
        elif sdk_version == "new":
            # --- google-genai SDK with a plain API key ---
            client = genai.Client(api_key=credential)
            for model_name in ["gemini-2.5-flash-image",
                               "gemini-2.0-flash-preview-image-generation"]:
                try:
                    response = client.models.generate_content(
                        model=model_name,
                        contents=[
                            gtypes.Part.from_bytes(data=img_bytes, mime_type="image/png"),
                            render_prompt
                        ],
                        config=gtypes.GenerateContentConfig(
                            response_modalities=["IMAGE", "TEXT"],
                        )
                    )
                    for part in response.candidates[0].content.parts:
                        if hasattr(part, 'inline_data') and part.inline_data and \
                                part.inline_data.mime_type.startswith("image/"):
                            rendered = Image.open(io.BytesIO(part.inline_data.data))
                            break
                    if rendered:
                        _m = model_name
                        app.after(0, lambda _m=_m: app.log(f" 모델 {_m} 성공"))
                        break
                except Exception as exc:
                    _m, _e = model_name, str(exc)[:80]
                    app.after(0, lambda _m=_m, _e=_e: app.log(f" {_m}: {_e}"))
        else:
            # --- legacy google-generativeai SDK fallback (API key only) ---
            genai_legacy.configure(api_key=credential)
            pil_img = Image.open(io.BytesIO(img_bytes))
            for model_name in ["gemini-2.5-flash-image",
                               "gemini-2.0-flash-preview-image-generation"]:
                try:
                    model = genai_legacy.GenerativeModel(model_name)
                    response = model.generate_content(
                        [pil_img, render_prompt]
                    )
                    if hasattr(response, 'candidates') and response.candidates:
                        for part in response.candidates[0].content.parts:
                            if (hasattr(part, 'inline_data') and part.inline_data
                                    and part.inline_data.mime_type.startswith("image/")):
                                rendered = Image.open(io.BytesIO(part.inline_data.data))
                                break
                    if rendered:
                        _m = model_name
                        app.after(0, lambda _m=_m: app.log(f" 모델 {_m} 성공"))
                        break
                except Exception as exc:
                    _m, _e = model_name, str(exc)[:80]
                    app.after(0, lambda _m=_m, _e=_e: app.log(f" {_m}: {_e}"))
        if not rendered:
            if app.job_logger and db and job_id:
                app.job_logger.fail_job(db, job_id, "Gemini 이미지 생성 실패")
            app.after(0, lambda: app.log(" Gemini 이미지 생성 실패. API Key와 모델을 확인하세요."))
            app.after(0, lambda: app.set_status("이미지 생성 실패", "#E74C3C"))
            return
        # Output post-processing — resize to the HD/FHD/UHD target chosen in Step 4.
        tgt = getattr(app, "target_resolution", None)
        if tgt and tgt[0] > 0 and tgt[1] > 0 and rendered.size != tuple(tgt):
            src_size = rendered.size
            # NOTE(review): this log string has no separator between the source
            # and target sizes (possibly a dropped "→") — confirm intended text.
            app.after(0, lambda s=src_size, t=tgt: app.log(
                f" 화질 리사이즈: {s[0]}x{s[1]}{t[0]}x{t[1]}"))
            rendered = rendered.resize(tuple(tgt), Image.LANCZOS)
        output_path = "rendered_birdseye.png"
        rendered.save(output_path)
        latency_ms = (_time.time() - t_start) * 1000
        quality_score = 0.0
        # Optional quality validation; errors are logged, not fatal.
        if app.quality_val:
            try:
                vr = app.quality_val.validate(Path(output_path))
                quality_score = vr.score
                app.after(0, lambda: app.log(f" 품질검증: {vr.summary}"))
            except Exception as e:
                app.after(0, lambda e=e: app.log(f" 품질검증 오류: {e}"))
        if app.job_logger and db and job_id:
            with contextlib.suppress(Exception):
                app.job_logger.complete_job(db, job_id, output_path, quality_score, latency_ms)
        app.after(0, lambda: app.log(
            f" Gemini 렌더링 완료! → {output_path} ({rendered.size}) "
            f"[{latency_ms:.0f}ms, 품질={quality_score:.2f}]"))
        app.after(0, lambda: app.set_status("AI 렌더링 완료", "#2ECC71"))
        app.after(0, lambda: app._show_rendered_result(output_path))
    except Exception as e:
        # Any unexpected failure: mark the job failed (best-effort) and surface
        # a truncated message in both the log and a modal dialog.
        if app.job_logger and db and job_id:
            with contextlib.suppress(Exception):
                app.job_logger.fail_job(db, job_id, str(e))
        err_msg = str(e)[:300]
        app.after(0, lambda: app.log(f" Gemini 오류: {err_msg}"))
        app.after(0, lambda: app.set_status("렌더링 실패", "#E74C3C"))
        app.after(0, lambda: messagebox.showerror("Gemini 오류", f"API 호출 오류:\n{err_msg}"))
    finally:
        # Always release the Harness DB session if one was opened.
        if db:
            with contextlib.suppress(Exception):
                db.close()