feat: stabilize dev sync and add history tables

Author: hyunho
Date: 2026-03-30 09:15:31 +09:00
Parent: bc60f932c3
Commit: cbae8769bf
6 changed files with 339 additions and 37 deletions


@@ -281,6 +281,66 @@ CREATE TABLE IF NOT EXISTS integration_vouchers (
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE IF NOT EXISTS history_revisions (
    id BIGSERIAL PRIMARY KEY,
    scope TEXT NOT NULL DEFAULT 'organization',
    revision_label TEXT NOT NULL,
    created_by_user_id BIGINT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    note TEXT NOT NULL DEFAULT ''
);

CREATE TABLE IF NOT EXISTS member_versions (
    id BIGSERIAL PRIMARY KEY,
    member_id INTEGER NOT NULL REFERENCES members(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    company TEXT NOT NULL DEFAULT '',
    rank TEXT NOT NULL DEFAULT '',
    role TEXT NOT NULL DEFAULT '',
    department TEXT NOT NULL DEFAULT '',
    grp TEXT NOT NULL DEFAULT '',
    division TEXT NOT NULL DEFAULT '',
    team TEXT NOT NULL DEFAULT '',
    cell TEXT NOT NULL DEFAULT '',
    work_status TEXT NOT NULL DEFAULT '',
    work_time TEXT NOT NULL DEFAULT '',
    phone TEXT NOT NULL DEFAULT '',
    email TEXT NOT NULL DEFAULT '',
    photo_url TEXT NOT NULL DEFAULT '',
    valid_from TIMESTAMPTZ NOT NULL,
    valid_to TIMESTAMPTZ,
    revision_no BIGINT NOT NULL,
    changed_by_user_id BIGINT,
    change_reason TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE IF NOT EXISTS seat_assignment_versions (
    id BIGSERIAL PRIMARY KEY,
    member_id INTEGER NOT NULL REFERENCES members(id) ON DELETE CASCADE,
    seat_map_id INTEGER REFERENCES seat_maps(id) ON DELETE CASCADE,
    seat_slot_id INTEGER REFERENCES seat_slots(id) ON DELETE CASCADE,
    seat_label TEXT NOT NULL DEFAULT '',
    valid_from TIMESTAMPTZ NOT NULL,
    valid_to TIMESTAMPTZ,
    revision_no BIGINT NOT NULL,
    changed_by_user_id BIGINT,
    change_reason TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE IF NOT EXISTS entity_change_events (
    id BIGSERIAL PRIMARY KEY,
    entity_type TEXT NOT NULL,
    entity_id BIGINT NOT NULL,
    action_type TEXT NOT NULL,
    revision_no BIGINT NOT NULL,
    changed_by_user_id BIGINT,
    changed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    change_reason TEXT NOT NULL DEFAULT '',
    patch_json JSONB NOT NULL DEFAULT '{}'::jsonb
);
CREATE SCHEMA IF NOT EXISTS auth;
CREATE TABLE IF NOT EXISTS auth.users (
@@ -474,6 +534,18 @@ ON integration_vouchers (project_code, project_name);
CREATE UNIQUE INDEX IF NOT EXISTS integration_project_category_mappings_key_idx
ON integration_project_category_mappings (source_key, normalized_project_key);
CREATE INDEX IF NOT EXISTS member_versions_member_time_idx
ON member_versions (member_id, valid_from, valid_to);
CREATE INDEX IF NOT EXISTS seat_assignment_versions_member_time_idx
ON seat_assignment_versions (member_id, valid_from, valid_to);
CREATE INDEX IF NOT EXISTS history_revisions_scope_created_idx
ON history_revisions (scope, created_at DESC);
CREATE INDEX IF NOT EXISTS entity_change_events_entity_idx
ON entity_change_events (entity_type, entity_id, changed_at DESC);
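
The two `(member_id, valid_from, valid_to)` indexes are shaped for as-of lookups against the version tables. A minimal sketch of the kind of point-in-time query they serve, assuming the tables above (the timestamp literal and selected columns are illustrative, not part of this commit):

```sql
-- Hypothetical as-of query: each member's snapshot at a given instant.
-- A version row covers the instant if it started at or before it and is
-- either still open (valid_to IS NULL) or ended strictly after it.
SELECT mv.member_id, mv.name, mv.team, mv.work_status
FROM member_versions AS mv
WHERE mv.valid_from <= TIMESTAMPTZ '2026-03-01 00:00:00+09'
  AND (mv.valid_to IS NULL OR mv.valid_to > TIMESTAMPTZ '2026-03-01 00:00:00+09');
```

The same shape works against `seat_assignment_versions` through its own `(member_id, valid_from, valid_to)` index.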
DO $$
BEGIN
    IF NOT EXISTS (
@@ -556,6 +628,7 @@ def init_db(max_retries: int = 20, retry_delay: float = 2.0) -> None:
            with conn.cursor() as cur:
                cur.execute(SCHEMA_SQL)
                cur.execute(MIGRATION_SQL)
                ensure_history_backfill(cur)
            conn.commit()
            return
        except psycopg.OperationalError as exc:
@@ -563,3 +636,69 @@ def init_db(max_retries: int = 20, retry_delay: float = 2.0) -> None:
            time.sleep(retry_delay)
    if last_error is not None:
        raise last_error

def ensure_history_backfill(cur) -> None:
    # Find or create the baseline revision used to tag backfilled rows.
    cur.execute(
        """
        SELECT id
        FROM history_revisions
        WHERE scope = 'organization'
          AND revision_label = 'initial-backfill'
        ORDER BY id ASC
        LIMIT 1
        """
    )
    row = cur.fetchone()
    if row is None:
        cur.execute(
            """
            INSERT INTO history_revisions (scope, revision_label, note)
            VALUES ('organization', 'initial-backfill', 'Seeded from current members and seat_positions state')
            RETURNING id
            """
        )
        revision_id = int(cur.fetchone()["id"])
    else:
        revision_id = int(row["id"])

    # Seed one open-ended snapshot per member that has no version row yet.
    cur.execute(
        """
        INSERT INTO member_versions (
            member_id, name, company, rank, role, department, grp, division, team, cell,
            work_status, work_time, phone, email, photo_url,
            valid_from, valid_to, revision_no, changed_by_user_id, change_reason
        )
        SELECT
            m.id, m.name, COALESCE(m.company, ''), COALESCE(m.rank, ''), COALESCE(m.role, ''),
            COALESCE(m.department, ''), COALESCE(m.grp, ''), COALESCE(m.division, ''), COALESCE(m.team, ''), COALESCE(m.cell, ''),
            COALESCE(m.work_status, ''), COALESCE(m.work_time, ''), COALESCE(m.phone, ''), COALESCE(m.email, ''), COALESCE(m.photo_url, ''),
            COALESCE(m.updated_at, m.created_at, NOW()), NULL, %s, NULL, 'initial-backfill'
        FROM members AS m
        WHERE NOT EXISTS (
            SELECT 1
            FROM member_versions mv
            WHERE mv.member_id = m.id
        )
        """,
        (revision_id,),
    )

    # Same idea for seat assignments: seed from the current seat_positions state.
    cur.execute(
        """
        INSERT INTO seat_assignment_versions (
            member_id, seat_map_id, seat_slot_id, seat_label,
            valid_from, valid_to, revision_no, changed_by_user_id, change_reason
        )
        SELECT
            sp.member_id, sp.seat_map_id, sp.seat_slot_id, COALESCE(sp.seat_label, ''),
            COALESCE(sp.updated_at, NOW()), NULL, %s, NULL, 'initial-backfill'
        FROM seat_positions AS sp
        WHERE NOT EXISTS (
            SELECT 1
            FROM seat_assignment_versions sav
            WHERE sav.member_id = sp.member_id
        )
        """,
        (revision_id,),
    )
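
The `WHERE NOT EXISTS` guards make `ensure_history_backfill` idempotent: re-running it only inserts rows for members or seat positions that have no version row yet. One way to sanity-check a run (an illustrative query, not shipped in this commit) is to count members still missing a version row; after the backfill it should be zero:

```sql
-- Members with no snapshot in member_versions (expect 0 after backfill).
SELECT COUNT(*) AS members_missing_versions
FROM members AS m
WHERE NOT EXISTS (
    SELECT 1
    FROM member_versions AS mv
    WHERE mv.member_id = m.id
);
```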

docker-compose.8081.yml (new file, 78 lines)

@@ -0,0 +1,78 @@
services:
  proxy:
    image: nginx:1.27-alpine
    depends_on:
      frontend:
        condition: service_healthy
      backend:
        condition: service_healthy
    ports:
      - "8081:80"
    volumes:
      - ./proxy/nginx.conf:/etc/nginx/conf.d/default.conf:ro
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider http://127.0.0.1/ || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
  frontend:
    build:
      context: .
      dockerfile: frontend/Dockerfile
    volumes:
      - ./frontend/public:/usr/share/nginx/html:ro
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider http://127.0.0.1/ || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
  backend:
    build:
      context: .
      dockerfile: backend/Dockerfile
    command: uvicorn backend.app.main:app --host 0.0.0.0 --port 8000 --reload
    env_file:
      - .env
    depends_on:
      db:
        condition: service_healthy
    volumes:
      - ./backend/app:/app/backend/app:ro
      - ./DashBoard-organization.html:/app/legacy/DashBoard-organization.html:ro
      - ./DashBoard-organization-backup.html:/app/legacy/DashBoard-organization-backup.html:ro
      - ./legacy/static:/app/legacy/static:ro
      - ./incoming-files:/app/incoming-files:ro
      - uploads_data:/data/uploads
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/api/health')\" || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 8
      start_period: 20s
  db:
    image: postgres:16-alpine
    environment:
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 10s

volumes:
  postgres_data:
  uploads_data:


@@ -214,6 +214,16 @@
- The seat map for a specific date can be reconstructed
- Can be extended into a period-comparison or change-tracking UI

### Current implementation status

- `history_revisions`
- `member_versions`
- `seat_assignment_versions`
- `entity_change_events`

As a first step, this adds the tables and a baseline backfill path.
The org-chart and seat-map write APIs have not yet been fully converted to append a version row on every modification.
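
Once that conversion happens, each update would presumably close the open version row and append a new snapshot, along the lines of this sketch (illustrative SQL only; the member id, values, and revision bookkeeping would come from the API layer):

```sql
-- Sketch of the intended append-on-write pattern (not yet wired into the APIs).
-- 1) Close the member's currently open version row.
UPDATE member_versions
SET valid_to = NOW()
WHERE member_id = 42            -- hypothetical member id
  AND valid_to IS NULL;

-- 2) Append the new snapshot; omitted TEXT columns fall back to their '' defaults.
INSERT INTO member_versions (member_id, name, team, valid_from, revision_no, change_reason)
VALUES (42, 'Hong Gildong', 'Platform Team', NOW(), 2, 'team-transfer');
```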
### Design document

- [HISTORY_ASOF_DB_PLAN.md](/home/hyunho/projects/mh-dashboard-organization/docs/HISTORY_ASOF_DB_PLAN.md)


@@ -11,14 +11,20 @@
### Code paths

- Public `8080`: `/home/hyunho/projects/mh-dashboard-organization`
-- Dev `8081`: `/tmp/mh-dashboard-organization-dev`
+- Dev `8081`: `/home/hyunho/projects/mh-dashboard-organization`

### Dev compose baseline

- Public `8080` stack: `docker-compose.yml`
- Dev `8081` stack: `docker-compose.8081.yml`
- Default dev project name: `mh-dashboard-organization-dev`

### DB volumes

- Public `8080`: `mh-dashboard-organization_postgres_data`
- Dev `8081`: `mh-dashboard-organization-dev_postgres_data`

-In other words, the code and the DB are currently both separated.
+In other words, even though the code workspace is now shared, the compose project and the DB volume remain separated.

## Source of truth
@@ -161,11 +167,12 @@
Repeatable sync script:

- [sync_prod_db_to_dev.sh](/home/hyunho/projects/mh-dashboard-organization/scripts/sync_prod_db_to_dev.sh)
- [docker-compose.8081.yml](/home/hyunho/projects/mh-dashboard-organization/docker-compose.8081.yml)

Usage:

```bash
-chmod +x scripts/sync_prod_db_to_dev.sh
+docker compose -p mh-dashboard-organization-dev --env-file .env -f docker-compose.8081.yml up -d
./scripts/sync_prod_db_to_dev.sh minimal
./scripts/sync_prod_db_to_dev.sh full
```
@@ -179,6 +186,11 @@ chmod +x scripts/sync_prod_db_to_dev.sh
Caution:

- Before syncing, the script briefly stops the `8081` `proxy`, `frontend`, and `backend` services
- This is because API requests reading mid-sync state can collide with the DB truncate/restore, causing deadlocks or verification against partial data
- The script dumps the `8080` DB data, then empties and reloads the target tables in the `8081` DB (see the sketch after this list)
- Target-table data that existed only on `8081` is lost
- `seat_positions` is restored separately via a portable CSV path
- After the restore, `members.seat_label`, `auth.users`, and the history backfill are re-aligned
- After the run, row counts for the key tables and seat-consistency figures are printed
- So if the current dev DB state must be preserved, take a separate backup before running
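
For reference, the truncate script the sync generates has roughly this shape; the actual table list is built from the selected scope, and the `session_replication_role = replica` setting suppresses ordinary triggers while the tables are emptied:

```sql
BEGIN;
SET session_replication_role = replica;
-- illustrative table list; the real one is generated from the selected scope
TRUNCATE TABLE public.members, public.seat_positions RESTART IDENTITY CASCADE;
SET session_replication_role = DEFAULT;
COMMIT;
```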


@@ -25,6 +25,7 @@
- Verify access to the `8081` dev instance
- Verify access to the `8080` public instance
- Check in `docker compose ps` that `backend`, `frontend`, `proxy`, and `db` are healthy
- `8081` is started by default with `docker compose -p mh-dashboard-organization-dev --env-file .env -f docker-compose.8081.yml up -d`

### 2. Decide the data sync scope


@@ -4,7 +4,9 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
PROD_DIR="${ROOT_DIR}"
-DEV_DIR="${DEV_DIR:-/tmp/mh-dashboard-organization-dev}"
+DEV_DIR="${DEV_DIR:-${ROOT_DIR}}"
+DEV_PROJECT_NAME="${DEV_PROJECT_NAME:-mh-dashboard-organization-dev}"
+DEV_COMPOSE_FILE="${DEV_COMPOSE_FILE:-${ROOT_DIR}/docker-compose.8081.yml}"
SCOPE="${1:-minimal}"

if [[ ! -f "${PROD_DIR}/docker-compose.yml" ]]; then
@@ -14,7 +16,13 @@ fi
if [[ ! -f "${DEV_DIR}/docker-compose.yml" ]]; then
  echo "Development workspace not found: ${DEV_DIR}" >&2
-  echo "Set DEV_DIR=/path/to/dev-copy if the dev workspace moved." >&2
+  echo "Set DEV_DIR=/path/to/workspace if the dev workspace moved." >&2
  exit 1
fi

+if [[ ! -f "${DEV_COMPOSE_FILE}" ]]; then
+  echo "Development compose file not found: ${DEV_COMPOSE_FILE}" >&2
+  echo "Set DEV_COMPOSE_FILE=/path/to/dev-compose.yml if the dev compose file moved." >&2
+  exit 1
+fi
@@ -74,21 +82,30 @@ case "${SCOPE}" in
esac

PROD_COMPOSE=(docker compose --project-directory "${PROD_DIR}")
-DEV_COMPOSE=(docker compose --project-directory "${DEV_DIR}")
+DEV_COMPOSE=(docker compose -p "${DEV_PROJECT_NAME}" --env-file "${DEV_DIR}/.env" -f "${DEV_COMPOSE_FILE}")

+run_compose() {
+  local dir="$1"
+  shift
+  (cd "${dir}" && "$@")
+}

require_service() {
  local dir="$1"
  shift
-  (cd "${dir}" && "$@") >/dev/null
+  run_compose "${dir}" "$@" >/dev/null
}

-echo "[1/6] Checking source and target stacks"
+echo "[1/8] Checking source and target stacks"
require_service "${PROD_DIR}" "${PROD_COMPOSE[@]}" ps
require_service "${DEV_DIR}" "${DEV_COMPOSE[@]}" ps

-echo "[2/6] Ensuring db containers are reachable"
+echo "[2/8] Ensuring db containers are reachable"
-(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}") >/dev/null
-(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}") >/dev/null
+run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null

+echo "[3/8] Pausing 8081 app services to avoid partial reads during sync"
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" stop proxy frontend backend >/dev/null

WORK_DIR="$(mktemp -d)"
cleanup() {
@@ -100,53 +117,54 @@ DUMP_FILE="${WORK_DIR}/prod_to_dev_${SCOPE}.sql"
TRUNCATE_FILE="${WORK_DIR}/truncate_${SCOPE}.sql"
SEAT_POSITIONS_FILE="${WORK_DIR}/seat_positions.csv"
SEQUENCE_FIX_FILE="${WORK_DIR}/sequence_fix.sql"
-AUTH_SYNC_FILE="${WORK_DIR}/auth_sync.py"

-echo "[3/6] Building truncate script for ${SCOPE} scope"
+echo "[4/8] Building truncate script for ${SCOPE} scope"
{
  echo "BEGIN;"
  echo "SET session_replication_role = replica;"
-  printf 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE;\n' "$(IFS=,; echo "${TABLES[*]}")"
+  printf 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE;\n' "$(printf 'public.%s,' "${TABLES[@]}" | sed 's/,$//')"
  echo "SET session_replication_role = DEFAULT;"
  echo "COMMIT;"
} > "${TRUNCATE_FILE}"

-echo "[4/6] Dumping ${SCOPE} data from 8080 source DB"
+echo "[5/8] Dumping ${SCOPE} data from 8080 source DB"
TABLE_ARGS=()
for table in "${TABLES[@]}"; do
  TABLE_ARGS+=(-t "public.${table}")
done
-(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db \
+run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
  pg_dump -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
  --data-only --column-inserts --disable-triggers --no-owner --no-privileges \
-  "${TABLE_ARGS[@]}") > "${DUMP_FILE}"
+  "${TABLE_ARGS[@]}" > "${DUMP_FILE}"

-echo "[4.5/6] Exporting seat_positions in portable format"
+echo "[5.5/8] Exporting seat_positions in portable format"
-(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db \
+run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
  psql -At -F ',' -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
  -c "COPY (
        SELECT member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at
        FROM public.seat_positions
        ORDER BY member_id
-      ) TO STDOUT WITH CSV") > "${SEAT_POSITIONS_FILE}"
+      ) TO STDOUT WITH CSV" > "${SEAT_POSITIONS_FILE}"

-echo "[5/6] Truncating target tables in 8081 dev DB"
+echo "[6/8] Truncating target tables in 8081 dev DB"
-(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
-  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${TRUNCATE_FILE}"
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
+  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${TRUNCATE_FILE}"

-echo "[6/6] Restoring dumped data into 8081 dev DB"
+echo "[7/8] Restoring dumped data into 8081 dev DB"
-(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
-  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${DUMP_FILE}"
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
+  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${DUMP_FILE}"

-echo "[6.5/6] Restoring portable seat_positions and rebuilding auth users"
+echo "[7.5/8] Restoring portable seat_positions and rebuilding auth users"
-(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
-  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-  -c "DELETE FROM public.seat_positions" >/dev/null)
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
+  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
+  -c "DELETE FROM public.seat_positions" >/dev/null
-(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
-  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-  -c "COPY public.seat_positions (member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at) FROM STDIN WITH CSV" >/dev/null) < "${SEAT_POSITIONS_FILE}"
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
+  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
+  -c "COPY public.seat_positions (member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at) FROM STDIN WITH CSV" >/dev/null < "${SEAT_POSITIONS_FILE}"

-cat > "${AUTH_SYNC_FILE}" <<'PY'
+run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d backend >/dev/null
+AUTH_SYNC_PY="$(cat <<'PY'
from backend.app.main import get_conn, sync_auth_users_from_members
+from backend.app.db import ensure_history_backfill

with get_conn() as conn:
    with conn.cursor() as cur:
@@ -160,12 +178,14 @@ with get_conn() as conn:
""" """
) )
sync_auth_users_from_members(cur) sync_auth_users_from_members(cur)
ensure_history_backfill(cur)
conn.commit() conn.commit()
print("members, seat labels, and auth users synced") print("members, seat labels, auth users, and history backfill synced")
PY PY
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T backend python -) < "${AUTH_SYNC_FILE}" )"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T backend python -c "${AUTH_SYNC_PY}"
echo "[6.8/6] Resetting serial sequences" echo "[7.8/8] Resetting serial sequences"
{ {
echo "SELECT setval(pg_get_serial_sequence('public.members', 'id'), COALESCE((SELECT MAX(id) FROM public.members), 1), true);" echo "SELECT setval(pg_get_serial_sequence('public.members', 'id'), COALESCE((SELECT MAX(id) FROM public.members), 1), true);"
echo "SELECT setval(pg_get_serial_sequence('public.member_aliases', 'id'), COALESCE((SELECT MAX(id) FROM public.member_aliases), 1), true);" echo "SELECT setval(pg_get_serial_sequence('public.member_aliases', 'id'), COALESCE((SELECT MAX(id) FROM public.member_aliases), 1), true);"
@@ -188,11 +208,53 @@ echo "[6.8/6] Resetting serial sequences"
echo "SELECT setval(pg_get_serial_sequence('public.integration_vouchers', 'id'), COALESCE((SELECT MAX(id) FROM public.integration_vouchers), 1), true);" echo "SELECT setval(pg_get_serial_sequence('public.integration_vouchers', 'id'), COALESCE((SELECT MAX(id) FROM public.integration_vouchers), 1), true);"
fi fi
} > "${SEQUENCE_FIX_FILE}" } > "${SEQUENCE_FIX_FILE}"
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \ run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${SEQUENCE_FIX_FILE}" psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${SEQUENCE_FIX_FILE}"
VERIFY_SQL="${WORK_DIR}/verify_${SCOPE}.sql"
{
  cat <<'SQL'
SELECT 'members' AS table_name, COUNT(*)::text AS value FROM public.members
UNION ALL
SELECT 'member_retirements', COUNT(*)::text FROM public.member_retirements
UNION ALL
SELECT 'seat_maps', COUNT(*)::text FROM public.seat_maps
UNION ALL
SELECT 'seat_slots', COUNT(*)::text FROM public.seat_slots
UNION ALL
SELECT 'seat_positions', COUNT(*)::text FROM public.seat_positions
UNION ALL
SELECT 'members_with_seat_label', COUNT(*)::text FROM public.members WHERE COALESCE(seat_label, '') <> ''
UNION ALL
SELECT 'seat_positions_without_slot', COUNT(*)::text FROM public.seat_positions WHERE seat_slot_id IS NULL
UNION ALL
SELECT 'seat_label_mismatch', COUNT(*)::text
FROM public.members m
JOIN public.seat_positions sp ON sp.member_id = m.id
WHERE COALESCE(m.seat_label, '') <> COALESCE(sp.seat_label, '')
UNION ALL
SELECT 'auth_users', COUNT(*)::text FROM auth.users
ORDER BY table_name;
SQL
  if [[ "${SCOPE}" == "analysis" || "${SCOPE}" == "full" ]]; then
    cat <<'SQL'
SELECT 'integration_work_logs', COUNT(*)::text FROM public.integration_work_logs
UNION ALL
SELECT 'integration_vouchers', COUNT(*)::text FROM public.integration_vouchers
ORDER BY 1;
SQL
  fi
} > "${VERIFY_SQL}"
echo "[8/8] Restarting 8081 app services and printing verification snapshot"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d frontend proxy >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" -f - < "${VERIFY_SQL}"
echo echo
echo "Sync complete." echo "Sync complete."
echo "Source: ${PROD_DIR} (8080)" echo "Source: ${PROD_DIR} (8080)"
echo "Target: ${DEV_DIR} (8081)" echo "Target: ${DEV_DIR} (8081)"
echo "Dev compose: ${DEV_COMPOSE_FILE}"
echo "Dev project: ${DEV_PROJECT_NAME}"
echo "Scope : ${SCOPE}" echo "Scope : ${SCOPE}"