feat: stabilize dev sync and add history tables

This commit is contained in:
hyunho
2026-03-30 09:15:31 +09:00
parent bc60f932c3
commit cbae8769bf
6 changed files with 339 additions and 37 deletions

View File

@@ -4,7 +4,9 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
PROD_DIR="${ROOT_DIR}"
DEV_DIR="${DEV_DIR:-/tmp/mh-dashboard-organization-dev}"
DEV_DIR="${DEV_DIR:-${ROOT_DIR}}"
DEV_PROJECT_NAME="${DEV_PROJECT_NAME:-mh-dashboard-organization-dev}"
DEV_COMPOSE_FILE="${DEV_COMPOSE_FILE:-${ROOT_DIR}/docker-compose.8081.yml}"
SCOPE="${1:-minimal}"
if [[ ! -f "${PROD_DIR}/docker-compose.yml" ]]; then
@@ -14,7 +16,13 @@ fi
if [[ ! -f "${DEV_DIR}/docker-compose.yml" ]]; then
echo "Development workspace not found: ${DEV_DIR}" >&2
echo "Set DEV_DIR=/path/to/dev-copy if the dev workspace moved." >&2
echo "Set DEV_DIR=/path/to/workspace if the dev workspace moved." >&2
exit 1
fi
if [[ ! -f "${DEV_COMPOSE_FILE}" ]]; then
echo "Development compose file not found: ${DEV_COMPOSE_FILE}" >&2
echo "Set DEV_COMPOSE_FILE=/path/to/dev-compose.yml if the dev compose file moved." >&2
exit 1
fi
@@ -74,21 +82,30 @@ case "${SCOPE}" in
esac
PROD_COMPOSE=(docker compose --project-directory "${PROD_DIR}")
DEV_COMPOSE=(docker compose --project-directory "${DEV_DIR}")
DEV_COMPOSE=(docker compose -p "${DEV_PROJECT_NAME}" --env-file "${DEV_DIR}/.env" -f "${DEV_COMPOSE_FILE}")
run_compose() {
  # Execute an arbitrary command from inside a given directory without
  # disturbing the caller's working directory (runs in a subshell).
  #   $1   - directory to run in
  #   $2.. - command and its arguments
  # Exit status is that of the command, or of cd if the directory is bad.
  local target_dir=$1
  shift
  (
    cd "${target_dir}" && "$@"
  )
}
require_service() {
# Sanity probe: run a compose command (typically `ps`) inside the given
# directory with stdout discarded; under `set -euo pipefail` a non-zero
# exit aborts the whole script, which is the "require" semantics.
#   $1   - directory to run in
#   $2.. - compose command and its arguments
local dir="$1"
shift
# NOTE(review): the next two lines look like an un-merged diff rendering —
# the subshell form is the pre-refactor body and the run_compose call is
# its replacement. As shown here the probe would execute TWICE; confirm
# that only the run_compose line survives in the actual script.
(cd "${dir}" && "$@") >/dev/null
run_compose "${dir}" "$@" >/dev/null
}
echo "[1/6] Checking source and target stacks"
echo "[1/8] Checking source and target stacks"
require_service "${PROD_DIR}" "${PROD_COMPOSE[@]}" ps
require_service "${DEV_DIR}" "${DEV_COMPOSE[@]}" ps
echo "[2/6] Ensuring db containers are reachable"
(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}") >/dev/null
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}") >/dev/null
echo "[2/8] Ensuring db containers are reachable"
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null
echo "[3/8] Pausing 8081 app services to avoid partial reads during sync"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" stop proxy frontend backend >/dev/null
WORK_DIR="$(mktemp -d)"
cleanup() {
@@ -100,53 +117,54 @@ DUMP_FILE="${WORK_DIR}/prod_to_dev_${SCOPE}.sql"
TRUNCATE_FILE="${WORK_DIR}/truncate_${SCOPE}.sql"
SEAT_POSITIONS_FILE="${WORK_DIR}/seat_positions.csv"
SEQUENCE_FIX_FILE="${WORK_DIR}/sequence_fix.sql"
AUTH_SYNC_FILE="${WORK_DIR}/auth_sync.py"
echo "[3/6] Building truncate script for ${SCOPE} scope"
echo "[4/8] Building truncate script for ${SCOPE} scope"
{
echo "BEGIN;"
echo "SET session_replication_role = replica;"
printf 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE;\n' "$(IFS=,; echo "${TABLES[*]}")"
printf 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE;\n' "$(printf 'public.%s,' "${TABLES[@]}" | sed 's/,$//')"
echo "SET session_replication_role = DEFAULT;"
echo "COMMIT;"
} > "${TRUNCATE_FILE}"
echo "[4/6] Dumping ${SCOPE} data from 8080 source DB"
echo "[5/8] Dumping ${SCOPE} data from 8080 source DB"
TABLE_ARGS=()
for table in "${TABLES[@]}"; do
TABLE_ARGS+=(-t "public.${table}")
done
(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db \
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
pg_dump -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
--data-only --column-inserts --disable-triggers --no-owner --no-privileges \
"${TABLE_ARGS[@]}") > "${DUMP_FILE}"
"${TABLE_ARGS[@]}" > "${DUMP_FILE}"
echo "[4.5/6] Exporting seat_positions in portable format"
(cd "${PROD_DIR}" && "${PROD_COMPOSE[@]}" exec -T db \
echo "[5.5/8] Exporting seat_positions in portable format"
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
psql -At -F ',' -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-c "COPY (
SELECT member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at
FROM public.seat_positions
ORDER BY member_id
) TO STDOUT WITH CSV") > "${SEAT_POSITIONS_FILE}"
) TO STDOUT WITH CSV" > "${SEAT_POSITIONS_FILE}"
echo "[5/6] Truncating target tables in 8081 dev DB"
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${TRUNCATE_FILE}"
echo "[6/8] Truncating target tables in 8081 dev DB"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${TRUNCATE_FILE}"
echo "[6/6] Restoring dumped data into 8081 dev DB"
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${DUMP_FILE}"
echo "[7/8] Restoring dumped data into 8081 dev DB"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${DUMP_FILE}"
echo "[6.5/6] Restoring portable seat_positions and rebuilding auth users"
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
echo "[7.5/8] Restoring portable seat_positions and rebuilding auth users"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-c "DELETE FROM public.seat_positions" >/dev/null)
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
-c "DELETE FROM public.seat_positions" >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-c "COPY public.seat_positions (member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at) FROM STDIN WITH CSV" >/dev/null) < "${SEAT_POSITIONS_FILE}"
cat > "${AUTH_SYNC_FILE}" <<'PY'
-c "COPY public.seat_positions (member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at) FROM STDIN WITH CSV" >/dev/null < "${SEAT_POSITIONS_FILE}"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d backend >/dev/null
AUTH_SYNC_PY="$(cat <<'PY'
from backend.app.main import get_conn, sync_auth_users_from_members
from backend.app.db import ensure_history_backfill
with get_conn() as conn:
with conn.cursor() as cur:
@@ -160,12 +178,14 @@ with get_conn() as conn:
"""
)
sync_auth_users_from_members(cur)
ensure_history_backfill(cur)
conn.commit()
print("members, seat labels, and auth users synced")
print("members, seat labels, auth users, and history backfill synced")
PY
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T backend python -) < "${AUTH_SYNC_FILE}"
)"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T backend python -c "${AUTH_SYNC_PY}"
echo "[6.8/6] Resetting serial sequences"
echo "[7.8/8] Resetting serial sequences"
{
echo "SELECT setval(pg_get_serial_sequence('public.members', 'id'), COALESCE((SELECT MAX(id) FROM public.members), 1), true);"
echo "SELECT setval(pg_get_serial_sequence('public.member_aliases', 'id'), COALESCE((SELECT MAX(id) FROM public.member_aliases), 1), true);"
@@ -188,11 +208,53 @@ echo "[6.8/6] Resetting serial sequences"
echo "SELECT setval(pg_get_serial_sequence('public.integration_vouchers', 'id'), COALESCE((SELECT MAX(id) FROM public.integration_vouchers), 1), true);"
fi
} > "${SEQUENCE_FIX_FILE}"
(cd "${DEV_DIR}" && "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null) < "${SEQUENCE_FIX_FILE}"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${SEQUENCE_FIX_FILE}"
VERIFY_SQL="${WORK_DIR}/verify_${SCOPE}.sql"
{
cat <<'SQL'
SELECT 'members' AS table_name, COUNT(*)::text AS value FROM public.members
UNION ALL
SELECT 'member_retirements', COUNT(*)::text FROM public.member_retirements
UNION ALL
SELECT 'seat_maps', COUNT(*)::text FROM public.seat_maps
UNION ALL
SELECT 'seat_slots', COUNT(*)::text FROM public.seat_slots
UNION ALL
SELECT 'seat_positions', COUNT(*)::text FROM public.seat_positions
UNION ALL
SELECT 'members_with_seat_label', COUNT(*)::text FROM public.members WHERE COALESCE(seat_label, '') <> ''
UNION ALL
SELECT 'seat_positions_without_slot', COUNT(*)::text FROM public.seat_positions WHERE seat_slot_id IS NULL
UNION ALL
SELECT 'seat_label_mismatch', COUNT(*)::text
FROM public.members m
JOIN public.seat_positions sp ON sp.member_id = m.id
WHERE COALESCE(m.seat_label, '') <> COALESCE(sp.seat_label, '')
UNION ALL
SELECT 'auth_users', COUNT(*)::text FROM auth.users
ORDER BY table_name;
SQL
if [[ "${SCOPE}" == "analysis" || "${SCOPE}" == "full" ]]; then
cat <<'SQL'
SELECT 'integration_work_logs', COUNT(*)::text FROM public.integration_work_logs
UNION ALL
SELECT 'integration_vouchers', COUNT(*)::text FROM public.integration_vouchers
ORDER BY 1;
SQL
fi
} > "${VERIFY_SQL}"
echo "[8/8] Restarting 8081 app services and printing verification snapshot"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d frontend proxy >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" -f - < "${VERIFY_SQL}"
echo
echo "Sync complete."
echo "Source: ${PROD_DIR} (8080)"
echo "Target: ${DEV_DIR} (8081)"
echo "Dev compose: ${DEV_COMPOSE_FILE}"
echo "Dev project: ${DEV_PROJECT_NAME}"
echo "Scope : ${SCOPE}"