Files
MH-DashBoard-organization/scripts/sync_prod_db_to_dev.sh
2026-04-02 11:17:01 +09:00

269 lines
11 KiB
Bash
Executable File

#!/usr/bin/env bash
# Sync selected production (8080) DB tables into the development (8081) stack.
# Usage: sync_prod_db_to_dev.sh [minimal|analysis|full]
# Overridable env: DEV_DIR, DEV_PROJECT_NAME, DEV_COMPOSE_FILE.
set -euo pipefail
# Repository root = parent directory of this script's own directory.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Source (production) workspace is the repo checkout itself.
PROD_DIR="${ROOT_DIR}"
# Target (development) workspace, compose project name, and compose file.
DEV_DIR="${DEV_DIR:-/tmp/mh-dashboard-organization-dev-worktree}"
DEV_PROJECT_NAME="${DEV_PROJECT_NAME:-mh-dashboard-organization-dev}"
DEV_COMPOSE_FILE="${DEV_COMPOSE_FILE:-${DEV_DIR}/docker-compose.8081.yml}"
# Sync scope from the first CLI argument; defaults to "minimal".
SCOPE="${1:-minimal}"
# Integration ("analysis") tables, synced by the analysis and full scopes.
ANALYSIS_TABLES=(
integration_import_batches
integration_raw_organization_rows
integration_raw_mh_rows
integration_raw_mh_pm_rows
integration_raw_payment_rows
integration_project_aliases
integration_project_category_mappings
integration_project_pm_assignments
integration_projects
integration_work_logs
integration_work_log_segments
integration_vouchers
)
# Tables that the minimal scope dumps from prod but does NOT truncate in dev
# (they are excluded from TABLES, which drives the truncate script).
# NOTE(review): these rows are still restored via INSERT in step 7 without a
# preceding truncate — if the dev DB already contains rows with the same ids,
# that pass would conflict. Confirm the intended "preserve" semantics.
MINIMAL_PRESERVE_TABLES=(
integration_project_pm_assignments
integration_work_logs
integration_work_log_segments
integration_vouchers
)
# fail MESSAGE... — print each message on its own line to stderr and abort.
fail() {
  printf '%s\n' "$@" >&2
  exit 1
}

# Preflight: both workspaces and the dev compose file must exist before we
# touch either stack.
[[ -f "${PROD_DIR}/docker-compose.yml" ]] || fail \
  "Production workspace not found: ${PROD_DIR}"
[[ -f "${DEV_DIR}/docker-compose.yml" ]] || fail \
  "Development workspace not found: ${DEV_DIR}" \
  "Set DEV_DIR=/path/to/workspace if the dev workspace moved."
[[ -f "${DEV_COMPOSE_FILE}" ]] || fail \
  "Development compose file not found: ${DEV_COMPOSE_FILE}" \
  "Set DEV_COMPOSE_FILE=/path/to/dev-compose.yml if the dev compose file moved."
# Member/seat tables, synced by the minimal and full scopes.
MEMBER_TABLES=(
  member_aliases
  member_overrides
  member_retirements
  members
  seat_maps
  seat_slots
)

# Resolve the requested scope into the set of tables to truncate and restore.
case "${SCOPE}" in
  minimal)
    TABLES=("${MEMBER_TABLES[@]}")
    ;;
  analysis)
    TABLES=("${ANALYSIS_TABLES[@]}")
    ;;
  full)
    TABLES=("${ANALYSIS_TABLES[@]}" "${MEMBER_TABLES[@]}")
    ;;
  *)
    echo "Usage: $0 [minimal|analysis|full]" >&2
    exit 1
    ;;
esac

# In minimal scope a few integration tables are dumped as well, but they stay
# out of TABLES so the truncate script leaves them alone.
PRESERVE_TABLES=()
if [[ "${SCOPE}" == "minimal" ]]; then
  PRESERVE_TABLES=("${MINIMAL_PRESERVE_TABLES[@]}")
fi

# Everything handed to pg_dump: the scope tables plus any preserved extras.
DUMP_TABLES=("${TABLES[@]}")
if [[ ${#PRESERVE_TABLES[@]} -gt 0 ]]; then
  DUMP_TABLES+=("${PRESERVE_TABLES[@]}")
fi
# Base `docker compose` invocations for the prod (8080) and dev (8081) stacks.
PROD_COMPOSE=(docker compose --project-directory "${PROD_DIR}")
DEV_COMPOSE=(docker compose -p "${DEV_PROJECT_NAME}" --env-file "${DEV_DIR}/.env" -f "${DEV_COMPOSE_FILE}")

# run_compose DIR CMD... — run CMD from DIR in a subshell so the caller's
# working directory is never changed.
run_compose() {
  local workdir="$1"
  shift
  (
    cd "${workdir}" || exit
    "$@"
  )
}

# require_service DIR CMD... — like run_compose but with stdout discarded;
# a non-zero exit status still aborts the script via `set -e`.
require_service() {
  local workdir="$1"
  shift
  run_compose "${workdir}" "$@" >/dev/null
}
echo "[1/8] Checking source and target stacks"
require_service "${PROD_DIR}" "${PROD_COMPOSE[@]}" ps
require_service "${DEV_DIR}" "${DEV_COMPOSE[@]}" ps

echo "[2/8] Ensuring db containers are reachable"
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db pg_isready -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null

echo "[3/8] Pausing 8081 app services to avoid partial reads during sync"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" stop proxy frontend backend >/dev/null

# Scratch directory for the generated SQL/CSV artifacts; removed on every
# exit path (success, error, signal) by the EXIT trap below.
WORK_DIR="$(mktemp -d)"
cleanup() {
  # ":?" aborts instead of expanding to `rm -rf ""` should WORK_DIR ever be
  # unset/empty, and "--" stops option parsing — guards the recursive delete.
  rm -rf -- "${WORK_DIR:?}"
}
trap cleanup EXIT

DUMP_FILE="${WORK_DIR}/prod_to_dev_${SCOPE}.sql"
TRUNCATE_FILE="${WORK_DIR}/truncate_${SCOPE}.sql"
SEAT_POSITIONS_FILE="${WORK_DIR}/seat_positions.csv"
SEQUENCE_FIX_FILE="${WORK_DIR}/sequence_fix.sql"
echo "[4/8] Building truncate script for ${SCOPE} scope"
# Join the scope tables into "public.a,public.b,..." for a single TRUNCATE.
truncate_list=""
for table in "${TABLES[@]}"; do
  truncate_list+="public.${table},"
done
truncate_list="${truncate_list%,}"
# replica mode suppresses ordinary triggers and FK enforcement while the
# tables are reset; RESTART IDENTITY + CASCADE keeps dependent tables and
# their sequences consistent.
{
  echo "BEGIN;"
  echo "SET session_replication_role = replica;"
  printf 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE;\n' "${truncate_list}"
  echo "SET session_replication_role = DEFAULT;"
  echo "COMMIT;"
} > "${TRUNCATE_FILE}"

echo "[5/8] Dumping ${SCOPE} data from 8080 source DB"
# One -t flag per table limits pg_dump to exactly the sync set.
TABLE_ARGS=()
for table in "${DUMP_TABLES[@]}"; do
  TABLE_ARGS+=(-t "public.${table}")
done
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
  pg_dump -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
  --data-only --column-inserts --disable-triggers --no-owner --no-privileges \
  "${TABLE_ARGS[@]}" > "${DUMP_FILE}"
echo "[5.5/8] Exporting seat_positions in portable format"
# COPY ... TO STDOUT produces its own CSV stream, so psql's field/format
# flags (-A/-t/-F) have no effect on it — they were misleading and are
# dropped. ON_ERROR_STOP is added for consistency with the other psql calls.
run_compose "${PROD_DIR}" "${PROD_COMPOSE[@]}" exec -T db \
  psql -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
  -c "COPY (
        SELECT member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at
        FROM public.seat_positions
        ORDER BY member_id
      ) TO STDOUT WITH CSV" > "${SEAT_POSITIONS_FILE}"
echo "[6/8] Truncating target tables in 8081 dev DB"
# Apply the generated truncate transaction; ON_ERROR_STOP makes psql exit
# non-zero on the first failed statement so `set -e` aborts the sync.
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${TRUNCATE_FILE}"
echo "[7/8] Restoring dumped data into 8081 dev DB"
# Replay the pg_dump INSERT script captured from the prod DB.
# NOTE(review): in minimal scope this dump also contains the "preserve"
# tables, which were NOT truncated above — if dev already holds rows with the
# same ids, these INSERTs would conflict. Confirm the intended semantics.
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${DUMP_FILE}"
echo "[7.5/8] Restoring portable seat_positions and rebuilding auth users"
# seat_positions is synced separately via CSV: clear the dev table first...
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-c "DELETE FROM public.seat_positions" >/dev/null
# ...then bulk-load the CSV exported in step 5.5 (explicit column list must
# match the export's SELECT order).
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" \
-c "COPY public.seat_positions (member_id, seat_map_id, seat_slot_id, row_index, col_index, seat_label, updated_at) FROM STDIN WITH CSV" >/dev/null < "${SEAT_POSITIONS_FILE}"
# The backend container is needed to run the Python sync below.
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d backend >/dev/null

# Rebuild member seat labels, auth users, and history inside the dev backend.
# BUG FIX: the embedded Python previously had no indentation at all (the
# `with` bodies sat at column 0), which raises IndentationError at parse time;
# the block structure is restored here. The heredoc delimiter stays quoted
# ('PY') so nothing inside is expanded by the shell.
AUTH_SYNC_PY="$(cat <<'PY'
from backend.app.main import get_conn, sync_auth_users_from_members
from backend.app.db import ensure_history_backfill

with get_conn() as conn:
    with conn.cursor() as cur:
        # Blank every label first so members without a seat_positions row do
        # not keep a stale label, then re-apply labels from seat_positions.
        cur.execute("UPDATE members SET seat_label = ''")
        cur.execute(
            """
            UPDATE members AS m
            SET seat_label = sp.seat_label
            FROM seat_positions AS sp
            WHERE sp.member_id = m.id
            """
        )
        sync_auth_users_from_members(cur)
        ensure_history_backfill(cur)
    conn.commit()

print("members, seat labels, auth users, and history backfill synced")
PY
)"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T backend python -c "${AUTH_SYNC_PY}"
echo "[7.8/8] Resetting serial sequences"
# emit_sequence_reset TABLE — print one setval() statement that realigns the
# table's id sequence with MAX(id) (falling back to 1 for empty tables).
emit_sequence_reset() {
  local table="$1"
  printf "SELECT setval(pg_get_serial_sequence('public.%s', 'id'), COALESCE((SELECT MAX(id) FROM public.%s), 1), true);\n" \
    "${table}" "${table}"
}
{
  for seq_table in members member_aliases member_overrides member_retirements seat_maps seat_slots; do
    emit_sequence_reset "${seq_table}"
  done
  # Integration sequences only matter when integration data was synced.
  if [[ "${SCOPE}" == "analysis" || "${SCOPE}" == "full" || "${#PRESERVE_TABLES[@]}" -gt 0 ]]; then
    for seq_table in "${ANALYSIS_TABLES[@]}"; do
      emit_sequence_reset "${seq_table}"
    done
  fi
} > "${SEQUENCE_FIX_FILE}"
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" >/dev/null < "${SEQUENCE_FIX_FILE}"
# Build the verification snapshot that gets printed at the end of the run.
VERIFY_SQL="${WORK_DIR}/verify_${SCOPE}.sql"
{
# Always-on counts plus seat-label consistency checks; a healthy sync shows
# seat_positions_without_slot = 0 and seat_label_mismatch = 0.
cat <<'SQL'
SELECT 'members' AS table_name, COUNT(*)::text AS value FROM public.members
UNION ALL
SELECT 'member_retirements', COUNT(*)::text FROM public.member_retirements
UNION ALL
SELECT 'seat_maps', COUNT(*)::text FROM public.seat_maps
UNION ALL
SELECT 'seat_slots', COUNT(*)::text FROM public.seat_slots
UNION ALL
SELECT 'seat_positions', COUNT(*)::text FROM public.seat_positions
UNION ALL
SELECT 'members_with_seat_label', COUNT(*)::text FROM public.members WHERE COALESCE(seat_label, '') <> ''
UNION ALL
SELECT 'seat_positions_without_slot', COUNT(*)::text FROM public.seat_positions WHERE seat_slot_id IS NULL
UNION ALL
SELECT 'seat_label_mismatch', COUNT(*)::text
FROM public.members m
JOIN public.seat_positions sp ON sp.member_id = m.id
WHERE COALESCE(m.seat_label, '') <> COALESCE(sp.seat_label, '')
UNION ALL
SELECT 'auth_users', COUNT(*)::text FROM auth.users
ORDER BY table_name;
SQL
# Extra counts (as a second result set) whenever integration tables were part
# of the sync — same condition as the sequence-reset step.
if [[ "${SCOPE}" == "analysis" || "${SCOPE}" == "full" || "${#PRESERVE_TABLES[@]}" -gt 0 ]]; then
cat <<'SQL'
SELECT 'integration_work_logs', COUNT(*)::text FROM public.integration_work_logs
UNION ALL
SELECT 'integration_vouchers', COUNT(*)::text FROM public.integration_vouchers
ORDER BY 1;
SQL
fi
} > "${VERIFY_SQL}"
echo "[8/8] Restarting 8081 app services and printing verification snapshot"
# Bring the remaining services back up (backend was restarted in step 7.5)
# and print the verification result set(s) to the terminal.
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" up -d frontend proxy >/dev/null
run_compose "${DEV_DIR}" "${DEV_COMPOSE[@]}" exec -T db \
  psql -q -v ON_ERROR_STOP=1 -U "${POSTGRES_USER:-orgapp}" -d "${POSTGRES_DB:-orgdb}" -f - < "${VERIFY_SQL}"
# Final summary (unquoted delimiter so the ${...} values expand).
cat <<EOF

Sync complete.
Source: ${PROD_DIR} (8080)
Target: ${DEV_DIR} (8081)
Dev compose: ${DEV_COMPOSE_FILE}
Dev project: ${DEV_PROJECT_NAME}
Scope : ${SCOPE}
EOF