// geoip-rest/cmd/server/main.go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/logger"

	"geoip-rest/internal/geo"
	"geoip-rest/internal/schedule"
)
const (
	defaultPort   = "8080"
	defaultDBPath = "/initial_data/GeoLite2-City.mmdb"
	defaultCron   = "5 0 * * *" // daily at 00:05 KST
	defaultJob    = "user-program-sync"
)
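
// main wires the GeoIP REST API: it builds the resolver from environment
// configuration, registers the HTTP routes, optionally starts the background
// scheduler, and serves until the listener fails. Relevant environment
// variables: GEOIP_BACKEND, GEOIP_DB_PATH, DATABASE_URL, GEOIP_LOOKUP_QUERY,
// PORT, ACCESS_LOG_PATH, ACCESS_LOG_MAX_BYTES, USER_PROGRAM_CRON_ENABLE.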
func main() {
	backend := geo.Backend(env("GEOIP_BACKEND", string(geo.BackendMMDB)))
	dbPath := env("GEOIP_DB_PATH", defaultDBPath)
	dbURL := os.Getenv("DATABASE_URL")
	lookupQuery := os.Getenv("GEOIP_LOOKUP_QUERY")
	port := env("PORT", defaultPort)

	resolver, err := geo.NewResolver(geo.Config{
		Backend:     backend,
		MMDBPath:    dbPath,
		DatabaseURL: dbURL,
		LookupQuery: lookupQuery,
	})
	if err != nil {
		log.Fatalf("failed to initialize resolver: %v", err)
	}
	defer resolver.Close()

	app := fiber.New(fiber.Config{
		DisableStartupMessage: true,
		ReadBufferSize:        16 * 1024, // allow larger request headers (e.g., proxy cookies)
	})
	app.Use(newFileLogger(env("ACCESS_LOG_PATH", "/log/api-access.log")))

	app.Get("/", func(c *fiber.Ctx) error {
		return c.JSON(fiber.Map{
			"service": "geoip-rest",
			"endpoints": []string{
				"/health",
				"/lookup?ip=<IPv4|IPv6>",
			},
		})
	})

	app.Get("/health", func(c *fiber.Ctx) error {
		return c.JSON(fiber.Map{"status": "ok"})
	})

	app.Get("/lookup", func(c *fiber.Ctx) error {
		ip := c.Query("ip")
		if ip == "" {
			ip = c.IP()
		}
		location, err := resolver.Lookup(ip)
		if err != nil {
			switch {
			case errors.Is(err, geo.ErrInvalidIP):
				return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
					"error": "invalid ip address",
				})
			case errors.Is(err, geo.ErrNotFound):
				return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
					"error": "location not found",
				})
			default:
				return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
					"error": "lookup failed",
				})
			}
		}
		return c.JSON(location)
	})

	log.Printf("starting GeoIP API on :%s backend=%s", port, backend)
	switch backend {
	case geo.BackendPostgres:
		log.Printf("using postgres DSN %s", sanitizeDBURL(dbURL))
	default:
		log.Printf("using mmdb path %s", dbPath)
	}

	stopScheduler := maybeStartScheduler()
	defer func() {
		if stopScheduler != nil {
			// stopScheduler blocks until the scheduler has stopped or a ~2s
			// timeout elapses (see maybeStartScheduler), so no further wait
			// on the returned context is needed here.
			stopScheduler()
		}
	}()

	if err := app.Listen(":" + port); err != nil {
		log.Fatalf("server stopped: %v", err)
	}
}
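
// newFileLogger builds the access-log middleware writing to path. If path is
// empty, or the log directory/file cannot be prepared, it degrades to a
// pass-through handler so the API keeps serving without access logs.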
func newFileLogger(path string) fiber.Handler {
	if path == "" {
		return func(c *fiber.Ctx) error { return c.Next() }
	}
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		log.Printf("access log disabled (mkdir failed: %v)", err)
		return func(c *fiber.Ctx) error { return c.Next() }
	}
	maxBytes := int64(envInt("ACCESS_LOG_MAX_BYTES", 10*1024*1024))
	writer, err := newRotatingWriter(path, maxBytes)
	if err != nil {
		log.Printf("access log disabled (open failed: %v)", err)
		return func(c *fiber.Ctx) error { return c.Next() }
	}
	format := "${time} ${ip} ${method} ${path} ${protocol} ${status} ${latency} headers=${reqHeaders}\n"
	cfg := logger.Config{
		Format:     format,
		TimeFormat: time.RFC3339,
		TimeZone:   "Asia/Seoul",
		Output:     writer,
	}
	return logger.New(cfg)
}
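
// rotatingWriter is a size-capped log writer: once a write would push the
// current file past maxBytes, the file is renamed with a timestamp suffix and
// a fresh file is opened at the original path.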
type rotatingWriter struct {
	mu       sync.Mutex
	path     string
	maxBytes int64
	file     *os.File
}

func newRotatingWriter(path string, maxBytes int64) (*rotatingWriter, error) {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, err
	}
	return &rotatingWriter{
		path:     path,
		maxBytes: maxBytes,
		file:     f,
	}, nil
}
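
// Write appends p to the current log file, rotating first when needed. The
// mutex makes it safe for concurrent use as the logger middleware's Output.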
func (w *rotatingWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if err := w.rotateIfNeeded(len(p)); err != nil {
		return 0, err
	}
	return w.file.Write(p)
}
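
// rotateIfNeeded closes and renames the current file to <path>.<timestamp>,
// then reopens a fresh file, when the pending write would exceed maxBytes.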
func (w *rotatingWriter) rotateIfNeeded(incoming int) error {
	info, err := w.file.Stat()
	if err != nil {
		return err
	}
	if info.Size()+int64(incoming) <= w.maxBytes {
		return nil
	}
	_ = w.file.Close()
	ts := time.Now().Format("20060102-150405")
	rotated := fmt.Sprintf("%s.%s", w.path, ts)
	if err := os.Rename(w.path, rotated); err != nil {
		// attempt to reopen original to keep logging
		w.file, _ = os.OpenFile(w.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
		return err
	}
	f, err := os.OpenFile(w.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	w.file = f
	return nil
}
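
// env returns the value of the environment variable key, or fallback when it
// is unset or empty.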
func env(key, fallback string) string {
	if val := os.Getenv(key); val != "" {
		return val
	}
	return fallback
}
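
// envBool parses a boolean-ish environment variable ("1/t/true/y/yes/on",
// "0/f/false/n/no/off", case-insensitive) and falls back on anything else.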
func envBool(key string, fallback bool) bool {
	val := os.Getenv(key)
	if val == "" {
		return fallback
	}
	switch strings.ToLower(val) {
	case "1", "t", "true", "y", "yes", "on":
		return true
	case "0", "f", "false", "n", "no", "off":
		return false
	default:
		return fallback
	}
}
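
// envInt parses an integer environment variable, falling back when it is
// unset or not a valid integer.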
func envInt(key string, fallback int) int {
	val := os.Getenv(key)
	if val == "" {
		return fallback
	}
	parsed, err := strconv.Atoi(val)
	if err != nil {
		return fallback
	}
	return parsed
}
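
// sanitizeDBURL hides credentials before the DSN is logged via
// url.URL.Redacted, which replaces the password with "xxxxx"; for example
// (illustrative DSN only), "postgres://geo:secret@db:5432/geoip" becomes
// "postgres://geo:xxxxx@db:5432/geoip".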
func sanitizeDBURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		return "postgres"
	}
	return u.Redacted()
}
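
// maybeStartScheduler starts the user-program sync cron job when
// USER_PROGRAM_CRON_ENABLE is set. It returns a stop function that asks the
// scheduler to shut down and waits at most ~2s for it, or nil when the
// scheduler is disabled or failed to start.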
func maybeStartScheduler() func() context.Context {
	enabled := envBool("USER_PROGRAM_CRON_ENABLE", false)
	if !enabled {
		return nil
	}
	cronExpr := defaultCron
	command := defaultJob
	sched, err := schedule.Start(schedule.Config{
		CronExpr: cronExpr,
		Command:  command,
		Logger:   log.Default(),
	})
	if err != nil {
		log.Printf("scheduler not started (error=%v)", err)
		return nil
	}
	return func() context.Context {
		ctx := sched.Stop()
		timer := time.NewTimer(2 * time.Second)
		select {
		case <-ctx.Done():
			timer.Stop()
			return ctx
		case <-timer.C:
			return ctx
		}
	}
}
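
// Example invocation (a sketch; the binary name and sample IP are assumed,
// the environment variables and paths match the defaults used above):
//
//	GEOIP_BACKEND=mmdb GEOIP_DB_PATH=/initial_data/GeoLite2-City.mmdb \
//	ACCESS_LOG_PATH=/log/api-access.log PORT=8080 ./server
//
//	curl "http://localhost:8080/health"
//	curl "http://localhost:8080/lookup?ip=8.8.8.8"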