Add Axum backend, Dockerfiles, and K8s deployment manifests

- Axum backend server: health check, transfer matrix cache (base64 BYTEA),
  route cache (JSONB), CORS, gzip compression, tracing
- Postgres schema: transfer_matrices + cached_routes tables with upserts
- Dockerfile.frontend: 3-stage (wasm-pack → SvelteKit → nginx:alpine)
- Dockerfile.backend: 2-stage (rust build → debian:bookworm-slim)
- nginx.conf: SPA fallback, WASM mime type, /api proxy to backend
- docker-compose.yml: Postgres + backend for local development
- K8s manifests: namespace, frontend/backend deployments with services,
  ingress routing, health probes, secret-based DATABASE_URL

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 12:22:07 -07:00
parent 21c842acdc
commit 47ef19d76c
12 changed files with 583 additions and 5 deletions

Cargo.toml (backend crate)

@@ -4,13 +4,13 @@ version = "0.1.0"
edition = "2021"
[dependencies]
orbital-mechanics = { path = "../orbital-mechanics" }
mass-driver-core = { path = "../mass-driver-core" }
axum = "0.8"
tokio = { version = "1", features = ["full"] }
-sqlx = { version = "0.8", features = ["runtime-tokio", "postgres"] }
+sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "chrono"] }
serde = { workspace = true }
serde_json = { workspace = true }
tower-http = { version = "0.6", features = ["cors", "compression-gzip"] }
tracing = "0.1"
tracing-subscriber = "0.3"
base64 = "0.22"
chrono = { version = "0.4", features = ["serde"] }
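
The chrono feature on sqlx is what lets the TIMESTAMPTZ columns created in db.rs below decode into chrono::DateTime<Utc> row fields.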

db.rs (new file)

@@ -0,0 +1,152 @@
use sqlx::PgPool;

pub async fn init_db(pool: &PgPool) -> Result<(), sqlx::Error> {
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS transfer_matrices (
            config_hash VARCHAR(64) PRIMARY KEY,
            station_count INTEGER NOT NULL,
            launch_velocity_kms REAL NOT NULL,
            matrix_bytes BYTEA NOT NULL,
            created_at TIMESTAMPTZ DEFAULT NOW(),
            accessed_at TIMESTAMPTZ DEFAULT NOW()
        );
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS cached_routes (
            id SERIAL PRIMARY KEY,
            config_hash VARCHAR(64) REFERENCES transfer_matrices(config_hash) ON DELETE CASCADE,
            from_station INTEGER NOT NULL,
            to_station INTEGER NOT NULL,
            route_json JSONB NOT NULL,
            created_at TIMESTAMPTZ DEFAULT NOW(),
            UNIQUE(config_hash, from_station, to_station)
        );
        "#,
    )
    .execute(pool)
    .await?;

    tracing::info!("Database tables initialized");
    Ok(())
}
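
// ---- Editor sketch (not part of this commit) --------------------------------
// accessed_at is written on every insert and read, but nothing in this commit
// consumes it yet; presumably it exists so stale cache entries can be evicted
// later. A minimal pass might look like this; the helper name and the cutoff
// are hypothetical:
pub async fn evict_stale_matrices(pool: &PgPool, max_age_days: i32) -> Result<u64, sqlx::Error> {
    let result = sqlx::query(
        // ON DELETE CASCADE on cached_routes cleans up dependent rows too.
        "DELETE FROM transfer_matrices WHERE accessed_at < NOW() - make_interval(days => $1)",
    )
    .bind(max_age_days)
    .execute(pool)
    .await?;
    Ok(result.rows_affected())
}
// ------------------------------------------------------------------------------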
pub async fn store_matrix(
    pool: &PgPool,
    config_hash: &str,
    station_count: i32,
    launch_velocity_kms: f32,
    matrix_bytes: &[u8],
) -> Result<(), sqlx::Error> {
    sqlx::query(
        r#"
        INSERT INTO transfer_matrices (config_hash, station_count, launch_velocity_kms, matrix_bytes)
        VALUES ($1, $2, $3, $4)
        ON CONFLICT (config_hash) DO UPDATE
        SET station_count = EXCLUDED.station_count,
            launch_velocity_kms = EXCLUDED.launch_velocity_kms,
            matrix_bytes = EXCLUDED.matrix_bytes,
            accessed_at = NOW()
        "#,
    )
    .bind(config_hash)
    .bind(station_count)
    .bind(launch_velocity_kms)
    .bind(matrix_bytes)
    .execute(pool)
    .await?;
    Ok(())
}

pub async fn get_matrix(
    pool: &PgPool,
    config_hash: &str,
) -> Result<Option<MatrixRow>, sqlx::Error> {
    // Update accessed_at and return the row
    let row = sqlx::query_as::<_, MatrixRow>(
        r#"
        UPDATE transfer_matrices
        SET accessed_at = NOW()
        WHERE config_hash = $1
        RETURNING config_hash, station_count, launch_velocity_kms, matrix_bytes, created_at, accessed_at
        "#,
    )
    .bind(config_hash)
    .fetch_optional(pool)
    .await?;
    Ok(row)
}
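
// Design note: the UPDATE ... RETURNING above touches accessed_at and fetches
// the row in a single statement, so a cache hit costs one round trip instead
// of a SELECT followed by a separate bookkeeping UPDATE.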
pub async fn store_route(
    pool: &PgPool,
    config_hash: &str,
    from_station: i32,
    to_station: i32,
    route_json: &serde_json::Value,
) -> Result<(), sqlx::Error> {
    sqlx::query(
        r#"
        INSERT INTO cached_routes (config_hash, from_station, to_station, route_json)
        VALUES ($1, $2, $3, $4)
        ON CONFLICT (config_hash, from_station, to_station) DO UPDATE
        SET route_json = EXCLUDED.route_json
        "#,
    )
    .bind(config_hash)
    .bind(from_station)
    .bind(to_station)
    .bind(route_json)
    .execute(pool)
    .await?;
    Ok(())
}

pub async fn get_route(
    pool: &PgPool,
    config_hash: &str,
    from_station: i32,
    to_station: i32,
) -> Result<Option<RouteRow>, sqlx::Error> {
    let row = sqlx::query_as::<_, RouteRow>(
        r#"
        SELECT id, config_hash, from_station, to_station, route_json, created_at
        FROM cached_routes
        WHERE config_hash = $1 AND from_station = $2 AND to_station = $3
        "#,
    )
    .bind(config_hash)
    .bind(from_station)
    .bind(to_station)
    .fetch_optional(pool)
    .await?;
    Ok(row)
}

#[derive(sqlx::FromRow, serde::Serialize)]
pub struct MatrixRow {
    pub config_hash: String,
    pub station_count: i32,
    pub launch_velocity_kms: f32,
    pub matrix_bytes: Vec<u8>,
    pub created_at: Option<chrono::DateTime<chrono::Utc>>,
    pub accessed_at: Option<chrono::DateTime<chrono::Utc>>,
}

#[derive(sqlx::FromRow, serde::Serialize)]
pub struct RouteRow {
    pub id: i32,
    pub config_hash: Option<String>,
    pub from_station: i32,
    pub to_station: i32,
    pub route_json: serde_json::Value,
    pub created_at: Option<chrono::DateTime<chrono::Utc>>,
}
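
// ---- Editor sketch (not part of this commit) --------------------------------
// Round-trip check for the cache layer. Assumes a disposable Postgres reachable
// through a hypothetical TEST_DATABASE_URL variable.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn matrix_round_trip() -> Result<(), sqlx::Error> {
        let url = std::env::var("TEST_DATABASE_URL").expect("set TEST_DATABASE_URL");
        let pool = PgPool::connect(&url).await?;
        init_db(&pool).await?;

        store_matrix(&pool, "test-hash", 64, 3.2, &[1, 2, 3]).await?;
        let row = get_matrix(&pool, "test-hash").await?.expect("row was just stored");
        assert_eq!(row.matrix_bytes, vec![1, 2, 3]);
        assert_eq!(row.station_count, 64);
        Ok(())
    }
}
// ------------------------------------------------------------------------------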

main.rs

@@ -1,3 +1,49 @@
-fn main() {
-    println!("mass-driver backend — not yet implemented");
-}
mod db;
mod routes;

use axum::{
    routing::{get, post},
    Router,
};
use sqlx::postgres::PgPoolOptions;
use tower_http::{compression::CompressionLayer, cors::CorsLayer};

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();

    let database_url =
        std::env::var("DATABASE_URL").expect("DATABASE_URL environment variable must be set");
    let port = std::env::var("PORT").unwrap_or_else(|_| "3001".to_string());

    tracing::info!("Connecting to database...");
    let pool = PgPoolOptions::new()
        .max_connections(10)
        .connect(&database_url)
        .await
        .expect("Failed to connect to Postgres");
    db::init_db(&pool).await.expect("Failed to initialize database tables");
    tracing::info!("Database initialized");

    let app = Router::new()
        .route("/api/health", get(routes::health))
        .route("/api/cache/transfer-matrix", post(routes::store_matrix))
        .route(
            "/api/cache/transfer-matrix/{config_hash}",
            get(routes::get_matrix),
        )
        .route("/api/cache/route", post(routes::store_route))
        .route("/api/cache/route", get(routes::get_route))
        .layer(CompressionLayer::new())
        .layer(CorsLayer::permissive())
        .with_state(pool);

    let addr = format!("0.0.0.0:{port}");
    tracing::info!("Listening on {addr}");
    let listener = tokio::net::TcpListener::bind(&addr)
        .await
        .expect("Failed to bind listener");
    axum::serve(listener, app)
        .await
        .expect("Server error");
}
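
Note that CorsLayer::permissive() accepts any origin; presumably this is acceptable because production traffic reaches the backend through the nginx /api proxy, and the open policy mainly serves local development.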

routes.rs (new file)

@@ -0,0 +1,134 @@
use axum::{
    extract::{Path, Query, State},
    http::StatusCode,
    response::IntoResponse,
    Json,
};
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;

use crate::db;

// ---------- Health ----------

pub async fn health() -> impl IntoResponse {
    Json(serde_json::json!({ "status": "ok" }))
}

// ---------- Transfer Matrix ----------

#[derive(Deserialize)]
pub struct StoreMatrixRequest {
    pub config_hash: String,
    pub station_count: i32,
    pub launch_velocity_kms: f32,
    pub matrix_bytes: String, // base64-encoded
}

#[derive(Serialize)]
pub struct MatrixResponse {
    pub config_hash: String,
    pub station_count: i32,
    pub launch_velocity_kms: f32,
    pub matrix_bytes: String, // base64-encoded
}
pub async fn store_matrix(
    State(pool): State<PgPool>,
    Json(req): Json<StoreMatrixRequest>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
    let bytes = BASE64
        .decode(&req.matrix_bytes)
        .map_err(|e| (StatusCode::BAD_REQUEST, format!("Invalid base64: {e}")))?;
    db::store_matrix(
        &pool,
        &req.config_hash,
        req.station_count,
        req.launch_velocity_kms,
        &bytes,
    )
    .await
    .map_err(|e| {
        tracing::error!("DB error storing matrix: {e}");
        (StatusCode::INTERNAL_SERVER_ERROR, format!("DB error: {e}"))
    })?;
    Ok((StatusCode::CREATED, Json(serde_json::json!({ "stored": true }))))
}

pub async fn get_matrix(
    State(pool): State<PgPool>,
    Path(config_hash): Path<String>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
    let row = db::get_matrix(&pool, &config_hash).await.map_err(|e| {
        tracing::error!("DB error fetching matrix: {e}");
        (StatusCode::INTERNAL_SERVER_ERROR, format!("DB error: {e}"))
    })?;
    match row {
        Some(m) => Ok(Json(MatrixResponse {
            config_hash: m.config_hash,
            station_count: m.station_count,
            launch_velocity_kms: m.launch_velocity_kms,
            matrix_bytes: BASE64.encode(&m.matrix_bytes),
        })
        .into_response()),
        None => Ok(StatusCode::NOT_FOUND.into_response()),
    }
}

// ---------- Cached Route ----------

#[derive(Deserialize)]
pub struct StoreRouteRequest {
    pub config_hash: String,
    pub from_station: i32,
    pub to_station: i32,
    pub route_json: serde_json::Value,
}

#[derive(Deserialize)]
pub struct RouteQuery {
    pub config_hash: String,
    pub from: i32,
    pub to: i32,
}
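
// axum's Query extractor deserializes the URL query string into RouteQuery,
// so lookups take the form:
//   GET /api/cache/route?config_hash=<hash>&from=<station>&to=<station>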
pub async fn store_route(
    State(pool): State<PgPool>,
    Json(req): Json<StoreRouteRequest>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
    db::store_route(
        &pool,
        &req.config_hash,
        req.from_station,
        req.to_station,
        &req.route_json,
    )
    .await
    .map_err(|e| {
        tracing::error!("DB error storing route: {e}");
        (StatusCode::INTERNAL_SERVER_ERROR, format!("DB error: {e}"))
    })?;
    Ok((StatusCode::CREATED, Json(serde_json::json!({ "stored": true }))))
}

pub async fn get_route(
    State(pool): State<PgPool>,
    Query(q): Query<RouteQuery>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
    let row = db::get_route(&pool, &q.config_hash, q.from, q.to)
        .await
        .map_err(|e| {
            tracing::error!("DB error fetching route: {e}");
            (StatusCode::INTERNAL_SERVER_ERROR, format!("DB error: {e}"))
        })?;
    match row {
        Some(r) => Ok(Json(r).into_response()),
        None => Ok(StatusCode::NOT_FOUND.into_response()),
    }
}
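
// ---- Editor sketch (not part of this commit) --------------------------------
// Client-side view of the matrix upload, mirroring StoreMatrixRequest. Assumes
// the reqwest crate (with its "json" feature), which this crate does not
// depend on; the values are placeholders.
async fn upload_matrix(matrix: &[u8]) -> Result<(), reqwest::Error> {
    let body = serde_json::json!({
        "config_hash": "test-hash",
        "station_count": 64,
        "launch_velocity_kms": 3.2,
        "matrix_bytes": BASE64.encode(matrix), // BYTEA travels base64-encoded inside JSON
    });
    reqwest::Client::new()
        .post("http://localhost:3001/api/cache/transfer-matrix")
        .json(&body)
        .send()
        .await?
        .error_for_status()?; // the backend answers 201 Created on success
    Ok(())
}
// ------------------------------------------------------------------------------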