feat: filesystem blob storage

This commit is contained in:
lewis
2026-01-20 18:21:41 +02:00
committed by Tangled
parent 12a189cb2b
commit a18be8c6fd
30 changed files with 2493 additions and 371 deletions

View File

@@ -14,19 +14,32 @@ DATABASE_URL=postgres://postgres:postgres@localhost:5432/pds
# DATABASE_MIN_CONNECTIONS=10
# DATABASE_ACQUIRE_TIMEOUT_SECS=30
# =============================================================================
# Blob Storage (S3-compatible)
# Blob Storage
# =============================================================================
S3_ENDPOINT=http://localhost:9000
AWS_REGION=us-east-1
S3_BUCKET=pds-blobs
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=minioadmin
# Backend: "filesystem" (default) or "s3"
# BLOB_STORAGE_BACKEND=filesystem
# For filesystem backend:
BLOB_STORAGE_PATH=/var/lib/tranquil/blobs
# For S3 backend:
# S3_ENDPOINT=http://localhost:9000
# AWS_REGION=us-east-1
# S3_BUCKET=pds-blobs
# AWS_ACCESS_KEY_ID=minioadmin
# AWS_SECRET_ACCESS_KEY=minioadmin
# =============================================================================
# Backups (S3-compatible)
# Backups
# =============================================================================
# Set to enable automatic repo backups to S3
# BACKUP_S3_BUCKET=pds-backups
# Enable/disable automatic repo backups
# BACKUP_ENABLED=true
# Backend: "filesystem" (default) or "s3"
# BACKUP_STORAGE_BACKEND=filesystem
# For filesystem backend:
BACKUP_STORAGE_PATH=/var/lib/tranquil/backups
# For S3 backend:
# BACKUP_S3_BUCKET=pds-backups
# Backup schedule and retention
# BACKUP_RETENTION_COUNT=7
# BACKUP_INTERVAL_SECS=86400
# =============================================================================
# Valkey (for caching and distributed rate limiting)
# =============================================================================

View File

@@ -0,0 +1,77 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT token, request_uri, provider as \"provider: SsoProviderType\",\n provider_user_id, provider_username, provider_email, created_at, expires_at\n FROM sso_pending_registration\n WHERE token = $1 AND expires_at > NOW()\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "token",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "request_uri",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "provider: SsoProviderType",
"type_info": {
"Custom": {
"name": "sso_provider_type",
"kind": {
"Enum": [
"github",
"discord",
"google",
"gitlab",
"oidc"
]
}
}
}
},
{
"ordinal": 3,
"name": "provider_user_id",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "provider_username",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "provider_email",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 7,
"name": "expires_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
true,
true,
false,
false
]
},
"hash": "06eb7c6e1983b6121526ba63612236391290c2e63d37d2bb1cd89ea822950a82"
}

View File

@@ -0,0 +1,77 @@
{
"db_name": "PostgreSQL",
"query": "\n DELETE FROM sso_pending_registration\n WHERE token = $1 AND expires_at > NOW()\n RETURNING token, request_uri, provider as \"provider: SsoProviderType\",\n provider_user_id, provider_username, provider_email, created_at, expires_at\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "token",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "request_uri",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "provider: SsoProviderType",
"type_info": {
"Custom": {
"name": "sso_provider_type",
"kind": {
"Enum": [
"github",
"discord",
"google",
"gitlab",
"oidc"
]
}
}
}
},
{
"ordinal": 3,
"name": "provider_user_id",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "provider_username",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "provider_email",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 7,
"name": "expires_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
true,
true,
false,
false
]
},
"hash": "5031b96c65078d6c54954ce6e57ff9cbba4c48dd8a7546882ab5647114ffab4a"
}

View File

@@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT email_verified FROM users WHERE email = $1 OR handle = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "email_verified",
"type_info": "Bool"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false
]
},
"hash": "6258398accee69e0c5f455a3c0ecc273b3da6ef5bb4d8660adafe63d8e3cd2d4"
}

View File

@@ -0,0 +1,31 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO external_identities (did, provider, provider_user_id, provider_username, provider_email)\n VALUES ($1, $2, $3, $4, $5)\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
{
"Custom": {
"name": "sso_provider_type",
"kind": {
"Enum": [
"github",
"discord",
"google",
"gitlab",
"oidc"
]
}
}
},
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "a4dc8fb22bd094d414c55b9da20b610f7b122b485ab0fd0d0646d68ae8e64fe6"
}

View File

@@ -0,0 +1,32 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO sso_pending_registration (token, request_uri, provider, provider_user_id, provider_username, provider_email)\n VALUES ($1, $2, $3, $4, $5, $6)\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
{
"Custom": {
"name": "sso_provider_type",
"kind": {
"Enum": [
"github",
"discord",
"google",
"gitlab",
"oidc"
]
}
}
},
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "dec3a21a8e60cc8d2c5dad727750bc88f5535dedae244f7b6e4afa95769b8f1a"
}

3
Cargo.lock generated
View File

@@ -6173,7 +6173,10 @@ dependencies = [
"bytes",
"futures",
"sha2",
"tokio",
"tracing",
"tranquil-infra",
"uuid",
]
[[package]]

View File

@@ -12,9 +12,9 @@ Another excellent PDS is [Cocoon](https://tangled.org/hailey.at/cocoon), written
## What's different about Tranquil PDS
It is a superset of the reference PDS, including: passkeys and 2FA (WebAuthn/FIDO2, TOTP, backup codes, trusted devices), SSO login and signup, did:web support (PDS-hosted subdomains or bring-your-own), multi-channel communication (email, discord, telegram, signal) for verification and alerts, granular OAuth scopes with a consent UI showing human-readable descriptions, app passwords with granular permissions (read-only, post-only, or custom scopes), account delegation (letting others manage an account with configurable permission levels), automatic backups to s3-compatible object storage (configurable retention and frequency, one-click restore), and a built-in web UI for account management, OAuth consent, repo browsing, and admin.
It is a superset of the reference PDS, including: passkeys and 2FA (WebAuthn/FIDO2, TOTP, backup codes, trusted devices), SSO login and signup, did:web support (PDS-hosted subdomains or bring-your-own), multi-channel communication (email, discord, telegram, signal) for verification and alerts, granular OAuth scopes with a consent UI showing human-readable descriptions, app passwords with granular permissions (read-only, post-only, or custom scopes), account delegation (letting others manage an account with configurable permission levels), automatic backups (configurable retention and frequency, one-click restore), and a built-in web UI for account management, OAuth consent, repo browsing, and admin.
The PDS itself is a single small binary with no node/npm runtime. It does require postgres, valkey, and s3-compatible storage, which makes setup heavier than the reference PDS's sqlite. The tradeoff is that these are battle-tested pieces of infra that we already know how to scale, back up, and monitor.
The PDS itself is a single small binary with no node/npm runtime. It requires postgres and stores blobs on the local filesystem. Valkey is optional (enables distributed rate limiting for multi-node setups). The tradeoff vs the reference PDS's sqlite is that postgres is a battle-tested piece of infra that we already know how to scale, back up, and monitor.
## Quick Start

View File

@@ -8,8 +8,10 @@ use std::time::Duration;
pub enum StorageError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("S3 error: {0}")]
S3(String),
#[error("Storage error: {0}")]
Backend(String),
#[error("Not found: {0}")]
NotFound(String),
#[error("Other: {0}")]
Other(String),
}
@@ -35,6 +37,27 @@ pub trait BlobStorage: Send + Sync {
async fn copy(&self, src_key: &str, dst_key: &str) -> Result<(), StorageError>;
}
#[async_trait]
pub trait BackupStorage: Send + Sync {
async fn put_backup(&self, did: &str, rev: &str, data: &[u8]) -> Result<String, StorageError>;
async fn get_backup(&self, storage_key: &str) -> Result<Bytes, StorageError>;
async fn delete_backup(&self, storage_key: &str) -> Result<(), StorageError>;
}
/// Number of backups to keep per user.
///
/// Read from the `BACKUP_RETENTION_COUNT` environment variable; falls back
/// to 7 when the variable is unset or does not parse as a `u32`.
pub fn backup_retention_count() -> u32 {
    match std::env::var("BACKUP_RETENTION_COUNT") {
        Ok(raw) => raw.parse().unwrap_or(7),
        Err(_) => 7,
    }
}
/// Seconds between scheduled backup passes.
///
/// Read from the `BACKUP_INTERVAL_SECS` environment variable; falls back to
/// 86400 (one day) when unset or not a valid `u64`.
pub fn backup_interval_secs() -> u64 {
    const DEFAULT_SECS: u64 = 86400; // one day
    if let Ok(raw) = std::env::var("BACKUP_INTERVAL_SECS") {
        raw.parse().unwrap_or(DEFAULT_SECS)
    } else {
        DEFAULT_SECS
    }
}
#[derive(Debug, thiserror::Error)]
pub enum CacheError {
#[error("Cache connection error: {0}")]

View File

@@ -79,6 +79,7 @@ zip = { workspace = true }
[features]
external-infra = []
s3-storage = []
[dev-dependencies]
ciborium = { workspace = true }

View File

@@ -3,7 +3,7 @@ use crate::api::{EmptyResponse, EnabledResponse};
use crate::auth::BearerAuth;
use crate::scheduled::generate_full_backup;
use crate::state::AppState;
use crate::storage::BackupStorage;
use crate::storage::{BackupStorage, backup_retention_count};
use axum::{
Json,
extract::{Query, State},
@@ -249,10 +249,10 @@ pub async fn create_backup(State(state): State<AppState>, auth: BearerAuth) -> R
"Created manual backup"
);
let retention = BackupStorage::retention_count();
let retention = backup_retention_count();
if let Err(e) = cleanup_old_backups(
state.backup_repo.as_ref(),
backup_storage,
backup_storage.as_ref(),
user.id,
retention,
)
@@ -275,7 +275,7 @@ pub async fn create_backup(State(state): State<AppState>, auth: BearerAuth) -> R
async fn cleanup_old_backups(
backup_repo: &dyn BackupRepository,
backup_storage: &BackupStorage,
backup_storage: &dyn BackupStorage,
user_id: uuid::Uuid,
retention_count: u32,
) -> Result<(), String> {

View File

@@ -198,7 +198,7 @@ pub async fn upload_blob(
let cid = Cid::new_v1(0x55, multihash);
let cid_str = cid.to_string();
let cid_link: CidLink = CidLink::new_unchecked(&cid_str);
let storage_key = format!("blobs/{}", cid_str);
let storage_key = cid_str.clone();
info!(
"Blob upload complete: size={}, cid={}, copying to final location",

View File

@@ -82,11 +82,9 @@ pub async fn reauth_password(
.await
.unwrap_or_default();
let app_password_valid = app_password_hashes
.iter()
.fold(false, |acc, h| {
acc | bcrypt::verify(&input.password, h).unwrap_or(false)
});
let app_password_valid = app_password_hashes.iter().fold(false, |acc, h| {
acc | bcrypt::verify(&input.password, h).unwrap_or(false)
});
if !app_password_valid {
warn!(did = %&auth.0.did, "Re-auth failed: invalid password");

View File

@@ -49,7 +49,11 @@ pub fn generate_app_password() -> String {
let chars: &[u8] = b"abcdefghijklmnopqrstuvwxyz234567";
let mut rng = rand::thread_rng();
let segments: Vec<String> = (0..4)
.map(|_| (0..4).map(|_| chars[rng.gen_range(0..chars.len())] as char).collect())
.map(|_| {
(0..4)
.map(|_| chars[rng.gen_range(0..chars.len())] as char)
.collect()
})
.collect();
segments.join("-")
}

View File

@@ -3368,11 +3368,9 @@ pub async fn register_complete(
}
};
let password_valid = password_hashes
.iter()
.fold(false, |acc, hash| {
acc | bcrypt::verify(&form.app_password, hash).unwrap_or(false)
});
let password_valid = password_hashes.iter().fold(false, |acc, hash| {
acc | bcrypt::verify(&form.app_password, hash).unwrap_or(false)
});
if !password_valid {
return (

View File

@@ -15,7 +15,7 @@ use tranquil_db_traits::{
use tranquil_types::{AtUri, CidLink, Did};
use crate::repo::PostgresBlockStore;
use crate::storage::{BackupStorage, BlobStorage};
use crate::storage::{BackupStorage, BlobStorage, backup_interval_secs, backup_retention_count};
use crate::sync::car::encode_car_header;
async fn process_genesis_commit(
@@ -537,14 +537,14 @@ pub async fn start_backup_tasks(
repo_repo: Arc<dyn RepoRepository>,
backup_repo: Arc<dyn BackupRepository>,
block_store: PostgresBlockStore,
backup_storage: Arc<BackupStorage>,
backup_storage: Arc<dyn BackupStorage>,
mut shutdown_rx: watch::Receiver<bool>,
) {
let backup_interval = Duration::from_secs(BackupStorage::interval_secs());
let backup_interval = Duration::from_secs(backup_interval_secs());
info!(
interval_secs = backup_interval.as_secs(),
retention_count = BackupStorage::retention_count(),
retention_count = backup_retention_count(),
"Starting backup service"
);
@@ -564,7 +564,7 @@ pub async fn start_backup_tasks(
repo_repo.as_ref(),
backup_repo.as_ref(),
&block_store,
&backup_storage,
backup_storage.as_ref(),
).await {
error!("Error processing scheduled backups: {}", e);
}
@@ -592,7 +592,7 @@ async fn process_single_backup(
repo_repo: &dyn RepoRepository,
backup_repo: &dyn BackupRepository,
block_store: &PostgresBlockStore,
backup_storage: &BackupStorage,
backup_storage: &dyn BackupStorage,
user_id: uuid::Uuid,
did: String,
repo_root_cid: String,
@@ -656,13 +656,13 @@ async fn process_scheduled_backups(
repo_repo: &dyn RepoRepository,
backup_repo: &dyn BackupRepository,
block_store: &PostgresBlockStore,
backup_storage: &BackupStorage,
backup_storage: &dyn BackupStorage,
) -> Result<(), String> {
let backup_interval_secs = BackupStorage::interval_secs() as i64;
let retention_count = BackupStorage::retention_count();
let interval_secs = backup_interval_secs() as i64;
let retention = backup_retention_count();
let users_needing_backup = backup_repo
.get_users_needing_backup(backup_interval_secs, 50)
.get_users_needing_backup(interval_secs, 50)
.await
.map_err(|e| format!("DB error fetching users for backup: {:?}", e))?;
@@ -700,13 +700,9 @@ async fn process_scheduled_backups(
block_count = result.block_count,
"Created backup"
);
if let Err(e) = cleanup_old_backups(
backup_repo,
backup_storage,
result.user_id,
retention_count,
)
.await
if let Err(e) =
cleanup_old_backups(backup_repo, backup_storage, result.user_id, retention)
.await
{
warn!(did = %result.did, error = %e, "Failed to cleanup old backups");
}
@@ -844,7 +840,7 @@ fn read_varint(data: &[u8]) -> Option<(u64, usize)> {
async fn cleanup_old_backups(
backup_repo: &dyn BackupRepository,
backup_storage: &BackupStorage,
backup_storage: &dyn BackupStorage,
user_id: uuid::Uuid,
retention_count: u32,
) -> Result<(), String> {

View File

@@ -1221,7 +1221,11 @@ pub async fn complete_registration(
scopes: None,
created_by_controller_did: None,
};
if let Err(e) = state.session_repo.create_app_password(&app_password_data).await {
if let Err(e) = state
.session_repo
.create_app_password(&app_password_data)
.await
{
tracing::warn!("Failed to create initial app password: {:?}", e);
}

View File

@@ -5,7 +5,7 @@ use crate::config::AuthConfig;
use crate::rate_limit::RateLimiters;
use crate::repo::PostgresBlockStore;
use crate::sso::{SsoConfig, SsoManager};
use crate::storage::{BackupStorage, BlobStorage, S3BlobStorage};
use crate::storage::{BackupStorage, BlobStorage, create_backup_storage, create_blob_storage};
use crate::sync::firehose::SequencedEvent;
use sqlx::PgPool;
use std::error::Error;
@@ -32,7 +32,7 @@ pub struct AppState {
pub event_notifier: Arc<dyn RepoEventNotifier>,
pub block_store: PostgresBlockStore,
pub blob_store: Arc<dyn BlobStorage>,
pub backup_storage: Option<Arc<BackupStorage>>,
pub backup_storage: Option<Arc<dyn BackupStorage>>,
pub firehose_tx: broadcast::Sender<SequencedEvent>,
pub rate_limiters: Arc<RateLimiters>,
pub circuit_breakers: Arc<CircuitBreakers>,
@@ -165,8 +165,8 @@ impl AppState {
let repos = Arc::new(PostgresRepositories::new(db.clone()));
let block_store = PostgresBlockStore::new(db);
let blob_store = S3BlobStorage::new().await;
let backup_storage = BackupStorage::new().await.map(Arc::new);
let blob_store = create_blob_storage().await;
let backup_storage = create_backup_storage().await;
let firehose_buffer_size: usize = std::env::var("FIREHOSE_BUFFER_SIZE")
.ok()
@@ -195,7 +195,7 @@ impl AppState {
sso_repo: repos.sso.clone(),
repos,
block_store,
blob_store: Arc::new(blob_store),
blob_store,
backup_storage,
firehose_tx,
rate_limiters,

View File

@@ -1,3 +1,5 @@
pub use tranquil_storage::{
BackupStorage, BlobStorage, S3BlobStorage, StorageError, StreamUploadResult,
BackupStorage, BlobStorage, FilesystemBackupStorage, FilesystemBlobStorage, S3BackupStorage,
S3BlobStorage, StorageError, StreamUploadResult, backup_interval_secs, backup_retention_count,
create_backup_storage, create_blob_storage,
};

View File

@@ -1,11 +1,15 @@
#[cfg(feature = "s3-storage")]
use aws_config::BehaviorVersion;
#[cfg(feature = "s3-storage")]
use aws_sdk_s3::Client as S3Client;
#[cfg(feature = "s3-storage")]
use aws_sdk_s3::config::Credentials;
use chrono::Utc;
use reqwest::{Client, StatusCode, header};
use serde_json::{Value, json};
use sqlx::postgres::PgPoolOptions;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, OnceLock, RwLock};
#[allow(unused_imports)]
use std::time::Duration;
@@ -19,16 +23,19 @@ static APP_PORT: OnceLock<u16> = OnceLock::new();
static MOCK_APPVIEW: OnceLock<MockServer> = OnceLock::new();
static MOCK_PLC: OnceLock<MockServer> = OnceLock::new();
static TEST_DB_POOL: OnceLock<sqlx::PgPool> = OnceLock::new();
static TEST_TEMP_DIR: OnceLock<PathBuf> = OnceLock::new();
#[cfg(not(feature = "external-infra"))]
#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
use testcontainers::GenericImage;
#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
use testcontainers::core::ContainerPort;
#[cfg(not(feature = "external-infra"))]
use testcontainers::{ContainerAsync, GenericImage, ImageExt, runners::AsyncRunner};
use testcontainers::{ContainerAsync, ImageExt, runners::AsyncRunner};
#[cfg(not(feature = "external-infra"))]
use testcontainers_modules::postgres::Postgres;
#[cfg(not(feature = "external-infra"))]
static DB_CONTAINER: OnceLock<ContainerAsync<Postgres>> = OnceLock::new();
#[cfg(not(feature = "external-infra"))]
#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
static S3_CONTAINER: OnceLock<ContainerAsync<GenericImage>> = OnceLock::new();
#[allow(dead_code)]
@@ -42,11 +49,15 @@ pub const TARGET_DID: &str = "did:plc:target";
fn has_external_infra() -> bool {
std::env::var("TRANQUIL_PDS_TEST_INFRA_READY").is_ok()
|| (std::env::var("DATABASE_URL").is_ok() && std::env::var("S3_ENDPOINT").is_ok())
|| (std::env::var("DATABASE_URL").is_ok()
&& (std::env::var("S3_ENDPOINT").is_ok() || std::env::var("BLOB_STORAGE_PATH").is_ok()))
}
#[cfg(test)]
#[ctor::dtor]
fn cleanup() {
if let Some(temp_dir) = TEST_TEMP_DIR.get() {
let _ = std::fs::remove_dir_all(temp_dir);
}
if has_external_infra() {
return;
}
@@ -125,27 +136,36 @@ pub async fn base_url() -> &'static str {
async fn setup_with_external_infra() -> String {
let database_url =
std::env::var("DATABASE_URL").expect("DATABASE_URL must be set when using external infra");
let s3_endpoint =
std::env::var("S3_ENDPOINT").expect("S3_ENDPOINT must be set when using external infra");
let plc_url = setup_mock_plc_directory().await;
unsafe {
std::env::set_var(
"S3_BUCKET",
std::env::var("S3_BUCKET").unwrap_or_else(|_| "test-bucket".to_string()),
);
std::env::set_var(
"AWS_ACCESS_KEY_ID",
std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "minioadmin".to_string()),
);
std::env::set_var(
"AWS_SECRET_ACCESS_KEY",
std::env::var("AWS_SECRET_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string()),
);
std::env::set_var(
"AWS_REGION",
std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()),
);
std::env::set_var("S3_ENDPOINT", &s3_endpoint);
if std::env::var("S3_ENDPOINT").is_ok() {
let s3_endpoint = std::env::var("S3_ENDPOINT").unwrap();
std::env::set_var("BLOB_STORAGE_BACKEND", "s3");
std::env::set_var("BACKUP_STORAGE_BACKEND", "s3");
std::env::set_var("BACKUP_S3_BUCKET", "test-backups");
std::env::set_var(
"S3_BUCKET",
std::env::var("S3_BUCKET").unwrap_or_else(|_| "test-bucket".to_string()),
);
std::env::set_var(
"AWS_ACCESS_KEY_ID",
std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "minioadmin".to_string()),
);
std::env::set_var(
"AWS_SECRET_ACCESS_KEY",
std::env::var("AWS_SECRET_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string()),
);
std::env::set_var(
"AWS_REGION",
std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()),
);
std::env::set_var("S3_ENDPOINT", &s3_endpoint);
} else if std::env::var("BLOB_STORAGE_PATH").is_ok() {
std::env::set_var("BLOB_STORAGE_BACKEND", "filesystem");
std::env::set_var("BACKUP_STORAGE_BACKEND", "filesystem");
} else {
panic!("Either S3_ENDPOINT or BLOB_STORAGE_PATH must be set for external-infra");
}
std::env::set_var("MAX_IMPORT_SIZE", "100000000");
std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
@@ -160,7 +180,49 @@ async fn setup_with_external_infra() -> String {
spawn_app(database_url).await
}
#[cfg(not(feature = "external-infra"))]
#[cfg(all(not(feature = "external-infra"), not(feature = "s3-storage")))]
async fn setup_with_testcontainers() -> String {
    // Builds the full test environment for the default (filesystem) storage
    // configuration: a throwaway blob/backup directory tree, a mock PLC
    // directory, a mock AppView, and a disposable Postgres container.
    // Returns the spawned app's base URL.
    //
    // Per-run scratch directory; removed by the #[ctor::dtor] cleanup hook
    // via TEST_TEMP_DIR at process exit.
    let temp_dir = std::env::temp_dir().join(format!("tranquil-pds-test-{}", uuid::Uuid::new_v4()));
    let blob_path = temp_dir.join("blobs");
    let backup_path = temp_dir.join("backups");
    std::fs::create_dir_all(&blob_path).expect("Failed to create blob temp directory");
    std::fs::create_dir_all(&backup_path).expect("Failed to create backup temp directory");
    TEST_TEMP_DIR.set(temp_dir).ok();
    let plc_url = setup_mock_plc_directory().await;
    // NOTE(review): set_var mutates process-global state; this appears to run
    // once behind a OnceLock-guarded setup path — confirm no concurrent env
    // mutation from parallel tests.
    unsafe {
        std::env::set_var("BLOB_STORAGE_BACKEND", "filesystem");
        std::env::set_var("BLOB_STORAGE_PATH", blob_path.to_str().unwrap());
        std::env::set_var("BACKUP_STORAGE_BACKEND", "filesystem");
        std::env::set_var("BACKUP_STORAGE_PATH", backup_path.to_str().unwrap());
        std::env::set_var("MAX_IMPORT_SIZE", "100000000");
        std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
        std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
    }
    // Mock AppView, advertised through a did:web identifier derived from its
    // host (':' percent-encoded as %3A).
    let mock_server = MockServer::start().await;
    setup_mock_appview(&mock_server).await;
    let mock_uri = mock_server.uri();
    let mock_host = mock_uri.strip_prefix("http://").unwrap_or(&mock_uri);
    let mock_did = format!("did:web:{}", mock_host.replace(':', "%3A"));
    setup_mock_did_document(&mock_server, &mock_did, &mock_uri).await;
    MOCK_APPVIEW.set(mock_server).ok();
    // Disposable Postgres container; the handle is stashed in a static so the
    // container isn't dropped (and stopped) when this function returns.
    let container = Postgres::default()
        .with_tag("18-alpine")
        .with_label("tranquil_pds_test", "true")
        .start()
        .await
        .expect("Failed to start Postgres");
    let connection_string = format!(
        "postgres://postgres:postgres@127.0.0.1:{}",
        container
            .get_host_port_ipv4(5432)
            .await
            .expect("Failed to get port")
    );
    DB_CONTAINER.set(container).ok();
    spawn_app(connection_string).await
}
#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
async fn setup_with_testcontainers() -> String {
let s3_container = GenericImage::new("cgr.dev/chainguard/minio", "latest")
.with_exposed_port(ContainerPort::Tcp(9000))
@@ -178,6 +240,9 @@ async fn setup_with_testcontainers() -> String {
let s3_endpoint = format!("http://127.0.0.1:{}", s3_port);
let plc_url = setup_mock_plc_directory().await;
unsafe {
std::env::set_var("BLOB_STORAGE_BACKEND", "s3");
std::env::set_var("BACKUP_STORAGE_BACKEND", "s3");
std::env::set_var("BACKUP_S3_BUCKET", "test-backups");
std::env::set_var("S3_BUCKET", "test-bucket");
std::env::set_var("AWS_ACCESS_KEY_ID", "minioadmin");
std::env::set_var("AWS_SECRET_ACCESS_KEY", "minioadmin");
@@ -204,6 +269,11 @@ async fn setup_with_testcontainers() -> String {
.build();
let s3_client = S3Client::from_conf(s3_config);
let _ = s3_client.create_bucket().bucket("test-bucket").send().await;
let _ = s3_client
.create_bucket()
.bucket("test-backups")
.send()
.await;
let mock_server = MockServer::start().await;
setup_mock_appview(&mock_server).await;
let mock_uri = mock_server.uri();
@@ -232,7 +302,7 @@ async fn setup_with_testcontainers() -> String {
#[cfg(feature = "external-infra")]
async fn setup_with_testcontainers() -> String {
panic!(
"Testcontainers disabled with external-infra feature. Set DATABASE_URL and S3_ENDPOINT."
"Testcontainers disabled with external-infra feature. Set DATABASE_URL and BLOB_STORAGE_PATH (or S3_ENDPOINT)."
);
}

View File

@@ -1536,10 +1536,7 @@ async fn test_full_oauth_registration_flow() {
let access_jwt = account["accessJwt"].as_str().unwrap();
let app_password_res = http_client
.post(format!(
"{}/xrpc/com.atproto.server.createAppPassword",
url
))
.post(format!("{}/xrpc/com.atproto.server.createAppPassword", url))
.header("Authorization", format!("Bearer {}", access_jwt))
.json(&json!({ "name": "oauth-test-app" }))
.send()

View File

@@ -13,3 +13,6 @@ aws-sdk-s3 = { workspace = true }
bytes = { workspace = true }
futures = { workspace = true }
sha2 = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }

View File

@@ -1,4 +1,7 @@
pub use tranquil_infra::{BlobStorage, StorageError, StreamUploadResult};
pub use tranquil_infra::{
BackupStorage, BlobStorage, StorageError, StreamUploadResult, backup_interval_secs,
backup_retention_count,
};
use async_trait::async_trait;
use aws_config::BehaviorVersion;
@@ -10,9 +13,95 @@ use aws_sdk_s3::types::CompletedPart;
use bytes::Bytes;
use futures::Stream;
use sha2::{Digest, Sha256};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
const MIN_PART_SIZE: usize = 5 * 1024 * 1024;
const EXDEV: i32 = 18;
/// Reject storage keys that could escape the backend's root directory.
///
/// Scans the `/`-separated segments with a depth counter: `..` pops one
/// level, `.` and empty segments are ignored, anything else pushes. If the
/// depth ever dips below the root the key is treated as a traversal
/// attempt. Error precedence is: traversal, then embedded NUL byte, then
/// absolute path.
fn validate_key(key: &str) -> Result<(), StorageError> {
    let mut depth: i32 = 0;
    let mut escapes_root = false;
    for segment in key.split('/').filter(|seg| !seg.is_empty()) {
        match segment {
            "." => {}
            ".." => {
                depth -= 1;
                if depth < 0 {
                    escapes_root = true;
                    break;
                }
            }
            _ => depth += 1,
        }
    }
    if escapes_root {
        return Err(StorageError::Other(format!(
            "Path traversal detected in key: {}",
            key
        )));
    }
    if key.contains('\0') {
        return Err(StorageError::Other(format!(
            "Null byte in key: {}",
            key.replace('\0', "\\0")
        )));
    }
    if key.starts_with('/') {
        return Err(StorageError::Other(format!(
            "Absolute path not allowed: {}",
            key
        )));
    }
    Ok(())
}
/// Best-effort removal of leftover temp files from a previous run.
///
/// Walks `tmp_path` on a blocking thread (directory iteration is
/// synchronous I/O), deletes every regular file it finds, and logs how many
/// were removed. All failures are swallowed: a missing directory or an
/// undeletable entry simply contributes nothing to the count.
async fn cleanup_orphaned_tmp_files(tmp_path: &Path) {
    let dir = tmp_path.to_path_buf();
    let removed = tokio::task::spawn_blocking(move || {
        let mut count = 0usize;
        if let Ok(entries) = std::fs::read_dir(&dir) {
            for entry in entries.flatten() {
                let path = entry.path();
                if path.is_file() && std::fs::remove_file(&path).is_ok() {
                    count += 1;
                }
            }
        }
        count
    })
    .await
    .unwrap_or(0);
    if removed > 0 {
        tracing::info!(
            count = removed,
            "Cleaned orphaned tmp files from previous run"
        );
    }
}
/// Atomically move `src` to `dst`, falling back to copy+fsync when the two
/// paths live on different filesystems.
///
/// `rename` fails with `EXDEV` across mount points; in that case the file
/// is copied, the destination is fsynced, and the source is removed on a
/// best-effort basis (its deletion failure is ignored). Any other rename
/// error is surfaced as `StorageError::Io`.
async fn rename_with_fallback(src: &Path, dst: &Path) -> Result<(), StorageError> {
    let Err(err) = tokio::fs::rename(src, dst).await else {
        return Ok(());
    };
    if err.raw_os_error() != Some(EXDEV) {
        return Err(StorageError::Io(err));
    }
    // Cross-device: copy, flush the destination to disk, then unlink source.
    tokio::fs::copy(src, dst).await?;
    tokio::fs::File::open(dst).await?.sync_all().await?;
    let _ = tokio::fs::remove_file(src).await;
    Ok(())
}
/// Create every missing ancestor directory of `path`.
///
/// A path with no parent component is a no-op success.
async fn ensure_parent_dir(path: &Path) -> Result<(), StorageError> {
    match path.parent() {
        Some(parent) => {
            tokio::fs::create_dir_all(parent).await?;
            Ok(())
        }
        None => Ok(()),
    }
}
/// Build an error mapper for a given storage key: `io::ErrorKind::NotFound`
/// becomes `StorageError::NotFound(key)`, every other I/O error is wrapped
/// as `StorageError::Io`.
fn map_io_not_found(key: &str) -> impl FnOnce(std::io::Error) -> StorageError + '_ {
    move |err| {
        if err.kind() == std::io::ErrorKind::NotFound {
            StorageError::NotFound(key.to_string())
        } else {
            StorageError::Io(err)
        }
    }
}
pub struct S3BlobStorage {
client: Client,
@@ -40,57 +129,34 @@ async fn create_s3_client() -> Client {
.load()
.await;
if let Ok(endpoint) = std::env::var("S3_ENDPOINT") {
let s3_config = aws_sdk_s3::config::Builder::from(&config)
.endpoint_url(endpoint)
.force_path_style(true)
.build();
Client::from_conf(s3_config)
} else {
Client::new(&config)
}
std::env::var("S3_ENDPOINT").ok().map_or_else(
|| Client::new(&config),
|endpoint| {
let s3_config = aws_sdk_s3::config::Builder::from(&config)
.endpoint_url(endpoint)
.force_path_style(true)
.build();
Client::from_conf(s3_config)
},
)
}
pub struct BackupStorage {
pub struct S3BackupStorage {
client: Client,
bucket: String,
}
impl BackupStorage {
impl S3BackupStorage {
pub async fn new() -> Option<Self> {
let backup_enabled = std::env::var("BACKUP_ENABLED")
.map(|v| v != "false" && v != "0")
.unwrap_or(true);
if !backup_enabled {
return None;
}
let bucket = std::env::var("BACKUP_S3_BUCKET").ok()?;
let client = create_s3_client().await;
Some(Self { client, bucket })
}
}
pub fn retention_count() -> u32 {
std::env::var("BACKUP_RETENTION_COUNT")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(7)
}
pub fn interval_secs() -> u64 {
std::env::var("BACKUP_INTERVAL_SECS")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(86400)
}
pub async fn put_backup(
&self,
did: &str,
rev: &str,
data: &[u8],
) -> Result<String, StorageError> {
#[async_trait]
impl BackupStorage for S3BackupStorage {
async fn put_backup(&self, did: &str, rev: &str, data: &[u8]) -> Result<String, StorageError> {
let key = format!("{}/{}.car", did, rev);
self.client
.put_object()
@@ -99,12 +165,12 @@ impl BackupStorage {
.body(ByteStream::from(Bytes::copy_from_slice(data)))
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
Ok(key)
}
pub async fn get_backup(&self, storage_key: &str) -> Result<Bytes, StorageError> {
async fn get_backup(&self, storage_key: &str) -> Result<Bytes, StorageError> {
let resp = self
.client
.get_object()
@@ -112,26 +178,23 @@ impl BackupStorage {
.key(storage_key)
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
let data = resp
.body
resp.body
.collect()
.await
.map_err(|e| StorageError::S3(e.to_string()))?
.into_bytes();
Ok(data)
.map(|agg| agg.into_bytes())
.map_err(|e| StorageError::Backend(e.to_string()))
}
pub async fn delete_backup(&self, storage_key: &str) -> Result<(), StorageError> {
async fn delete_backup(&self, storage_key: &str) -> Result<(), StorageError> {
self.client
.delete_object()
.bucket(&self.bucket)
.key(storage_key)
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
Ok(())
}
@@ -151,7 +214,7 @@ impl BlobStorage for S3BlobStorage {
.body(ByteStream::from(data))
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
Ok(())
}
@@ -168,16 +231,13 @@ impl BlobStorage for S3BlobStorage {
.key(key)
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
let data = resp
.body
resp.body
.collect()
.await
.map_err(|e| StorageError::S3(e.to_string()))?
.into_bytes();
Ok(data)
.map(|agg| agg.into_bytes())
.map_err(|e| StorageError::Backend(e.to_string()))
}
async fn get_head(&self, key: &str, size: usize) -> Result<Bytes, StorageError> {
@@ -190,16 +250,13 @@ impl BlobStorage for S3BlobStorage {
.range(range)
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
let data = resp
.body
resp.body
.collect()
.await
.map_err(|e| StorageError::S3(e.to_string()))?
.into_bytes();
Ok(data)
.map(|agg| agg.into_bytes())
.map_err(|e| StorageError::Backend(e.to_string()))
}
async fn delete(&self, key: &str) -> Result<(), StorageError> {
@@ -209,7 +266,7 @@ impl BlobStorage for S3BlobStorage {
.key(key)
.send()
.await
.map_err(|e| StorageError::S3(e.to_string()))?;
.map_err(|e| StorageError::Backend(e.to_string()))?;
Ok(())
}
@@ -217,7 +274,7 @@ impl BlobStorage for S3BlobStorage {
async fn put_stream(
&self,
key: &str,
mut stream: Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send>>,
stream: Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send>>,
) -> Result<StreamUploadResult, StorageError> {
use futures::StreamExt;
@@ -228,19 +285,15 @@ impl BlobStorage for S3BlobStorage {
.key(key)
.send()
.await
.map_err(|e| StorageError::S3(format!("Failed to create multipart upload: {}", e)))?;
.map_err(|e| {
StorageError::Backend(format!("Failed to create multipart upload: {}", e))
})?;
let upload_id = create_resp
.upload_id()
.ok_or_else(|| StorageError::S3("No upload ID returned".to_string()))?
.ok_or_else(|| StorageError::Backend("No upload ID returned".to_string()))?
.to_string();
let mut hasher = Sha256::new();
let mut total_size: u64 = 0;
let mut part_number = 1;
let mut completed_parts: Vec<CompletedPart> = Vec::new();
let mut buffer = Vec::with_capacity(MIN_PART_SIZE);
let upload_part = |client: &Client,
bucket: &str,
key: &str,
@@ -264,11 +317,11 @@ impl BlobStorage for S3BlobStorage {
.body(ByteStream::from(data))
.send()
.await
.map_err(|e| StorageError::S3(format!("Failed to upload part: {}", e)))?;
.map_err(|e| StorageError::Backend(format!("Failed to upload part: {}", e)))?;
let etag = resp
.e_tag()
.ok_or_else(|| StorageError::S3("No ETag returned for part".to_string()))?
.ok_or_else(|| StorageError::Backend("No ETag returned for part".to_string()))?
.to_string();
Ok(CompletedPart::builder()
@@ -278,58 +331,23 @@ impl BlobStorage for S3BlobStorage {
})
};
loop {
match stream.next().await {
Some(Ok(chunk)) => {
hasher.update(&chunk);
total_size += chunk.len() as u64;
buffer.extend_from_slice(&chunk);
if buffer.len() >= MIN_PART_SIZE {
let part_data =
std::mem::replace(&mut buffer, Vec::with_capacity(MIN_PART_SIZE));
let part = upload_part(
&self.client,
&self.bucket,
key,
&upload_id,
part_number,
part_data,
)
.await?;
completed_parts.push(part);
part_number += 1;
}
}
Some(Err(e)) => {
let _ = self
.client
.abort_multipart_upload()
.bucket(&self.bucket)
.key(key)
.upload_id(&upload_id)
.send()
.await;
return Err(StorageError::Io(e));
}
None => break,
}
struct UploadState {
hasher: Sha256,
total_size: u64,
part_number: i32,
completed_parts: Vec<CompletedPart>,
buffer: Vec<u8>,
}
if !buffer.is_empty() {
let part = upload_part(
&self.client,
&self.bucket,
key,
&upload_id,
part_number,
buffer,
)
.await?;
completed_parts.push(part);
}
let initial_state = UploadState {
hasher: Sha256::new(),
total_size: 0,
part_number: 1,
completed_parts: Vec::new(),
buffer: Vec::with_capacity(MIN_PART_SIZE),
};
if completed_parts.is_empty() {
let abort_upload = || async {
let _ = self
.client
.abort_multipart_upload()
@@ -338,11 +356,70 @@ impl BlobStorage for S3BlobStorage {
.upload_id(&upload_id)
.send()
.await;
};
let result: Result<UploadState, StorageError> = {
let mut state = initial_state;
let chunk_results: Vec<Result<Bytes, std::io::Error>> = stream.collect().await;
for chunk_result in chunk_results {
match chunk_result {
Ok(chunk) => {
state.hasher.update(&chunk);
state.total_size += chunk.len() as u64;
state.buffer.extend_from_slice(&chunk);
if state.buffer.len() >= MIN_PART_SIZE {
let part_data = std::mem::replace(
&mut state.buffer,
Vec::with_capacity(MIN_PART_SIZE),
);
let part = upload_part(
&self.client,
&self.bucket,
key,
&upload_id,
state.part_number,
part_data,
)
.await?;
state.completed_parts.push(part);
state.part_number += 1;
}
}
Err(e) => {
abort_upload().await;
return Err(StorageError::Io(e));
}
}
}
Ok(state)
};
let mut state = result?;
if !state.buffer.is_empty() {
let part = upload_part(
&self.client,
&self.bucket,
key,
&upload_id,
state.part_number,
std::mem::take(&mut state.buffer),
)
.await?;
state.completed_parts.push(part);
}
if state.completed_parts.is_empty() {
abort_upload().await;
return Err(StorageError::Other("Empty upload".to_string()));
}
let completed_upload = CompletedMultipartUpload::builder()
.set_parts(Some(completed_parts))
.set_parts(Some(state.completed_parts))
.build();
self.client
@@ -353,12 +430,14 @@ impl BlobStorage for S3BlobStorage {
.multipart_upload(completed_upload)
.send()
.await
.map_err(|e| StorageError::S3(format!("Failed to complete multipart upload: {}", e)))?;
.map_err(|e| {
StorageError::Backend(format!("Failed to complete multipart upload: {}", e))
})?;
let hash: [u8; 32] = hasher.finalize().into();
let hash: [u8; 32] = state.hasher.finalize().into();
Ok(StreamUploadResult {
sha256_hash: hash,
size: total_size,
size: state.total_size,
})
}
@@ -372,8 +451,303 @@ impl BlobStorage for S3BlobStorage {
.key(dst_key)
.send()
.await
.map_err(|e| StorageError::S3(format!("Failed to copy object: {}", e)))?;
.map_err(|e| StorageError::Backend(format!("Failed to copy object: {}", e)))?;
Ok(())
}
}
/// Blob storage backed by the local filesystem.
///
/// Objects are laid out under `base_path`; in-flight writes are staged in
/// `tmp_path` (a `.tmp` subdirectory of the root) and renamed into place so
/// readers never observe partially written objects.
pub struct FilesystemBlobStorage {
    base_path: PathBuf, // root directory for stored objects
    tmp_path: PathBuf,  // staging area for atomic writes
}
impl FilesystemBlobStorage {
    /// Create a store rooted at `base_path`.
    ///
    /// Creates the root and its `.tmp` staging directory if missing, then
    /// sweeps any orphaned temp files left behind by a previous crash.
    pub async fn new(base_path: impl Into<PathBuf>) -> Result<Self, StorageError> {
        let base_path = base_path.into();
        let tmp_path = base_path.join(".tmp");
        tokio::fs::create_dir_all(&base_path).await?;
        tokio::fs::create_dir_all(&tmp_path).await?;
        cleanup_orphaned_tmp_files(&tmp_path).await;
        Ok(Self {
            base_path,
            tmp_path,
        })
    }

    /// Build a store from the `BLOB_STORAGE_PATH` environment variable.
    pub async fn from_env() -> Result<Self, StorageError> {
        let path = std::env::var("BLOB_STORAGE_PATH")
            .map_err(|_| StorageError::Other("BLOB_STORAGE_PATH not set".into()))?;
        Self::new(path).await
    }

    /// Map a storage key to an on-disk path.
    /// Key validation (shape/traversal rules) is delegated to `validate_key`.
    fn resolve_path(&self, key: &str) -> Result<PathBuf, StorageError> {
        validate_key(key)?;
        Ok(self.base_path.join(key))
    }

    /// Write `data` to `path` atomically: write a uniquely named temp file,
    /// fsync it, then rename into place via `rename_with_fallback`.
    async fn atomic_write(&self, path: &Path, data: &[u8]) -> Result<(), StorageError> {
        use tokio::io::AsyncWriteExt;
        let tmp_file_name = uuid::Uuid::new_v4().to_string();
        let tmp_path = self.tmp_path.join(&tmp_file_name);
        let mut file = tokio::fs::File::create(&tmp_path).await?;
        file.write_all(data).await?;
        file.sync_all().await?;
        drop(file);
        rename_with_fallback(&tmp_path, path).await
    }
}
#[async_trait]
impl BlobStorage for FilesystemBlobStorage {
    /// Write `data` under `key`, atomically replacing any existing object.
    async fn put(&self, key: &str, data: &[u8]) -> Result<(), StorageError> {
        let path = self.resolve_path(key)?;
        ensure_parent_dir(&path).await?;
        self.atomic_write(&path, data).await
    }

    async fn put_bytes(&self, key: &str, data: Bytes) -> Result<(), StorageError> {
        self.put(key, &data).await
    }

    /// Read the whole object; missing keys map to a not-found error.
    async fn get(&self, key: &str) -> Result<Vec<u8>, StorageError> {
        let path = self.resolve_path(key)?;
        tokio::fs::read(&path).await.map_err(map_io_not_found(key))
    }

    async fn get_bytes(&self, key: &str) -> Result<Bytes, StorageError> {
        self.get(key).await.map(Bytes::from)
    }

    /// Read up to the first `size` bytes of the object.
    async fn get_head(&self, key: &str, size: usize) -> Result<Bytes, StorageError> {
        use tokio::io::AsyncReadExt;
        let path = self.resolve_path(key)?;
        let file = tokio::fs::File::open(&path)
            .await
            .map_err(map_io_not_found(key))?;
        // A single `read()` may legally return fewer bytes than requested even
        // before EOF, so bound the reader with `take` and drain it rather than
        // relying on one read call to fill the buffer.
        let mut buffer = Vec::with_capacity(size);
        file.take(size as u64).read_to_end(&mut buffer).await?;
        Ok(Bytes::from(buffer))
    }

    /// Delete the object; a missing file is treated as success (idempotent).
    async fn delete(&self, key: &str) -> Result<(), StorageError> {
        let path = self.resolve_path(key)?;
        tokio::fs::remove_file(&path).await.or_else(|e| {
            (e.kind() == std::io::ErrorKind::NotFound)
                .then_some(())
                .ok_or(StorageError::Io(e))
        })
    }

    /// Stream an upload to disk, computing SHA-256 and total size as we go.
    ///
    /// Data is written to a unique temp file and renamed into place only after
    /// the stream completes and the file is fsynced, so an interrupted upload
    /// never leaves a partial object at `key`. An abandoned temp file is swept
    /// by `cleanup_orphaned_tmp_files` on the next startup.
    async fn put_stream(
        &self,
        key: &str,
        stream: Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send>>,
    ) -> Result<StreamUploadResult, StorageError> {
        use futures::TryStreamExt;
        use tokio::io::AsyncWriteExt;
        let tmp_file_name = uuid::Uuid::new_v4().to_string();
        let tmp_path = self.tmp_path.join(&tmp_file_name);
        let final_path = self.resolve_path(key)?;
        ensure_parent_dir(&final_path).await?;
        let file = tokio::fs::File::create(&tmp_path).await?;
        // Accumulator threaded through try_fold: open handle, running hash,
        // and byte count.
        struct StreamState {
            file: tokio::fs::File,
            hasher: Sha256,
            total_size: u64,
        }
        let initial = StreamState {
            file,
            hasher: Sha256::new(),
            total_size: 0,
        };
        let final_state = stream
            .map_err(StorageError::Io)
            .try_fold(initial, |mut state, chunk| async move {
                state.hasher.update(&chunk);
                state.total_size += chunk.len() as u64;
                state.file.write_all(&chunk).await?;
                Ok(state)
            })
            .await?;
        final_state.file.sync_all().await?;
        drop(final_state.file);
        rename_with_fallback(&tmp_path, &final_path).await?;
        let hash: [u8; 32] = final_state.hasher.finalize().into();
        Ok(StreamUploadResult {
            sha256_hash: hash,
            size: final_state.total_size,
        })
    }

    /// Copy `src_key` to `dst_key`, fsyncing the destination afterwards.
    async fn copy(&self, src_key: &str, dst_key: &str) -> Result<(), StorageError> {
        let src_path = self.resolve_path(src_key)?;
        let dst_path = self.resolve_path(dst_key)?;
        ensure_parent_dir(&dst_path).await?;
        tokio::fs::copy(&src_path, &dst_path)
            .await
            .map_err(map_io_not_found(src_key))?;
        // Re-open to fsync: tokio::fs::copy does not guarantee durability.
        tokio::fs::File::open(&dst_path).await?.sync_all().await?;
        Ok(())
    }
}
/// Repo backup storage backed by the local filesystem.
///
/// Mirrors `FilesystemBlobStorage`'s layout: objects under `base_path`,
/// writes staged in the `.tmp` subdirectory and renamed into place.
pub struct FilesystemBackupStorage {
    base_path: PathBuf, // root directory for backup CAR files
    tmp_path: PathBuf,  // staging area for atomic writes
}
impl FilesystemBackupStorage {
    /// Create a backup store rooted at `base_path`.
    ///
    /// Creates the root and its `.tmp` staging directory if missing, then
    /// sweeps any orphaned temp files left behind by a previous crash.
    pub async fn new(base_path: impl Into<PathBuf>) -> Result<Self, StorageError> {
        let base_path = base_path.into();
        let tmp_path = base_path.join(".tmp");
        tokio::fs::create_dir_all(&base_path).await?;
        tokio::fs::create_dir_all(&tmp_path).await?;
        cleanup_orphaned_tmp_files(&tmp_path).await;
        Ok(Self {
            base_path,
            tmp_path,
        })
    }

    /// Build a store from the `BACKUP_STORAGE_PATH` environment variable.
    pub async fn from_env() -> Result<Self, StorageError> {
        let path = std::env::var("BACKUP_STORAGE_PATH")
            .map_err(|_| StorageError::Other("BACKUP_STORAGE_PATH not set".into()))?;
        Self::new(path).await
    }

    /// Map a storage key to an on-disk path after `validate_key` checks.
    fn resolve_path(&self, key: &str) -> Result<PathBuf, StorageError> {
        validate_key(key)?;
        Ok(self.base_path.join(key))
    }
}
#[async_trait]
impl BackupStorage for FilesystemBackupStorage {
    /// Store a backup CAR for `did` at revision `rev`.
    /// Returns the storage key (`"{did}/{rev}.car"`) used to fetch it later.
    async fn put_backup(&self, did: &str, rev: &str, data: &[u8]) -> Result<String, StorageError> {
        use tokio::io::AsyncWriteExt;
        let key = format!("{}/{}.car", did, rev);
        let final_path = self.resolve_path(&key)?;
        ensure_parent_dir(&final_path).await?;
        // Stage in .tmp, fsync, then rename so readers never see a partial file.
        let tmp_file_name = uuid::Uuid::new_v4().to_string();
        let tmp_path = self.tmp_path.join(&tmp_file_name);
        let mut file = tokio::fs::File::create(&tmp_path).await?;
        file.write_all(data).await?;
        file.sync_all().await?;
        drop(file);
        rename_with_fallback(&tmp_path, &final_path).await?;
        Ok(key)
    }

    /// Read a backup by storage key; missing keys map to not-found.
    async fn get_backup(&self, storage_key: &str) -> Result<Bytes, StorageError> {
        let path = self.resolve_path(storage_key)?;
        tokio::fs::read(&path)
            .await
            .map(Bytes::from)
            .map_err(map_io_not_found(storage_key))
    }

    /// Delete a backup; a missing file is treated as success (idempotent).
    async fn delete_backup(&self, storage_key: &str) -> Result<(), StorageError> {
        let path = self.resolve_path(storage_key)?;
        tokio::fs::remove_file(&path).await.or_else(|e| {
            (e.kind() == std::io::ErrorKind::NotFound)
                .then_some(())
                .ok_or(StorageError::Io(e))
        })
    }
}
/// Construct the blob storage backend selected by `BLOB_STORAGE_BACKEND`
/// ("s3" or anything else → filesystem, the default).
///
/// Panics if the filesystem backend cannot be initialized, since the PDS
/// cannot operate without blob storage.
pub async fn create_blob_storage() -> Arc<dyn BlobStorage> {
    let backend = std::env::var("BLOB_STORAGE_BACKEND").unwrap_or_else(|_| "filesystem".into());
    if backend == "s3" {
        tracing::info!("Initializing S3 blob storage");
        return Arc::new(S3BlobStorage::new().await);
    }
    tracing::info!("Initializing filesystem blob storage");
    match FilesystemBlobStorage::from_env().await {
        Ok(storage) => Arc::new(storage),
        Err(e) => panic!(
            "Failed to initialize filesystem blob storage: {}. \
             Set BLOB_STORAGE_PATH to a valid directory path.",
            e
        ),
    }
}
/// Construct the backup storage backend, or `None` when backups are off.
///
/// Backups run unless `BACKUP_ENABLED` is "false"/"0". The backend is chosen
/// by `BACKUP_STORAGE_BACKEND` ("s3" or anything else → filesystem, the
/// default); a misconfigured backend logs an error and disables backups
/// rather than aborting startup.
pub async fn create_backup_storage() -> Option<Arc<dyn BackupStorage>> {
    let enabled = std::env::var("BACKUP_ENABLED")
        .map(|v| v != "false" && v != "0")
        .unwrap_or(true);
    if !enabled {
        tracing::info!("Backup storage disabled via BACKUP_ENABLED=false");
        return None;
    }
    let backend = std::env::var("BACKUP_STORAGE_BACKEND").unwrap_or_else(|_| "filesystem".into());
    if backend == "s3" {
        return match S3BackupStorage::new().await {
            Some(storage) => {
                tracing::info!("Initialized S3 backup storage");
                Some(Arc::new(storage) as Arc<dyn BackupStorage>)
            }
            None => {
                tracing::error!(
                    "BACKUP_STORAGE_BACKEND=s3 but BACKUP_S3_BUCKET is not set. \
                     Backups will be disabled."
                );
                None
            }
        };
    }
    match FilesystemBackupStorage::from_env().await {
        Ok(storage) => {
            tracing::info!("Initialized filesystem backup storage");
            Some(Arc::new(storage) as Arc<dyn BackupStorage>)
        }
        Err(e) => {
            tracing::error!(
                "Failed to initialize filesystem backup storage: {}. \
                 Set BACKUP_STORAGE_PATH to a valid directory path. \
                 Backups will be disabled.",
                e
            );
            None
        }
    }
}
/// Lightweight "pipe" combinator: `value.pipe(f)` is `f(value)`.
///
/// Lets a free function (e.g. `Arc::new`) sit at the end of a method chain
/// instead of wrapping the whole expression. Implemented for every `Sized`
/// type via the blanket impl below.
trait Pipe: Sized {
    /// Apply `f` to `self` and return the result.
    #[inline]
    fn pipe<F, R>(self, f: F) -> R
    where
        F: FnOnce(Self) -> R,
    {
        f(self)
    }
}
impl<T> Pipe for T {}

View File

@@ -7,7 +7,8 @@ This guide covers deploying Tranquil PDS using containers with podman.
## Prerequisites
- A VPS with at least 2GB RAM and 20GB disk
- A VPS with at least 2GB RAM
- Disk space for blobs (depends on usage; plan for ~1GB per active user as a baseline)
- A domain name pointing to your server's IP
- A **wildcard TLS certificate** for `*.pds.example.com` (user handles are served as subdomains)
- Root or sudo access
@@ -42,7 +43,7 @@ For production setups with proper service management, continue to either the Deb
## Standalone Containers (No Compose)
If you already have postgres, valkey, and minio running on the host (eg., from the [Debian install guide](install-debian.md)), you can run just the app containers.
If you already have postgres and valkey running on the host (e.g., from the [Debian install guide](install-debian.md)), you can run just the app containers.
Build the images:
```sh
@@ -50,11 +51,12 @@ podman build -t tranquil-pds:latest .
podman build -t tranquil-pds-frontend:latest ./frontend
```
Run the backend with host networking (so it can access postgres/valkey/minio on localhost):
Run the backend with host networking (so it can access postgres/valkey on localhost) and mount the blob storage:
```sh
podman run -d --name tranquil-pds \
--network=host \
--env-file /etc/tranquil-pds/tranquil-pds.env \
-v /var/lib/tranquil:/var/lib/tranquil:Z \
tranquil-pds:latest
```
@@ -104,7 +106,7 @@ apt install -y podman
```bash
mkdir -p /etc/containers/systemd
mkdir -p /srv/tranquil-pds/{postgres,minio,valkey,certs,acme,config}
mkdir -p /srv/tranquil-pds/{postgres,valkey,blobs,backups,certs,acme,config}
```
## Create Environment File
@@ -152,26 +154,16 @@ podman build -t tranquil-pds-frontend:latest ./frontend
```bash
source /srv/tranquil-pds/config/tranquil-pds.env
echo "$DB_PASSWORD" | podman secret create tranquil-pds-db-password -
echo "$MINIO_ROOT_PASSWORD" | podman secret create tranquil-pds-minio-password -
```
## Start Services and Initialize
```bash
systemctl daemon-reload
systemctl start tranquil-pds-db tranquil-pds-minio tranquil-pds-valkey
systemctl start tranquil-pds-db tranquil-pds-valkey
sleep 10
```
Create the minio buckets:
```bash
podman run --rm --pod tranquil-pds \
-e MINIO_ROOT_USER=minioadmin \
-e MINIO_ROOT_PASSWORD=your-minio-password \
cgr.dev/chainguard/minio-client:latest-dev \
sh -c "mc alias set local http://localhost:9000 \$MINIO_ROOT_USER \$MINIO_ROOT_PASSWORD && mc mb --ignore-existing local/pds-blobs && mc mb --ignore-existing local/pds-backups"
```
Run migrations:
```bash
cargo install sqlx-cli --no-default-features --features postgres
@@ -215,7 +207,7 @@ systemctl restart tranquil-pds-nginx
## Enable All Services
```bash
systemctl enable tranquil-pds-db tranquil-pds-minio tranquil-pds-valkey tranquil-pds-app tranquil-pds-frontend tranquil-pds-nginx
systemctl enable tranquil-pds-db tranquil-pds-valkey tranquil-pds-app tranquil-pds-frontend tranquil-pds-nginx
```
## Configure Firewall
@@ -260,7 +252,7 @@ rc-service podman start
```sh
mkdir -p /srv/tranquil-pds/{data,config}
mkdir -p /srv/tranquil-pds/data/{postgres,minio,valkey,certs,acme}
mkdir -p /srv/tranquil-pds/data/{postgres,valkey,blobs,backups,certs,acme}
```
## Clone Repository and Build Images
@@ -342,16 +334,6 @@ rc-service tranquil-pds start
sleep 15
```
Create the minio buckets:
```sh
source /srv/tranquil-pds/config/tranquil-pds.env
podman run --rm --network tranquil-pds_default \
-e MINIO_ROOT_USER="$MINIO_ROOT_USER" \
-e MINIO_ROOT_PASSWORD="$MINIO_ROOT_PASSWORD" \
cgr.dev/chainguard/minio-client:latest-dev \
sh -c 'mc alias set local http://minio:9000 $MINIO_ROOT_USER $MINIO_ROOT_PASSWORD && mc mb --ignore-existing local/pds-blobs && mc mb --ignore-existing local/pds-backups'
```
Run migrations:
```sh
apk add rustup

View File

@@ -4,7 +4,8 @@ This guide covers installing Tranquil PDS on Debian 13.
## Prerequisites
- A VPS with at least 2GB RAM and 20GB disk
- A VPS with at least 2GB RAM
- Disk space for blobs (depends on usage; plan for ~1GB per active user as a baseline)
- A domain name pointing to your server's IP
- A wildcard TLS certificate for `*.pds.example.com` (user handles are served as subdomains)
- Root or sudo access
@@ -37,49 +38,13 @@ sudo -u postgres psql -c "CREATE DATABASE pds OWNER tranquil_pds;"
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE pds TO tranquil_pds;"
```
## Install minio
## Create Blob Storage Directories
```bash
curl -O https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
mv minio /usr/local/bin/
mkdir -p /var/lib/minio/data
useradd -r -s /sbin/nologin minio-user
chown -R minio-user:minio-user /var/lib/minio
cat > /etc/default/minio << 'EOF'
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=your-minio-password
MINIO_VOLUMES="/var/lib/minio/data"
MINIO_OPTS="--console-address :9001"
EOF
cat > /etc/systemd/system/minio.service << 'EOF'
[Unit]
Description=MinIO Object Storage
After=network.target
[Service]
User=minio-user
Group=minio-user
EnvironmentFile=/etc/default/minio
ExecStart=/usr/local/bin/minio server $MINIO_VOLUMES $MINIO_OPTS
Restart=always
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable minio
systemctl start minio
mkdir -p /var/lib/tranquil/blobs /var/lib/tranquil/backups
```
Create the buckets (wait a few seconds for minio to start):
```bash
curl -O https://dl.min.io/client/mc/release/linux-amd64/mc
chmod +x mc
mv mc /usr/local/bin/
mc alias set local http://localhost:9000 minioadmin your-minio-password
mc mb local/pds-blobs
mc mb local/pds-backups
```
We'll set ownership after creating the service user.
## Install valkey
@@ -142,12 +107,13 @@ chown -R www-data:www-data /var/www/tranquil-pds
```bash
useradd -r -s /sbin/nologin tranquil-pds
chown -R tranquil-pds:tranquil-pds /var/lib/tranquil
cp /opt/tranquil-pds/target/release/tranquil-pds /usr/local/bin/
cat > /etc/systemd/system/tranquil-pds.service << 'EOF'
[Unit]
Description=Tranquil PDS - AT Protocol PDS
After=network.target postgresql.service minio.service
After=network.target postgresql.service
[Service]
Type=simple
User=tranquil-pds
@@ -156,6 +122,10 @@ EnvironmentFile=/etc/tranquil-pds/tranquil-pds.env
ExecStart=/usr/local/bin/tranquil-pds
Restart=always
RestartSec=5
ProtectSystem=strict
ProtectHome=true
PrivateTmp=true
ReadWritePaths=/var/lib/tranquil
[Install]
WantedBy=multi-user.target
EOF

View File

@@ -4,15 +4,15 @@ If you're reaching for kubernetes for this app, you're experienced enough to kno
- cloudnativepg (or your preferred postgres operator)
- valkey
- s3-compatible object storage (minio operator, or just use a managed service)
- a PersistentVolume for blob storage
- the app itself (it's just a container with some env vars)
You'll need a wildcard TLS certificate for `*.your-pds-hostname.example.com`. User handles are served as subdomains.
The container image expects:
- `DATABASE_URL` - postgres connection string
- `S3_ENDPOINT`, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `S3_BUCKET`
- `BACKUP_S3_BUCKET` - bucket for repo backups (optional but recommended)
- `BLOB_STORAGE_PATH` - path to blob storage (mount a PV here)
- `BACKUP_STORAGE_PATH` - path for repo backups (optional but recommended)
- `VALKEY_URL` - redis:// connection string
- `PDS_HOSTNAME` - your PDS hostname (without protocol)
- `JWT_SECRET`, `DPOP_SECRET`, `MASTER_KEY` - generate with `openssl rand -base64 48`

View File

@@ -0,0 +1,780 @@
<script lang="ts">
import { navigate, routes, getFullUrl } from '../lib/router.svelte'
import { api } from '../lib/api'
import { _ } from '../lib/i18n'
import {
createRegistrationFlow,
restoreRegistrationFlow,
VerificationStep,
KeyChoiceStep,
DidDocStep,
AppPasswordStep,
} from '../lib/registration'
import {
prepareCreationOptions,
serializeAttestationResponse,
type WebAuthnCreationOptionsResponse,
} from '../lib/webauthn'
import AccountTypeSwitcher from '../components/AccountTypeSwitcher.svelte'
// Server capabilities from describeServer: handle domains, invite policy,
// and which verification channels are configured.
let serverInfo = $state<{
  availableUserDomains: string[]
  inviteCodeRequired: boolean
  availableCommsChannels?: string[]
  selfHostedDidWebEnabled?: boolean
} | null>(null)
let loadingServerInfo = $state(true)
let serverInfoLoaded = false // non-reactive guard so initialization runs only once
let ssoAvailable = $state(false)
// Multi-step passkey registration state machine (created in loadServerInfo).
let flow = $state<ReturnType<typeof createRegistrationFlow> | null>(null)
let passkeyName = $state('')
// Display name of the OAuth client requesting authorization, if any.
let clientName = $state<string | null>(null)
// OAuth request_uri from the current page's query string, if present.
function getRequestUri(): string | null {
  return new URLSearchParams(window.location.search).get('request_uri')
}
// One-shot initialization: fetch server info, the OAuth client's name, and
// SSO availability. Guarded by the non-reactive flag so the effect's re-runs
// don't repeat the requests.
$effect(() => {
  if (!serverInfoLoaded) {
    serverInfoLoaded = true
    loadServerInfo()
    fetchClientName()
    checkSsoAvailable()
  }
})
// Probe the server's SSO provider list; any configured provider enables the
// SSO sign-up option. Network or parse failures are treated as "no SSO".
async function checkSsoAvailable() {
  try {
    const response = await fetch('/oauth/sso/providers')
    if (response.ok) {
      const data = await response.json()
      ssoAvailable = (data.providers?.length ?? 0) > 0
    }
  } catch {
    ssoAvailable = false
  }
}
// Resolve the requesting OAuth client's display name for the page header.
// No-op when this isn't an OAuth-initiated registration; failures simply
// leave the name unset (the UI falls back to generic copy).
async function fetchClientName() {
  const requestUri = getRequestUri()
  if (!requestUri) return
  try {
    const response = await fetch(`/oauth/authorize?request_uri=${encodeURIComponent(requestUri)}`, {
      headers: { 'Accept': 'application/json' }
    })
    if (response.ok) {
      const data = await response.json()
      clientName = data.client_name || null
    }
  } catch {
    clientName = null
  }
}
// When the flow reaches its final step, hand the new account back to the
// OAuth authorization flow (or the dashboard).
$effect(() => {
  if (flow?.state.step === 'redirect-to-dashboard') {
    completeOAuthRegistration()
  }
})
// Kick off passkey account creation exactly once when the flow enters the
// 'creating' step; the non-reactive flag prevents double-firing on re-runs.
let creatingStarted = false
$effect(() => {
  if (flow?.state.step === 'creating' && !creatingStarted) {
    creatingStarted = true
    flow.createPasskeyAccount()
  }
})
// Fetch server capabilities and set up the registration flow. If a passkey
// flow was persisted from a previous visit it is resumed; otherwise a fresh
// flow is created using the server's first available handle domain (falling
// back to the current hostname).
async function loadServerInfo() {
  try {
    const restored = restoreRegistrationFlow()
    if (restored && restored.state.mode === 'passkey') {
      flow = restored
      serverInfo = await api.describeServer()
    } else {
      serverInfo = await api.describeServer()
      const hostname = serverInfo?.availableUserDomains?.[0] || window.location.hostname
      flow = createRegistrationFlow('passkey', hostname)
    }
  } catch (e) {
    console.error('Failed to load server info:', e)
  } finally {
    loadingServerInfo = false
  }
}
// Validate the account-info step. Returns a localized error message for the
// first failed check, or null when the form is valid.
function validateInfoStep(): string | null {
  if (!flow) return 'Flow not initialized'
  const info = flow.info
  // Handle is the bare prefix; the PDS domain is appended, so dots are rejected.
  if (!info.handle.trim()) return $_('registerPasskey.errors.handleRequired')
  if (info.handle.includes('.')) return $_('registerPasskey.errors.handleNoDots')
  if (serverInfo?.inviteCodeRequired && !info.inviteCode?.trim()) {
    return $_('registerPasskey.errors.inviteRequired')
  }
  // Self-hosted did:web accounts must supply a well-formed external DID.
  if (info.didType === 'web-external') {
    if (!info.externalDid?.trim()) return $_('registerPasskey.errors.externalDidRequired')
    if (!info.externalDid.trim().startsWith('did:web:')) return $_('registerPasskey.errors.externalDidFormat')
  }
  // The contact field matching the chosen verification channel is required.
  switch (info.verificationChannel) {
    case 'email':
      if (!info.email.trim()) return $_('registerPasskey.errors.emailRequired')
      break
    case 'discord':
      if (!info.discordId?.trim()) return $_('registerPasskey.errors.discordRequired')
      break
    case 'telegram':
      if (!info.telegramUsername?.trim()) return $_('registerPasskey.errors.telegramRequired')
      break
    case 'signal':
      if (!info.signalNumber?.trim()) return $_('registerPasskey.errors.signalRequired')
      break
  }
  return null
}
// Submit handler for the info form: validate, confirm the browser supports
// WebAuthn, then advance the flow to the next step.
async function handleInfoSubmit(e: Event) {
  e.preventDefault()
  if (!flow) return
  const validationError = validateInfoStep()
  if (validationError) {
    flow.setError(validationError)
    return
  }
  // Passkey registration is impossible without the WebAuthn API.
  if (!window.PublicKeyCredential) {
    flow.setError($_('registerPasskey.errors.passkeysNotSupported'))
    return
  }
  flow.clearError()
  flow.proceedFromInfo()
}
// Run the WebAuthn ceremony for the newly created account: fetch creation
// options from the server, prompt the browser to create a credential, then
// send the attestation back to finish setup. On success the flow records the
// generated app password for display.
async function handlePasskeyRegistration() {
  if (!flow || !flow.account) return
  flow.setSubmitting(true)
  flow.clearError()
  try {
    const { options } = await api.startPasskeyRegistrationForSetup(
      flow.account.did,
      flow.account.setupToken!,
      passkeyName || undefined
    )
    const publicKeyOptions = prepareCreationOptions(options as unknown as WebAuthnCreationOptionsResponse)
    const credential = await navigator.credentials.create({
      publicKey: publicKeyOptions
    })
    // A null credential means the user dismissed the browser prompt.
    if (!credential) {
      flow.setError($_('registerPasskey.errors.passkeyCancelled'))
      flow.setSubmitting(false)
      return
    }
    const credentialResponse = serializeAttestationResponse(credential as PublicKeyCredential)
    const result = await api.completePasskeySetup(
      flow.account.did,
      flow.account.setupToken!,
      credentialResponse,
      passkeyName || undefined
    )
    flow.setPasskeyComplete(result.appPassword, result.appPasswordName)
  } catch (err) {
    // NotAllowedError is how WebAuthn reports user cancellation/timeouts.
    if (err instanceof DOMException && err.name === 'NotAllowedError') {
      flow.setError($_('registerPasskey.errors.passkeyCancelled'))
    } else if (err instanceof Error) {
      flow.setError(err.message || $_('registerPasskey.errors.passkeyFailed'))
    } else {
      flow.setError($_('registerPasskey.errors.passkeyFailed'))
    }
  } finally {
    flow.setSubmitting(false)
  }
}
// Finish an OAuth-initiated registration: hand the new account's credentials
// to the authorization endpoint, then follow its redirect back to the client.
// Outside an OAuth flow (no request_uri) we just go to the dashboard.
async function completeOAuthRegistration() {
  const requestUri = getRequestUri()
  if (!requestUri || !flow?.account) {
    navigate(routes.dashboard)
    return
  }
  try {
    const response = await fetch('/oauth/register/complete', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
      },
      body: JSON.stringify({
        request_uri: requestUri,
        did: flow.account.did,
        app_password: flow.account.appPassword,
      }),
    })
    const data = await response.json()
    if (!response.ok) {
      flow.setError(data.error_description || data.error || $_('common.error'))
      return
    }
    // The server supplies the client redirect on success.
    if (data.redirect_uri) {
      window.location.href = data.redirect_uri
      return
    }
    navigate(routes.dashboard)
  } catch {
    flow.setError($_('common.error'))
  }
}
// Whether the server offers a given verification channel.
// Until server info loads, only email is assumed available.
function isChannelAvailable(ch: string): boolean {
  return (serverInfo?.availableCommsChannels ?? ['email']).includes(ch)
}
// Localized display label for a verification channel id; unknown ids are
// shown as-is.
function channelLabel(ch: string): string {
  const i18nKeys: Record<string, string> = {
    email: 'register.email',
    discord: 'register.discord',
    telegram: 'register.telegram',
    signal: 'register.signal',
  }
  const key = i18nKeys[ch]
  return key !== undefined ? $_(key) : ch
}
// Preview of the full handle ("<handle>.<pds hostname>"); empty while the
// handle field is blank.
let fullHandle = $derived(() => {
  if (!flow?.info.handle.trim()) return ''
  return `${flow.info.handle.trim()}.${flow.state.pdsHostname}`
})
// Cancel registration. In an OAuth flow, notify the server so the client
// receives a proper denial redirect; otherwise (or if that fails) just go
// back in browser history.
async function handleCancel() {
  const requestUri = getRequestUri()
  if (!requestUri) {
    window.history.back()
    return
  }
  try {
    const response = await fetch('/oauth/authorize/deny', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
      },
      body: JSON.stringify({ request_uri: requestUri })
    })
    const data = await response.json()
    if (data.redirect_uri) {
      window.location.href = data.redirect_uri
    }
  } catch {
    window.history.back()
  }
}
// Switch to the login page, carrying the OAuth request context when present.
function goToLogin() {
  const requestUri = getRequestUri()
  if (!requestUri) {
    navigate(routes.login)
    return
  }
  navigate(routes.oauthLogin, { params: { request_uri: requestUri } })
}
</script>
<div class="oauth-register-container">
{#if loadingServerInfo}
<div class="loading">
<div class="spinner"></div>
<p>{$_('common.loading')}</p>
</div>
{:else if flow}
<header class="page-header">
<h1>{$_('oauth.register.title')}</h1>
<p class="subtitle">
{#if clientName}
{$_('oauth.register.subtitle')} <strong>{clientName}</strong>
{:else}
{$_('oauth.register.subtitleGeneric')}
{/if}
</p>
</header>
{#if flow.state.error}
<div class="error">{flow.state.error}</div>
{/if}
{#if flow.state.step === 'info'}
<div class="migrate-callout">
<div class="migrate-icon"></div>
<div class="migrate-content">
<strong>{$_('register.migrateTitle')}</strong>
<p>{$_('register.migrateDescription')}</p>
<a href={getFullUrl(routes.migrate)} class="migrate-link">
{$_('register.migrateLink')}
</a>
</div>
</div>
<AccountTypeSwitcher active="passkey" {ssoAvailable} oauthRequestUri={getRequestUri()} />
<div class="split-layout">
<div class="form-section">
<form onsubmit={handleInfoSubmit}>
<div class="field">
<label for="handle">{$_('register.handle')}</label>
<input
id="handle"
type="text"
bind:value={flow.info.handle}
placeholder={$_('register.handlePlaceholder')}
disabled={flow.state.submitting}
required
autocomplete="off"
/>
{#if fullHandle()}
<p class="hint">{$_('register.handleHint', { values: { handle: fullHandle() } })}</p>
{/if}
</div>
<fieldset>
<legend>{$_('register.contactMethod')}</legend>
<div class="contact-fields">
<div class="field">
<label for="verification-channel">{$_('register.verificationMethod')}</label>
<select id="verification-channel" bind:value={flow.info.verificationChannel} disabled={flow.state.submitting}>
<option value="email">{channelLabel('email')}</option>
{#if isChannelAvailable('discord')}
<option value="discord">{channelLabel('discord')}</option>
{/if}
{#if isChannelAvailable('telegram')}
<option value="telegram">{channelLabel('telegram')}</option>
{/if}
{#if isChannelAvailable('signal')}
<option value="signal">{channelLabel('signal')}</option>
{/if}
</select>
</div>
{#if flow.info.verificationChannel === 'email'}
<div class="field">
<label for="email">{$_('register.emailAddress')}</label>
<input
id="email"
type="email"
bind:value={flow.info.email}
placeholder={$_('register.emailPlaceholder')}
disabled={flow.state.submitting}
required
/>
</div>
{:else if flow.info.verificationChannel === 'discord'}
<div class="field">
<label for="discord-id">{$_('register.discordId')}</label>
<input
id="discord-id"
type="text"
bind:value={flow.info.discordId}
placeholder={$_('register.discordIdPlaceholder')}
disabled={flow.state.submitting}
required
/>
<p class="hint">{$_('register.discordIdHint')}</p>
</div>
{:else if flow.info.verificationChannel === 'telegram'}
<div class="field">
<label for="telegram-username">{$_('register.telegramUsername')}</label>
<input
id="telegram-username"
type="text"
bind:value={flow.info.telegramUsername}
placeholder={$_('register.telegramUsernamePlaceholder')}
disabled={flow.state.submitting}
required
/>
</div>
{:else if flow.info.verificationChannel === 'signal'}
<div class="field">
<label for="signal-number">{$_('register.signalNumber')}</label>
<input
id="signal-number"
type="tel"
bind:value={flow.info.signalNumber}
placeholder={$_('register.signalNumberPlaceholder')}
disabled={flow.state.submitting}
required
/>
<p class="hint">{$_('register.signalNumberHint')}</p>
</div>
{/if}
</div>
</fieldset>
<fieldset>
<legend>{$_('registerPasskey.identityType')}</legend>
<p class="section-hint">{$_('registerPasskey.identityTypeHint')}</p>
<div class="radio-group">
<label class="radio-label">
<input type="radio" name="didType" value="plc" bind:group={flow.info.didType} disabled={flow.state.submitting} />
<span class="radio-content">
<strong>{$_('registerPasskey.didPlcRecommended')}</strong>
<span class="radio-hint">{$_('registerPasskey.didPlcHint')}</span>
</span>
</label>
<label class="radio-label" class:disabled={serverInfo?.selfHostedDidWebEnabled === false}>
<input type="radio" name="didType" value="web" bind:group={flow.info.didType} disabled={flow.state.submitting || serverInfo?.selfHostedDidWebEnabled === false} />
<span class="radio-content">
<strong>{$_('registerPasskey.didWeb')}</strong>
{#if serverInfo?.selfHostedDidWebEnabled === false}
<span class="radio-hint disabled-hint">{$_('registerPasskey.didWebDisabledHint')}</span>
{:else}
<span class="radio-hint">{$_('registerPasskey.didWebHint')}</span>
{/if}
</span>
</label>
<label class="radio-label">
<input type="radio" name="didType" value="web-external" bind:group={flow.info.didType} disabled={flow.state.submitting} />
<span class="radio-content">
<strong>{$_('registerPasskey.didWebBYOD')}</strong>
<span class="radio-hint">{$_('registerPasskey.didWebBYODHint')}</span>
</span>
</label>
</div>
{#if flow.info.didType === 'web'}
<div class="warning-box">
<strong>{$_('registerPasskey.didWebWarningTitle')}</strong>
<ul>
<li><strong>{$_('registerPasskey.didWebWarning1')}</strong> {@html $_('registerPasskey.didWebWarning1Detail', { values: { did: `<code>did:web:yourhandle.${serverInfo?.availableUserDomains?.[0] || 'this-pds.com'}</code>` } })}</li>
<li><strong>{$_('registerPasskey.didWebWarning2')}</strong> {$_('registerPasskey.didWebWarning2Detail')}</li>
<li><strong>{$_('registerPasskey.didWebWarning3')}</strong> {$_('registerPasskey.didWebWarning3Detail')}</li>
<li><strong>{$_('registerPasskey.didWebWarning4')}</strong> {$_('registerPasskey.didWebWarning4Detail')}</li>
</ul>
</div>
{/if}
{#if flow.info.didType === 'web-external'}
<div class="field">
<label for="external-did">{$_('registerPasskey.externalDid')}</label>
<input id="external-did" type="text" bind:value={flow.info.externalDid} placeholder={$_('registerPasskey.externalDidPlaceholder')} disabled={flow.state.submitting} required />
<p class="hint">{$_('registerPasskey.externalDidHint')} <code>https://{flow.info.externalDid ? flow.extractDomain(flow.info.externalDid) : 'yourdomain.com'}/.well-known/did.json</code></p>
</div>
{/if}
</fieldset>
{#if serverInfo?.inviteCodeRequired}
<div class="field">
<label for="invite-code">{$_('register.inviteCode')} <span class="required">*</span></label>
<input
id="invite-code"
type="text"
bind:value={flow.info.inviteCode}
placeholder={$_('register.inviteCodePlaceholder')}
disabled={flow.state.submitting}
required
/>
</div>
{/if}
<div class="actions">
<button type="submit" class="primary" disabled={flow.state.submitting}>
{flow.state.submitting ? $_('common.loading') : $_('common.continue')}
</button>
</div>
<div class="secondary-actions">
<button type="button" class="link-btn" onclick={goToLogin}>
{$_('oauth.register.haveAccount')}
</button>
<button type="button" class="link-btn" onclick={handleCancel}>
{$_('common.cancel')}
</button>
</div>
</form>
<div class="form-links">
<p class="link-text">
{$_('register.alreadyHaveAccount')} <a href="/app/login">{$_('register.signIn')}</a>
</p>
</div>
</div>
<aside class="info-panel">
<h3>{$_('registerPasskey.infoWhyPasskey')}</h3>
<p>{$_('registerPasskey.infoWhyPasskeyDesc')}</p>
<h3>{$_('registerPasskey.infoHowItWorks')}</h3>
<p>{$_('registerPasskey.infoHowItWorksDesc')}</p>
<h3>{$_('registerPasskey.infoAppAccess')}</h3>
<p>{$_('registerPasskey.infoAppAccessDesc')}</p>
</aside>
</div>
{:else if flow.state.step === 'key-choice'}
<KeyChoiceStep {flow} />
{:else if flow.state.step === 'initial-did-doc'}
<DidDocStep {flow} type="initial" onConfirm={() => flow?.createPasskeyAccount()} onBack={() => flow?.goBack()} />
{:else if flow.state.step === 'creating'}
<div class="creating">
<div class="spinner"></div>
<p>{$_('registerPasskey.creatingAccount')}</p>
</div>
{:else if flow.state.step === 'passkey'}
<div class="passkey-step">
<h2>{$_('registerPasskey.setupPasskey')}</h2>
<p>{$_('registerPasskey.passkeyDescription')}</p>
<div class="field">
<label for="passkey-name">{$_('registerPasskey.passkeyName')}</label>
<input
id="passkey-name"
type="text"
bind:value={passkeyName}
placeholder={$_('registerPasskey.passkeyNamePlaceholder')}
disabled={flow.state.submitting}
/>
<p class="hint">{$_('registerPasskey.passkeyNameHint')}</p>
</div>
<button
type="button"
class="primary"
onclick={handlePasskeyRegistration}
disabled={flow.state.submitting}
>
{flow.state.submitting ? $_('common.loading') : $_('registerPasskey.registerPasskey')}
</button>
</div>
{:else if flow.state.step === 'app-password'}
<AppPasswordStep {flow} />
{:else if flow.state.step === 'verify'}
<VerificationStep {flow} />
{:else if flow.state.step === 'updated-did-doc'}
<DidDocStep {flow} type="updated" onConfirm={() => flow?.activateAccount()} />
{:else if flow.state.step === 'activating'}
<div class="creating">
<div class="spinner"></div>
<p>{$_('registerPasskey.activatingAccount')}</p>
</div>
{/if}
{/if}
</div>
<style>
.oauth-register-container {
max-width: var(--width-lg);
margin: var(--space-9) auto;
padding: var(--space-7);
}
.loading, .creating {
display: flex;
flex-direction: column;
align-items: center;
gap: var(--space-4);
padding: var(--space-8);
}
.loading p, .creating p {
color: var(--text-secondary);
}
.page-header {
margin-bottom: var(--space-6);
}
.page-header h1 {
margin: 0 0 var(--space-2) 0;
}
.subtitle {
color: var(--text-secondary);
margin: 0;
}
.form-section {
min-width: 0;
}
.form-links {
margin-top: var(--space-6);
}
.link-text {
text-align: center;
color: var(--text-secondary);
}
.link-text a {
color: var(--accent);
}
form {
display: flex;
flex-direction: column;
gap: var(--space-5);
}
.field {
display: flex;
flex-direction: column;
gap: var(--space-1);
}
label {
font-size: var(--text-sm);
font-weight: var(--font-medium);
}
input, select {
padding: var(--space-3);
border: 1px solid var(--border-color);
border-radius: var(--radius-md);
font-size: var(--text-base);
background: var(--bg-input);
color: var(--text-primary);
}
input:focus, select:focus {
outline: none;
border-color: var(--accent);
}
.hint {
font-size: var(--text-xs);
color: var(--text-muted);
margin: var(--space-1) 0 0 0;
}
.error {
padding: var(--space-3);
background: var(--error-bg);
border: 1px solid var(--error-border);
border-radius: var(--radius-md);
color: var(--error-text);
margin-bottom: var(--space-4);
}
.actions {
display: flex;
gap: var(--space-4);
margin-top: var(--space-2);
}
button.primary {
flex: 1;
padding: var(--space-3);
background: var(--accent);
color: var(--text-inverse);
border: none;
border-radius: var(--radius-md);
font-size: var(--text-base);
cursor: pointer;
transition: background-color var(--transition-fast);
}
button.primary:hover:not(:disabled) {
background: var(--accent-hover);
}
button.primary:disabled {
opacity: 0.6;
cursor: not-allowed;
}
.secondary-actions {
display: flex;
justify-content: center;
gap: var(--space-4);
margin-top: var(--space-4);
}
.link-btn {
background: none;
border: none;
color: var(--accent);
cursor: pointer;
font-size: var(--text-sm);
padding: var(--space-2);
}
.link-btn:hover {
text-decoration: underline;
}
.contact-fields {
display: flex;
flex-direction: column;
gap: var(--space-4);
}
.required {
color: var(--error-text);
}
.passkey-step {
display: flex;
flex-direction: column;
gap: var(--space-4);
}
.passkey-step h2 {
margin: 0;
}
.passkey-step p {
color: var(--text-secondary);
margin: 0;
}
fieldset {
border: 1px solid var(--border-color);
border-radius: var(--radius-md);
padding: var(--space-4);
}
legend {
padding: 0 var(--space-2);
font-weight: var(--font-medium);
}
.spinner {
width: 32px;
height: 32px;
border: 3px solid var(--border-color);
border-top-color: var(--accent);
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
</style>

View File

@@ -0,0 +1,680 @@
<script lang="ts">
import { onMount } from 'svelte'
import { _ } from '../lib/i18n'
import { toast } from '../lib/toast.svelte'
import SsoIcon from '../components/SsoIcon.svelte'
// Pending SSO registration as returned by /oauth/sso/pending-registration.
interface PendingRegistration {
request_uri: string
provider: string
provider_user_id: string
provider_username: string | null
provider_email: string | null
// True when the identity provider already verified this email address.
provider_email_verified: boolean
}
// Which verification channels the server has configured.
interface CommsChannelConfig {
email: boolean
discord: boolean
telegram: boolean
signal: boolean
}
// --- page lifecycle state ---
let pending = $state<PendingRegistration | null>(null)
let loading = $state(true)
let submitting = $state(false)
let error = $state<string | null>(null)
// --- form fields ---
let handle = $state('')
let email = $state('')
// Email as originally supplied by the SSO provider; used to detect edits
// that require re-verification.
let providerEmailOriginal = $state<string | null>(null)
let inviteCode = $state('')
let verificationChannel = $state('email')
let discordId = $state('')
let telegramUsername = $state('')
let signalNumber = $state('')
// --- handle availability check (null = unknown / not yet checked) ---
let handleAvailable = $state<boolean | null>(null)
let checkingHandle = $state(false)
let handleError = $state<string | null>(null)
// --- identity type selection ---
let didType = $state<'plc' | 'web' | 'web-external'>('plc')
let externalDid = $state('')
// Server capabilities loaded from describeServer; null until loaded or on
// failure.
let serverInfo = $state<{
availableUserDomains: string[]
inviteCodeRequired: boolean
selfHostedDidWebEnabled: boolean
} | null>(null)
// Channel availability defaults; overwritten by the server's report when
// present.
let commsChannels = $state<CommsChannelConfig>({
email: true,
discord: false,
telegram: false,
signal: false,
})
// Read the one-time registration token from the page's query string.
function getToken(): string | null {
  return new URLSearchParams(window.location.search).get('token')
}
// Map an SSO provider id to its branded display name; unknown ids are shown
// verbatim.
// Fix: the previous plain-object lookup (names[provider]) resolved inherited
// Object.prototype keys — e.g. provider === 'toString' returned a function,
// not a string. A switch has no prototype chain to fall through to.
function getProviderDisplayName(provider: string): string {
  switch (provider) {
    case 'github': return 'GitHub'
    case 'discord': return 'Discord'
    case 'google': return 'Google'
    case 'gitlab': return 'GitLab'
    case 'oidc': return 'SSO'
    default: return provider
  }
}
// Whether the named channel is enabled in the server's comms configuration;
// unknown channel names count as unavailable.
function isChannelAvailable(ch: string): boolean {
  return Boolean(commsChannels[ch as keyof CommsChannelConfig])
}
// Extract the host portion of a did:web identifier for display, decoding the
// percent-encoded port separator (did:web:host%3A8443 -> host:8443).
// Fixes: (1) only strip "did:web:" when it is actually the prefix — the old
// replace() removed the first occurrence anywhere in the string; (2) accept
// lowercase "%3a" too, since percent-encoding is case-insensitive.
function extractDomain(did: string): string {
  const rest = did.startsWith('did:web:') ? did.slice('did:web:'.length) : did
  return rest.replace(/%3A/gi, ':')
}
// Derived *closure* (not a plain value): the template must call fullHandle().
// Combines the typed handle with the server's first user domain; falls back
// to the bare handle when no domain list is available.
// NOTE(review): $derived.by would yield a plain reactive value instead —
// confirm the call-site style (fullHandle()) is intentional before changing.
let fullHandle = $derived(() => {
if (!handle.trim()) return ''
const domain = serverInfo?.availableUserDomains?.[0]
return domain ? `${handle.trim()}.${domain}` : handle.trim()
})
// Kick off both initial fetches concurrently; each handles its own errors.
onMount(() => {
  void loadPendingRegistration()
  void loadServerInfo()
})
// Fetch public server capabilities (user domains, invite requirement,
// did:web support, comms channels). Best-effort: a thrown fetch leaves
// serverInfo null; a non-OK response also leaves it null (no else branch).
async function loadServerInfo() {
try {
const response = await fetch('/xrpc/com.atproto.server.describeServer')
if (response.ok) {
const data = await response.json()
serverInfo = {
availableUserDomains: data.availableUserDomains || [],
inviteCodeRequired: data.inviteCodeRequired ?? false,
selfHostedDidWebEnabled: data.selfHostedDidWebEnabled ?? false,
}
// Only override the channel defaults when the server reports a config.
if (data.commsChannels) {
commsChannels = {
email: data.commsChannels.email ?? true,
discord: data.commsChannels.discord ?? false,
telegram: data.commsChannels.telegram ?? false,
signal: data.commsChannels.signal ?? false,
}
}
}
} catch {
serverInfo = null
}
}
// Load the pending SSO registration identified by the URL token and prefill
// the form (email, and a sanitized handle) from the provider profile.
// Any failure — missing token, error response, or network error — surfaces
// as the generic "expired" message.
async function loadPendingRegistration() {
const token = getToken()
if (!token) {
error = $_('sso_register.error_expired')
loading = false
return
}
try {
const response = await fetch(`/oauth/sso/pending-registration?token=${encodeURIComponent(token)}`)
if (!response.ok) {
const data = await response.json()
error = data.message || $_('sso_register.error_expired')
loading = false
return
}
pending = await response.json()
if (pending?.provider_email) {
email = pending.provider_email
// Remember the provider-supplied email so we can later tell whether the
// user edited it (edits require re-verification).
providerEmailOriginal = pending.provider_email
}
if (pending?.provider_username) {
// Handles allow only lowercase alphanumerics and hyphens.
handle = pending.provider_username.toLowerCase().replace(/[^a-z0-9-]/g, '')
}
} catch {
error = $_('sso_register.error_expired')
} finally {
loading = false
}
}
// Handle of the pending debounce timer for the availability check.
let checkHandleTimeout: ReturnType<typeof setTimeout> | null = null
// Debounce: on every edit of `handle`, reset the availability state and
// schedule a server check 400ms after the last keystroke (handles >= 3 chars
// only).
// NOTE(review): the timer is not cleared on component destroy — confirm a
// late callback firing after unmount is harmless here.
$effect(() => {
if (checkHandleTimeout) {
clearTimeout(checkHandleTimeout)
}
handleAvailable = null
handleError = null
if (handle.length >= 3) {
checkHandleTimeout = setTimeout(() => checkHandleAvailability(), 400)
}
})
// Ask the server whether the typed handle is free. Updates handleAvailable
// (null = unknown) and handleError with any server-provided reason; a
// network failure resets availability to unknown with a generic error.
async function checkHandleAvailability() {
if (!handle || handle.length < 3) return
checkingHandle = true
handleError = null
try {
const response = await fetch(`/oauth/sso/check-handle-available?handle=${encodeURIComponent(handle)}`)
const data = await response.json()
handleAvailable = data.available
if (!data.available && data.reason) {
handleError = data.reason
}
} catch {
handleAvailable = null
handleError = $_('common.error')
} finally {
checkingHandle = false
}
}
// True when the email chosen for verification is exactly the one the SSO
// provider already verified (case-insensitive match against the original);
// in that case the UI tells the user no re-verification is needed.
let usingVerifiedProviderEmail = $derived(
pending?.provider_email_verified &&
verificationChannel === 'email' &&
email.trim().toLowerCase() === providerEmailOriginal?.toLowerCase()
)
// The form is submittable only when the selected verification channel's
// contact field is non-empty (ignoring surrounding whitespace).
function isChannelValid(): boolean {
  if (verificationChannel === 'email') return email.trim() !== ''
  if (verificationChannel === 'discord') return discordId.trim() !== ''
  if (verificationChannel === 'telegram') return telegramUsername.trim() !== ''
  if (verificationChannel === 'signal') return signalNumber.trim() !== ''
  return false
}
// Submit the SSO registration: validate locally, POST the full form, store
// the returned session tokens, then follow the server's redirect. When the
// redirect targets /app/verify, stash the pending-verification context in
// localStorage and propagate handle/channel as query params first.
async function handleSubmit(e: Event) {
e.preventDefault()
const token = getToken()
if (!token || !pending) return
// Local validation: handle length, known-taken handle, channel contact set.
if (!handle || handle.length < 3) {
handleError = $_('sso_register.error_handle_required')
return
}
if (handleAvailable === false) {
handleError = $_('sso_register.handle_taken')
return
}
if (!isChannelValid()) {
toast.error($_(`register.validation.${verificationChannel === 'email' ? 'emailRequired' : verificationChannel + 'Required'}`))
return
}
submitting = true
try {
const response = await fetch('/oauth/sso/complete-registration', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json',
},
// Unused channel fields are sent as null; the external DID only applies
// to the BYOD identity type.
body: JSON.stringify({
token,
handle,
email: email || null,
invite_code: inviteCode || null,
verification_channel: verificationChannel,
discord_id: discordId || null,
telegram_username: telegramUsername || null,
signal_number: signalNumber || null,
did_type: didType,
did: didType === 'web-external' ? externalDid.trim() : null,
}),
})
const data = await response.json()
if (!response.ok) {
toast.error(data.message || data.error_description || data.error || $_('common.error'))
submitting = false
return
}
// Persist the new session before navigating away.
if (data.accessJwt && data.refreshJwt) {
localStorage.setItem('accessJwt', data.accessJwt)
localStorage.setItem('refreshJwt', data.refreshJwt)
}
if (data.redirectUrl) {
if (data.redirectUrl.startsWith('/app/verify')) {
// The verify page reads this to resume verification after the redirect.
localStorage.setItem('tranquil_pds_pending_verification', JSON.stringify({
did: data.did,
handle: data.handle,
channel: verificationChannel,
}))
const url = new URL(data.redirectUrl, window.location.origin)
url.searchParams.set('handle', data.handle)
url.searchParams.set('channel', verificationChannel)
window.location.href = url.pathname + url.search
return
}
window.location.href = data.redirectUrl
return
}
// OK response without a redirect is unexpected — surface a generic error.
toast.error($_('common.error'))
submitting = false
} catch {
toast.error($_('common.error'))
submitting = false
}
}
</script>
<div class="sso-register-container">
{#if loading}
<div class="loading">
<div class="spinner"></div>
<p>{$_('common.loading')}</p>
</div>
{:else if error && !pending}
<div class="error-container">
<div class="error-icon">!</div>
<h2>{$_('common.error')}</h2>
<p>{error}</p>
<a href="/app/register-sso" class="back-link">{$_('sso_register.tryAgain')}</a>
</div>
{:else if pending}
<header class="page-header">
<h1>{$_('sso_register.title')}</h1>
<p class="subtitle">{$_('sso_register.subtitle', { values: { provider: getProviderDisplayName(pending.provider) } })}</p>
</header>
<div class="provider-info">
<div class="provider-badge">
<SsoIcon provider={pending.provider} size={32} />
<div class="provider-details">
<span class="provider-name">{getProviderDisplayName(pending.provider)}</span>
{#if pending.provider_username}
<span class="provider-username">@{pending.provider_username}</span>
{/if}
</div>
</div>
</div>
<div class="split-layout sidebar-right">
<div class="form-section">
<form onsubmit={handleSubmit}>
<div class="field">
<label for="handle">{$_('sso_register.handle_label')}</label>
<input
id="handle"
type="text"
bind:value={handle}
placeholder={$_('register.handlePlaceholder')}
disabled={submitting}
required
autocomplete="off"
/>
{#if checkingHandle}
<p class="hint">{$_('common.checking')}</p>
{:else if handleError}
<p class="hint error">{handleError}</p>
{:else if handleAvailable === false}
<p class="hint error">{$_('sso_register.handle_taken')}</p>
{:else if handleAvailable === true}
<p class="hint success">{$_('sso_register.handle_available')}</p>
{:else if fullHandle()}
<p class="hint">{$_('register.handleHint', { values: { handle: fullHandle() } })}</p>
{/if}
</div>
<fieldset>
<legend>{$_('register.contactMethod')}</legend>
<div class="contact-fields">
<div class="field">
<label for="verification-channel">{$_('register.verificationMethod')}</label>
<select id="verification-channel" bind:value={verificationChannel} disabled={submitting}>
<option value="email">{$_('register.email')}</option>
<option value="discord" disabled={!isChannelAvailable('discord')}>
{$_('register.discord')}{isChannelAvailable('discord') ? '' : ` (${$_('register.notConfigured')})`}
</option>
<option value="telegram" disabled={!isChannelAvailable('telegram')}>
{$_('register.telegram')}{isChannelAvailable('telegram') ? '' : ` (${$_('register.notConfigured')})`}
</option>
<option value="signal" disabled={!isChannelAvailable('signal')}>
{$_('register.signal')}{isChannelAvailable('signal') ? '' : ` (${$_('register.notConfigured')})`}
</option>
</select>
</div>
{#if verificationChannel === 'email'}
<div class="field">
<label for="email">{$_('register.emailAddress')}</label>
<input
id="email"
type="email"
bind:value={email}
placeholder={$_('register.emailPlaceholder')}
disabled={submitting}
required
/>
{#if pending?.provider_email && pending?.provider_email_verified}
{#if usingVerifiedProviderEmail}
<p class="hint success">{$_('sso_register.emailVerifiedByProvider', { values: { provider: getProviderDisplayName(pending.provider) } })}</p>
{:else}
<p class="hint">{$_('sso_register.emailChangedNeedsVerification')}</p>
{/if}
{/if}
</div>
{:else if verificationChannel === 'discord'}
<div class="field">
<label for="discord-id">{$_('register.discordId')}</label>
<input
id="discord-id"
type="text"
bind:value={discordId}
placeholder={$_('register.discordIdPlaceholder')}
disabled={submitting}
required
/>
<p class="hint">{$_('register.discordIdHint')}</p>
</div>
{:else if verificationChannel === 'telegram'}
<div class="field">
<label for="telegram-username">{$_('register.telegramUsername')}</label>
<input
id="telegram-username"
type="text"
bind:value={telegramUsername}
placeholder={$_('register.telegramUsernamePlaceholder')}
disabled={submitting}
required
/>
</div>
{:else if verificationChannel === 'signal'}
<div class="field">
<label for="signal-number">{$_('register.signalNumber')}</label>
<input
id="signal-number"
type="tel"
bind:value={signalNumber}
placeholder={$_('register.signalNumberPlaceholder')}
disabled={submitting}
required
/>
<p class="hint">{$_('register.signalNumberHint')}</p>
</div>
{/if}
</div>
</fieldset>
<fieldset>
<legend>{$_('registerPasskey.identityType')}</legend>
<p class="section-hint">{$_('registerPasskey.identityTypeHint')}</p>
<div class="radio-group">
<label class="radio-label">
<input type="radio" name="didType" value="plc" bind:group={didType} disabled={submitting} />
<span class="radio-content">
<strong>{$_('registerPasskey.didPlcRecommended')}</strong>
<span class="radio-hint">{$_('registerPasskey.didPlcHint')}</span>
</span>
</label>
<label class="radio-label" class:disabled={serverInfo?.selfHostedDidWebEnabled === false}>
<input type="radio" name="didType" value="web" bind:group={didType} disabled={submitting || serverInfo?.selfHostedDidWebEnabled === false} />
<span class="radio-content">
<strong>{$_('registerPasskey.didWeb')}</strong>
{#if serverInfo?.selfHostedDidWebEnabled === false}
<span class="radio-hint disabled-hint">{$_('registerPasskey.didWebDisabledHint')}</span>
{:else}
<span class="radio-hint">{$_('registerPasskey.didWebHint')}</span>
{/if}
</span>
</label>
<label class="radio-label">
<input type="radio" name="didType" value="web-external" bind:group={didType} disabled={submitting} />
<span class="radio-content">
<strong>{$_('registerPasskey.didWebBYOD')}</strong>
<span class="radio-hint">{$_('registerPasskey.didWebBYODHint')}</span>
</span>
</label>
</div>
{#if didType === 'web'}
<div class="warning-box">
<strong>{$_('registerPasskey.didWebWarningTitle')}</strong>
<ul>
<li><strong>{$_('registerPasskey.didWebWarning1')}</strong> {@html $_('registerPasskey.didWebWarning1Detail', { values: { did: `<code>did:web:yourhandle.${serverInfo?.availableUserDomains?.[0] || 'this-pds.com'}</code>` } })}</li>
<li><strong>{$_('registerPasskey.didWebWarning2')}</strong> {$_('registerPasskey.didWebWarning2Detail')}</li>
<li><strong>{$_('registerPasskey.didWebWarning3')}</strong> {$_('registerPasskey.didWebWarning3Detail')}</li>
<li><strong>{$_('registerPasskey.didWebWarning4')}</strong> {$_('registerPasskey.didWebWarning4Detail')}</li>
</ul>
</div>
{/if}
{#if didType === 'web-external'}
<div class="field">
<label for="external-did">{$_('registerPasskey.externalDid')}</label>
<input id="external-did" type="text" bind:value={externalDid} placeholder={$_('registerPasskey.externalDidPlaceholder')} disabled={submitting} required />
<p class="hint">{$_('registerPasskey.externalDidHint')} <code>https://{externalDid ? extractDomain(externalDid) : 'yourdomain.com'}/.well-known/did.json</code></p>
</div>
{/if}
</fieldset>
{#if serverInfo?.inviteCodeRequired}
<div class="field">
<label for="invite-code">{$_('register.inviteCode')} <span class="required">{$_('register.inviteCodeRequired')}</span></label>
<input
id="invite-code"
type="text"
bind:value={inviteCode}
placeholder={$_('register.inviteCodePlaceholder')}
disabled={submitting}
required
/>
</div>
{/if}
<button type="submit" disabled={submitting || !handle || handle.length < 3 || handleAvailable === false || checkingHandle || !isChannelValid()}>
{submitting ? $_('common.creating') : $_('sso_register.submit')}
</button>
</form>
</div>
<aside class="info-panel">
<h3>{$_('sso_register.infoAfterTitle')}</h3>
<ul class="info-list">
<li>{$_('sso_register.infoAddPassword')}</li>
<li>{$_('sso_register.infoAddPasskey')}</li>
<li>{$_('sso_register.infoLinkProviders')}</li>
<li>{$_('sso_register.infoChangeHandle')}</li>
</ul>
</aside>
</div>
{/if}
</div>
<style>
.sso-register-container {
max-width: var(--width-lg);
margin: var(--space-9) auto;
padding: var(--space-7);
}
.loading {
display: flex;
flex-direction: column;
align-items: center;
gap: var(--space-4);
padding: var(--space-8);
}
.loading p {
color: var(--text-secondary);
}
.error-container {
text-align: center;
padding: var(--space-8);
}
.error-icon {
width: 48px;
height: 48px;
border-radius: 50%;
background: var(--error-text);
color: var(--text-inverse);
display: flex;
align-items: center;
justify-content: center;
font-size: 24px;
font-weight: bold;
margin: 0 auto var(--space-4);
}
.error-container h2 {
margin-bottom: var(--space-2);
}
.error-container p {
color: var(--text-secondary);
margin-bottom: var(--space-6);
}
.back-link {
color: var(--accent);
text-decoration: none;
}
.back-link:hover {
text-decoration: underline;
}
.page-header {
margin-bottom: var(--space-6);
}
.page-header h1 {
margin: 0 0 var(--space-3) 0;
}
.subtitle {
color: var(--text-secondary);
margin: 0;
}
.form-section {
min-width: 0;
}
form {
display: flex;
flex-direction: column;
gap: var(--space-5);
}
.contact-fields {
display: flex;
flex-direction: column;
gap: var(--space-4);
}
.contact-fields .field {
margin-bottom: 0;
}
.hint.success {
color: var(--success-text);
}
.hint.error {
color: var(--error-text);
}
.info-panel {
background: var(--bg-secondary);
border-radius: var(--radius-xl);
padding: var(--space-6);
}
.info-panel h3 {
margin: 0 0 var(--space-4) 0;
font-size: var(--text-base);
font-weight: var(--font-semibold);
}
.info-list {
margin: 0;
padding-left: var(--space-5);
}
.info-list li {
margin-bottom: var(--space-2);
font-size: var(--text-sm);
color: var(--text-secondary);
line-height: var(--leading-relaxed);
}
.info-list li:last-child {
margin-bottom: 0;
}
.provider-info {
margin-bottom: var(--space-6);
}
.provider-badge {
display: flex;
align-items: center;
gap: var(--space-3);
padding: var(--space-4);
background: var(--bg-secondary);
border-radius: var(--radius-md);
}
.provider-details {
display: flex;
flex-direction: column;
}
.provider-name {
font-weight: var(--font-semibold);
}
.provider-username {
font-size: var(--text-sm);
color: var(--text-secondary);
}
.required {
color: var(--error-text);
}
button[type="submit"] {
margin-top: var(--space-3);
}
.spinner {
width: 32px;
height: 32px;
border: 3px solid var(--border-color);
border-top-color: var(--accent);
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
</style>

View File

@@ -0,0 +1,51 @@
<script lang="ts">
import { startOAuthRegister } from '../lib/oauth'
import { _ } from '../lib/i18n'
// Error surfaced when starting the OAuth registration redirect fails.
let error = $state<string | null>(null)
// Deliberately a plain (non-reactive) flag: guarantees the redirect is
// kicked off at most once even if the $effect re-runs.
let initiated = false
$effect(() => {
if (!initiated) {
initiated = true
startOAuthRegister().catch((err) => {
error = err instanceof Error ? err.message : 'Failed to start registration'
})
}
})
</script>
<!-- Redirect shim: shows a spinner while startOAuthRegister() navigates the
     browser away; on failure, shows the error with a link back to login. -->
<div class="register-redirect">
{#if error}
<div class="message error">{error}</div>
<a href="/app/login">{$_('register.signIn')}</a>
{:else}
<div class="loading-content">
<div class="spinner"></div>
<p>{$_('common.loading')}</p>
</div>
{/if}
</div>
<style>
.register-redirect {
min-height: 100vh;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: var(--space-4);
}
.loading-content {
display: flex;
flex-direction: column;
align-items: center;
gap: var(--space-4);
}
.loading-content p {
margin: 0;
color: var(--text-secondary);
}
</style>

View File

@@ -44,15 +44,8 @@ nuke_installation() {
sudo -u postgres psql -c "DROP DATABASE IF EXISTS pds;" 2>/dev/null || true
sudo -u postgres psql -c "DROP USER IF EXISTS tranquil_pds;" 2>/dev/null || true
log_info "Removing minio buckets..."
if command -v mc &>/dev/null; then
mc rb local/pds-blobs --force 2>/dev/null || true
mc rb local/pds-backups --force 2>/dev/null || true
mc alias remove local 2>/dev/null || true
fi
systemctl stop minio 2>/dev/null || true
rm -rf /var/lib/minio/data/.minio.sys 2>/dev/null || true
rm -f /etc/default/minio 2>/dev/null || true
log_info "Removing blob storage..."
rm -rf /var/lib/tranquil 2>/dev/null || true
log_info "Removing nginx config..."
rm -f /etc/nginx/sites-enabled/tranquil-pds
@@ -79,7 +72,7 @@ if [[ -f /etc/tranquil-pds/tranquil-pds.env ]] || [[ -d /opt/tranquil-pds ]] ||
echo " - PostgreSQL database 'pds' and all data"
echo " - All Tranquil PDS configuration and credentials"
echo " - All source code in /opt/tranquil-pds"
echo " - MinIO buckets 'pds-blobs' and 'pds-backups' and all data"
echo " - All blobs and backups in /var/lib/tranquil/"
echo ""
read -p "Type 'NUKE' to confirm: " CONFIRM_NUKE
if [[ "$CONFIRM_NUKE" == "NUKE" ]]; then
@@ -153,7 +146,6 @@ else
DPOP_SECRET=$(openssl rand -base64 48)
MASTER_KEY=$(openssl rand -base64 48)
DB_PASSWORD=$(openssl rand -base64 24 | tr -dc 'a-zA-Z0-9' | head -c 32)
MINIO_PASSWORD=$(openssl rand -base64 24 | tr -dc 'a-zA-Z0-9' | head -c 32)
mkdir -p /etc/tranquil-pds
cat > "$CREDENTIALS_FILE" << EOF
@@ -161,7 +153,6 @@ JWT_SECRET="$JWT_SECRET"
DPOP_SECRET="$DPOP_SECRET"
MASTER_KEY="$MASTER_KEY"
DB_PASSWORD="$DB_PASSWORD"
MINIO_PASSWORD="$MINIO_PASSWORD"
EOF
chmod 600 "$CREDENTIALS_FILE"
log_success "Secrets generated"
@@ -213,70 +204,9 @@ apt install -y valkey 2>/dev/null || {
# Enable and start Valkey; tolerate failure (e.g. already running or masked).
systemctl enable valkey-server 2>/dev/null || true
systemctl start valkey-server 2>/dev/null || true
# NOTE(review): everything below through the chown is the removed MinIO install
# path in this diff; the filesystem backend replaces it.
log_info "Installing minio..."
# Download the MinIO server binary only if it is not already installed.
if [[ ! -f /usr/local/bin/minio ]]; then
ARCH=$(dpkg --print-architecture)
# Pick the download URL matching the Debian architecture; bail out on others.
case "$ARCH" in
amd64) curl -fsSL -o /tmp/minio https://dl.min.io/server/minio/release/linux-amd64/minio ;;
arm64) curl -fsSL -o /tmp/minio https://dl.min.io/server/minio/release/linux-arm64/minio ;;
*) log_error "Unsupported architecture: $ARCH"; exit 1 ;;
esac
chmod +x /tmp/minio
mv /tmp/minio /usr/local/bin/
fi
# Create the data directory and a dedicated no-login system user to own it.
mkdir -p /var/lib/minio/data
id -u minio-user &>/dev/null || useradd -r -s /sbin/nologin minio-user
chown -R minio-user:minio-user /var/lib/minio
cat > /etc/default/minio << EOF
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=${MINIO_PASSWORD}
MINIO_VOLUMES="/var/lib/minio/data"
MINIO_OPTS="--console-address :9001"
EOF
chmod 600 /etc/default/minio
cat > /etc/systemd/system/minio.service << 'EOF'
[Unit]
Description=MinIO Object Storage
After=network.target
[Service]
User=minio-user
Group=minio-user
EnvironmentFile=/etc/default/minio
ExecStart=/usr/local/bin/minio server $MINIO_VOLUMES $MINIO_OPTS
Restart=always
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Pick up the unit file written above, then enable and start MinIO.
systemctl daemon-reload
systemctl enable minio
systemctl start minio
log_success "minio installed"
log_info "Waiting for minio..."
# Fixed 5s wait for MinIO to come up — no health check, just a grace period.
sleep 5
# Install the MinIO client (mc) if missing, matching the host architecture.
if [[ ! -f /usr/local/bin/mc ]]; then
ARCH=$(dpkg --print-architecture)
case "$ARCH" in
amd64) curl -fsSL -o /tmp/mc https://dl.min.io/client/mc/release/linux-amd64/mc ;;
arm64) curl -fsSL -o /tmp/mc https://dl.min.io/client/mc/release/linux-arm64/mc ;;
esac
chmod +x /tmp/mc
mv /tmp/mc /usr/local/bin/
fi
# Re-register the 'local' alias with the generated root password, then create
# the blob and backup buckets idempotently.
mc alias remove local 2>/dev/null || true
mc alias set local http://localhost:9000 minioadmin "${MINIO_PASSWORD}" --api S3v4
mc mb local/pds-blobs --ignore-existing
mc mb local/pds-backups --ignore-existing
log_success "minio buckets created"
# Added by this commit: filesystem backend replaces the MinIO buckets above.
log_info "Creating blob storage directories..."
mkdir -p /var/lib/tranquil/blobs /var/lib/tranquil/backups
log_success "Blob storage directories created"
log_info "Installing rust..."
if [[ -f "$HOME/.cargo/env" ]]; then
@@ -381,12 +311,8 @@ PDS_HOSTNAME=${PDS_DOMAIN}
DATABASE_URL=postgres://tranquil_pds:${DB_PASSWORD}@localhost:5432/pds
DATABASE_MAX_CONNECTIONS=100
DATABASE_MIN_CONNECTIONS=10
S3_ENDPOINT=http://localhost:9000
AWS_REGION=us-east-1
S3_BUCKET=pds-blobs
BACKUP_S3_BUCKET=pds-backups
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=${MINIO_PASSWORD}
BLOB_STORAGE_PATH=/var/lib/tranquil/blobs
BACKUP_STORAGE_PATH=/var/lib/tranquil/backups
VALKEY_URL=redis://localhost:6379
JWT_SECRET=${JWT_SECRET}
DPOP_SECRET=${DPOP_SECRET}
@@ -406,11 +332,12 @@ cp /opt/tranquil-pds/target/release/tranquil-pds /usr/local/bin/
mkdir -p /var/lib/tranquil-pds
cp -r /opt/tranquil-pds/frontend/dist /var/lib/tranquil-pds/frontend
chown -R tranquil-pds:tranquil-pds /var/lib/tranquil-pds
chown -R tranquil-pds:tranquil-pds /var/lib/tranquil
cat > /etc/systemd/system/tranquil-pds.service << 'EOF'
[Unit]
Description=Tranquil PDS - AT Protocol PDS
After=network.target postgresql.service minio.service
After=network.target postgresql.service
[Service]
Type=simple
@@ -420,6 +347,10 @@ EnvironmentFile=/etc/tranquil-pds/tranquil-pds.env
ExecStart=/usr/local/bin/tranquil-pds
Restart=always
RestartSec=5
ProtectSystem=strict
ProtectHome=true
PrivateTmp=true
ReadWritePaths=/var/lib/tranquil
[Install]
WantedBy=multi-user.target
@@ -577,8 +508,11 @@ echo ""
# Final summary printed after a successful install.
echo "PDS: https://${PDS_DOMAIN}"
echo ""
echo "Credentials (also in /etc/tranquil-pds/.credentials):"
# NOTE(review): diff view — the first two lines (DB + MinIO passwords) are the
# removed variant; the third "DB password" line is the added replacement.
echo " DB password: ${DB_PASSWORD}"
echo " MinIO password: ${MINIO_PASSWORD}"
echo " DB password: ${DB_PASSWORD}"
echo ""
# Added by this commit: point the operator at the filesystem storage paths.
echo "Data locations:"
echo " Blobs: /var/lib/tranquil/blobs"
echo " Backups: /var/lib/tranquil/backups"
echo ""
echo "Commands:"
echo " journalctl -u tranquil-pds -f # logs"