mirror of
https://tangled.org/tranquil.farm/tranquil-pds
synced 2026-02-08 21:30:08 +00:00
Update TODO, more logging during blob migration
151 TODO.md
@@ -2,32 +2,139 @@

## Active development

### Storage backend abstraction

Make storage layers swappable via traits (rough trait sketch below).

filesystem blob storage
- [ ] FilesystemBlobStorage implementation
- [ ] directory structure (content-addressed like blobs/{cid} already used in objsto)
- [ ] atomic writes (write to temp, rename)
- [ ] config option to choose backend (env var or config flag)
- [ ] also traitify BackupStorage (currently hardcoded to objsto)
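
A minimal sketch of the trait boundary described above, assuming tokio and the async-trait crate; the names (`BlobStorage`, `FilesystemBlobStorage`) and method set are illustrative, not taken from the codebase:

```rust
// Hypothetical sketch, not the project's actual API.
use std::path::PathBuf;

use async_trait::async_trait;
use tokio::{fs, io::AsyncWriteExt};

#[async_trait]
pub trait BlobStorage: Send + Sync {
    async fn put(&self, cid: &str, data: &[u8]) -> std::io::Result<()>;
    async fn get(&self, cid: &str) -> std::io::Result<Vec<u8>>;
    async fn delete(&self, cid: &str) -> std::io::Result<()>;
}

pub struct FilesystemBlobStorage {
    root: PathBuf, // e.g. <data-dir>/blobs
}

#[async_trait]
impl BlobStorage for FilesystemBlobStorage {
    async fn put(&self, cid: &str, data: &[u8]) -> std::io::Result<()> {
        // Content-addressed layout, mirroring the blobs/{cid} scheme already used in objsto.
        let final_path = self.root.join(cid);
        let tmp_path = self.root.join(format!("{cid}.tmp"));

        // Atomic write: write to a temp file, flush to disk, then rename into place.
        let mut f = fs::File::create(&tmp_path).await?;
        f.write_all(data).await?;
        f.sync_all().await?;
        fs::rename(&tmp_path, &final_path).await
    }

    async fn get(&self, cid: &str) -> std::io::Result<Vec<u8>> {
        fs::read(self.root.join(cid)).await
    }

    async fn delete(&self, cid: &str) -> std::io::Result<()> {
        fs::remove_file(self.root.join(cid)).await
    }
}
```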

sqlite database backend
- [ ] abstract db layer behind trait (queries, transactions, migrations)
- [ ] sqlite implementation matching postgres behavior
- [ ] handle sqlite's single-writer limitation (connection pooling strategy)
- [ ] migrations system that works for both
- [ ] testing: run full test suite against both backends
- [ ] config option to choose backend (postgres vs sqlite), see the sketch after this list
- [ ] document tradeoffs (sqlite for single-user/small, postgres for multi-user/scale)
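
One possible shape for the backend switch, assuming an environment variable; the variable name `PDS_DB_BACKEND` and the enum are hypothetical:

```rust
// Hypothetical config sketch; the env var name and defaulting behavior are illustrative.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DbBackend {
    Postgres,
    Sqlite,
}

impl DbBackend {
    pub fn from_env() -> Self {
        match std::env::var("PDS_DB_BACKEND").as_deref() {
            Ok("sqlite") => DbBackend::Sqlite,
            // Default to postgres so existing deployments keep working unchanged.
            _ => DbBackend::Postgres,
        }
    }
}
```

For sqlite, the single-writer limitation could be handled by keeping the write pool at size 1 while allowing multiple read connections.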

### Plugin system

Extensible architecture allowing third-party plugins to add functionality. Going with a WASM-based approach rather than a scripting language.

WASM component model plugins: compile to wasm32-wasip2, sandboxed via wasmtime, capability-gated. Based on Zed's extensions.

- [ ] Plugin manifest format (name, version, deps, permissions, hooks); sketch below
- [ ] Plugin loading and lifecycle (enable/disable/hot reload)
- [ ] WASM host bindings for PDS APIs (database, storage, http, etc.)
- [ ] Resource limits (memory, cpu time, capability restrictions)
- [ ] Extension points: request middleware, record lifecycle hooks, custom XRPC endpoints
- [ ] Extension points: custom lexicons, storage backends, auth providers, notification channels
- [ ] Extension points: firehose consumers (react to repo events)
- [ ] Plugin SDK crate with traits and helpers?
- [ ] Example plugins: CDC, extra logging to 3rd party, content filter, better S3 backup
- [ ] Plugin registry with signature verification?
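
A rough idea of what a plugin.toml manifest could deserialize into, assuming serde and the toml crate; the struct and its field names are placeholders, not a settled format:

```rust
// Hypothetical manifest model for illustration only.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
pub struct PluginManifest {
    pub id: String,                // e.g. "dev.example.content-filter"
    pub version: String,           // semver string
    pub capabilities: Vec<String>, // e.g. ["http:fetch", "kv:read"]
    pub hooks: Vec<String>,        // e.g. ["record:before_create", "xrpc:register"]
}

pub fn load_manifest(toml_text: &str) -> Result<PluginManifest, toml::de::Error> {
    toml::from_str(toml_text)
}
```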

WIT interface
- [ ] record hooks: before/after create, update, delete
- [ ] blob hooks: before/after upload, validate
- [ ] xrpc hooks: before/after (middleware), custom endpoint handler
- [ ] firehose hook: on_commit
- [ ] host imports: http client, kv store, logging, read records

### Plugin: Private/encrypted data

Records that only authorized parties can see and decrypt. Requires key federation between PDSes. Implemented as a plugin using the plugin system above.

wasmtime host
- [ ] engine with epoch interruption (kill runaway plugins); sketch after this list
- [ ] plugin manifest (plugin.toml): id, version, capabilities, hooks
- [ ] capability enforcement at runtime
- [ ] plugin loader, lifecycle (enable/disable/reload)
- [ ] resource limits (memory, time)
- [ ] per-plugin fs sandbox
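
A minimal sketch of epoch-based interruption with wasmtime; the tick interval and deadline below are arbitrary placeholder values:

```rust
// Sketch: kill runaway plugins via wasmtime epoch interruption.
use std::{thread, time::Duration};
use wasmtime::{Config, Engine, Store};

fn build_engine() -> Engine {
    let mut config = Config::new();
    // Guest execution traps once the store's epoch deadline is exceeded.
    config.epoch_interruption(true);
    Engine::new(&config).expect("failed to build wasmtime engine")
}

fn run_with_deadline(engine: &Engine) {
    // A background thread advances the epoch at a fixed rate.
    let ticker = engine.clone();
    thread::spawn(move || loop {
        thread::sleep(Duration::from_millis(10));
        ticker.increment_epoch();
    });

    let mut store: Store<()> = Store::new(engine, ());
    // The plugin gets ~100 ticks (~1 second here) before it is interrupted.
    store.set_epoch_deadline(100);
    // ... instantiate the plugin component and invoke its exported hook ...
}
```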

- [ ] Survey current ATProto discourse on private data
- [ ] Document Bluesky team's likely approach
- [ ] Design key management strategy
- [ ] Per-user encryption keys (separate from signing keys)
- [ ] Key derivation for per-record or per-collection encryption
- [ ] Encrypted record storage format
- [ ] Transparent encryption/decryption in repo operations
- [ ] Protocol for sharing decryption keys between PDSes
- [ ] Handle key rotation and revocation

capabilities (runtime check sketch below)
- [ ] http:fetch with domain allowlist
- [ ] kv:read, kv:write
- [ ] record:read, blob:read
- [ ] xrpc:register
- [ ] firehose:subscribe
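
A hedged sketch of how the host might model and check these grants; the enum variants mirror the list above, but the shape is illustrative:

```rust
// Hypothetical capability model; not the project's actual types.
pub enum Capability {
    HttpFetch { allowed_domains: Vec<String> },
    KvRead,
    KvWrite,
    RecordRead,
    BlobRead,
    XrpcRegister,
    FirehoseSubscribe,
}

/// Host-side check before honoring a plugin's http:fetch import.
pub fn http_fetch_allowed(granted: &[Capability], host: &str) -> bool {
    granted.iter().any(|cap| match cap {
        Capability::HttpFetch { allowed_domains } => allowed_domains
            .iter()
            .any(|d| host == d || host.ends_with(&format!(".{d}"))),
        _ => false,
    })
}
```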

pds-plugin-api (rust), MVP for plugin system
- [ ] plugin trait with default impls (sketch after this list)
- [ ] register_plugin! macro
- [ ] typed host import wrappers
- [ ] publish to crates.io
- [ ] docs + example
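
One way the guest-side SDK could look; the trait and macro names come from the checklist, while the hook signatures and types are made up for illustration:

```rust
// Hypothetical pds-plugin-api surface; signatures are illustrative only.
pub struct Record {
    pub collection: String,
    pub rkey: String,
    pub value: Vec<u8>, // CBOR bytes of the record
}

pub trait Plugin: Default {
    /// Runs before a record is created; the default impl is a pass-through.
    fn before_create(&mut self, record: &Record) -> Result<(), String> {
        let _ = record;
        Ok(())
    }

    /// Runs for each firehose commit; the default impl ignores it.
    fn on_commit(&mut self, seq: u64) {
        let _ = seq;
    }
}

/// Placeholder: a real macro would also generate the wasm exports that route
/// host calls into the given Plugin type.
#[macro_export]
macro_rules! register_plugin {
    ($ty:ty) => {
        // Compile-time check that $ty implements Plugin.
        #[allow(dead_code)]
        fn __registered_plugin() -> impl $crate::Plugin {
            <$ty as ::std::default::Default>::default()
        }
    };
}
```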

pds-plugin-api in golang, nice to have after the fact
- [ ] wit-bindgen-go bindings
- [ ] go wrappers
- [ ] tinygo build instructions
- [ ] example

@pds/plugin-api in typescript, nice to have after the fact
- [ ] jco/componentize-js bindings
- [ ] TypeScript types
- [ ] build tooling
- [ ] example

example plugins
- [ ] content filter
- [ ] webhook notifier
- [ ] objsto backup mirror
- [ ] custom lexicon handler
- [ ] better audit logger

### Misc

migration handle preservation
- [ ] allow users to keep their existing handle during migration (e.g. lewis.moe instead of forcing lewis.newpds.com)
- [ ] UI option to preserve external handle vs create new pds-subdomain handle
- [ ] handle the DNS verification flow for external handles during migration

cross-pds delegation
when a client (e.g. tangled.org) tries to log into a delegated account:
- [ ] client starts oauth flow to delegated account's pds
- [ ] delegated pds sees account is externally controlled, launches oauth to controller's pds (delegated pds acts as oauth client)
- [ ] controller authenticates at their own pds
- [ ] delegated pds verifies controller perms and scope from its local delegation grants
- [ ] delegated pds issues session to client within the intersection of controller's granted scope and client's requested scope (helper sketch below)
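
An illustrative helper for the last step, treating scopes as space-separated tokens (the usual OAuth convention); the function name is hypothetical:

```rust
// The issued session's scope is the intersection of the controller's delegation
// grant and the client's requested scope.
use std::collections::HashSet;

pub fn intersect_scopes(granted: &str, requested: &str) -> String {
    let granted: HashSet<&str> = granted.split_whitespace().collect();
    requested
        .split_whitespace()
        .filter(|scope| granted.contains(scope))
        .collect::<Vec<_>>()
        .join(" ")
}

// intersect_scopes("atproto transition:generic", "atproto transition:email") == "atproto"
```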

per-request "act as"
- [ ] authed as user X, perform action as delegated user Y in single request
- [ ] approach decision
  - [ ] option 1: `X-Act-As` header with target did, server verifies delegation grant
  - [ ] option 2: token exchange (RFC 8693) for short-lived delegated token
  - [ ] option 3 (lewis fav): extend existing `act` claim to support on-demand minting (claim sketch below)
  - [ ] something else?
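
For option 3, the token could carry an RFC 8693-style actor (`act`) claim; this sketch assumes serde for the claim structs, and every field beyond `act` itself is illustrative:

```rust
// Sketch of session claims extended with an RFC 8693-style `act` claim.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct Actor {
    /// DID of the controller actually performing the action.
    pub sub: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
    /// DID the action is performed as (the delegated account).
    pub sub: String,
    /// Scope granted for this delegated session.
    pub scope: String,
    pub exp: usize,
    /// Present only when this is a delegated ("act as") token.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub act: Option<Actor>,
}
```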

### Private/encrypted data

Records only authorized parties can see and decrypt.

research
- [ ] survey atproto discourse on private data
- [ ] document bluesky team's likely approach. wait.. are they even gonna do this? whatever
- [ ] look at matrix/signal for federated e2ee patterns

key management
- [ ] db schema for encryption keys (user_keys, key_grants, key_rotations)
- [ ] per-user encryption keypair generation (separate from signing keys)
- [ ] key derivation scheme (per-collection? per-record? both?), see sketch below
- [ ] key storage (encrypted at rest, hsm option?)
- [ ] rotation and revocation flow
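
A possible per-collection derivation from a user's root encryption key, assuming the hkdf and sha2 crates; the info-string layout is invented for the example:

```rust
// Illustrative HKDF-based per-collection key derivation.
use hkdf::Hkdf;
use sha2::Sha256;

/// Derive a 32-byte content key for one collection of one repo.
pub fn derive_collection_key(root_key: &[u8; 32], did: &str, collection: &str) -> [u8; 32] {
    let hk = Hkdf::<Sha256>::new(None, root_key);
    // Domain-separated info string; the exact format here is a placeholder.
    let info = format!("pds-encryption/v1/{did}/{collection}");
    let mut okm = [0u8; 32];
    hk.expand(info.as_bytes(), &mut okm)
        .expect("32 bytes is a valid HKDF-SHA256 output length");
    okm
}
```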

storage layer
- [ ] encrypted record format (encrypted cbor blob + metadata), rough shape sketched below
- [ ] collection-level vs per-record encryption flag
- [ ] how encrypted records appear in mst (hash of ciphertext? separate tree?)
- [ ] blob encryption (same keys? separate?)
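
A hypothetical shape for the stored envelope; every field name and the choice of AEAD are illustrative:

```rust
// Hypothetical encrypted-record envelope.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct EncryptedRecord {
    /// Identifies which derived key encrypted this record (supports rotation).
    pub key_id: String,
    /// AEAD algorithm identifier, e.g. "chacha20poly1305".
    pub alg: String,
    /// Per-record nonce; never reused under the same key.
    pub nonce: Vec<u8>,
    /// Ciphertext of the original CBOR record value.
    pub ciphertext: Vec<u8>,
}
```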

api surface
- [ ] xrpc getPublicKey, grantAccess, revokeAccess, listGrants
- [ ] xrpc getEncryptedRecord (ciphertext for client-side decrypt)
- [ ] or transparent server-side decrypt if requester has grant?
- [ ] lexicon for key grant records

sync/federation
- [ ] how encrypted records appear on firehose (ciphertext? omitted? placeholder?)
- [ ] pds-to-pds key exchange protocol
- [ ] appview behavior (can't index without grants)
- [ ] relay behavior with encrypted commits

client integration
- [ ] client-side encryption (pds never sees plaintext) vs server-side with trust
- [ ] key backup/recovery (lose key = lose data)

plugin hooks (once core exists)
- [ ] on_access_grant_request for custom authorization
- [ ] on_key_rotation to notify interested parties

---

```diff
@@ -48,6 +48,10 @@ export class AtprotoClient {
     return this.accessToken;
   }
 
+  getBaseUrl(): string {
+    return this.baseUrl;
+  }
+
   setDPoPKeyPair(keyPair: DPoPKeyPair | null) {
     this.dpopKeyPair = keyPair;
   }
```

```diff
@@ -20,7 +20,15 @@ export async function migrateBlobs(
   console.log("[blob-migration] Starting blob migration for", userDid);
   console.log(
     "[blob-migration] Source client:",
-    sourceClient ? "available" : "NOT AVAILABLE",
+    sourceClient ? `available (baseUrl: ${sourceClient.getBaseUrl()})` : "NOT AVAILABLE",
   );
+  console.log(
+    "[blob-migration] Local client baseUrl:",
+    localClient.getBaseUrl(),
+  );
+  console.log(
+    "[blob-migration] Local client has access token:",
+    localClient.getAccessToken() ? "yes" : "NO",
+  );
 
   onProgress({ currentOperation: "Checking for missing blobs..." });
```

```diff
@@ -95,12 +103,13 @@ export async function migrateBlobs(
         "contentType:",
         contentType,
       );
-      await localClient.uploadBlob(blobData, contentType);
+      console.log("[blob-migration] Uploading blob", cid, "to local PDS...");
+      const uploadResult = await localClient.uploadBlob(blobData, contentType);
       console.log(
-        "[blob-migration] Uploaded blob",
+        "[blob-migration] Upload response for",
         cid,
-        "with contentType:",
-        contentType,
+        ":",
+        JSON.stringify(uploadResult),
       );
       migrated++;
       onProgress({ blobsMigrated: migrated });
```

```diff
@@ -469,7 +469,28 @@ export function createInboundMigrationFlow() {
   }
 
   async function migrateBlobs(): Promise<void> {
-    if (!sourceClient || !localClient) return;
+    if (!sourceClient) {
+      console.error("[migration] migrateBlobs: sourceClient is null, skipping blob migration");
+      migrationLog("migrateBlobs SKIPPED: sourceClient is null");
+      setProgress({
+        currentOperation: "Warning: Could not migrate blobs - source PDS connection lost",
+      });
+      return;
+    }
+    if (!localClient) {
+      console.error("[migration] migrateBlobs: localClient is null, skipping blob migration");
+      migrationLog("migrateBlobs SKIPPED: localClient is null");
+      setProgress({
+        currentOperation: "Warning: Could not migrate blobs - local PDS connection lost",
+      });
+      return;
+    }
+
+    migrationLog("migrateBlobs: Starting blob migration", {
+      sourceClientBaseUrl: sourceClient.getBaseUrl(),
+      localClientBaseUrl: localClient.getBaseUrl(),
+      localClientHasToken: !!localClient.getAccessToken(),
+    });
 
     const result = await migrateBlobsUtil(
       localClient,
```

```diff
@@ -482,7 +503,10 @@ export function createInboundMigrationFlow() {
   }
 
   async function migratePreferences(): Promise<void> {
-    if (!sourceClient || !localClient) return;
+    if (!sourceClient || !localClient) {
+      console.warn("[migration] migratePreferences: client missing, skipping");
+      return;
+    }
 
     try {
       const prefs = await sourceClient.getPreferences();
```

```diff
@@ -427,7 +427,16 @@ impl IntoResponse for ApiError {
             error: self.error_name(),
             message: self.message(),
         };
-        (self.status_code(), Json(body)).into_response()
+        let mut response = (self.status_code(), Json(body)).into_response();
+        if matches!(self, Self::ExpiredToken(_)) {
+            response.headers_mut().insert(
+                "WWW-Authenticate",
+                "Bearer error=\"invalid_token\", error_description=\"Token has expired\""
+                    .parse()
+                    .unwrap(),
+            );
+        }
+        response
     }
 }
```

```diff
@@ -590,7 +590,13 @@ pub fn app(state: AppState) -> Router {
             CorsLayer::new()
                 .allow_origin(Any)
                 .allow_methods([Method::GET, Method::POST, Method::OPTIONS])
-                .allow_headers(Any),
+                .allow_headers(Any)
+                .expose_headers([
+                    "WWW-Authenticate".parse().unwrap(),
+                    "DPoP-Nonce".parse().unwrap(),
+                    "atproto-repo-rev".parse().unwrap(),
+                    "atproto-content-labelers".parse().unwrap(),
+                ]),
         )
         .with_state(state);
```