test(s3-retention): purge stale buckets before each create to avoid volume exhaustion

The WORM suite creates one bucket per test, each backed by ~3 reserved
volumes on the data node. With ~30 tests and the default `weed mini`
volume cap, the data node runs out of slots midway through the run and
every PutObject after that fails with InternalError.

Hook a sweep of every test-prefix bucket into the create helpers so a
panicked or interrupted prior test cannot leak buckets into the next.
This commit is contained in:
Chris Lu
2026-04-27 22:14:20 -07:00
parent e4a635a04d
commit 363d5caa85
4 changed files with 40 additions and 6 deletions

View File

@@ -19,6 +19,8 @@ func TestReproduceObjectLockIssue(t *testing.T) {
t.Logf("=== Reproducing Object Lock Header Processing Issue ===")
t.Logf("Bucket name: %s", bucketName)
cleanupLeftoverTestBuckets(t, client)
// Step 1: Create bucket with Object Lock enabled header
t.Logf("\n1. Creating bucket with ObjectLockEnabledForBucket=true")
t.Logf(" This should send x-amz-bucket-object-lock-enabled: true header")
@@ -95,6 +97,8 @@ func TestNormalBucketCreationStillWorks(t *testing.T) {
t.Logf("=== Testing Normal Bucket Creation ===")
cleanupLeftoverTestBuckets(t, client)
// Create bucket without Object Lock
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
Bucket: aws.String(bucketName),

View File

@@ -22,6 +22,8 @@ func TestObjectLockValidation(t *testing.T) {
t.Logf("=== Validating S3 Object Lock Functionality ===")
t.Logf("Bucket: %s", bucketName)
cleanupLeftoverTestBuckets(t, client)
// Step 1: Create bucket with Object Lock header
t.Log("\n1. Creating bucket with x-amz-bucket-object-lock-enabled: true")
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{

View File

@@ -296,6 +296,7 @@ func putObjectWithLockHeaders(t *testing.T, client *s3.Client, bucketName, key,
// createBucketWithObjectLock creates a bucket with object lock enabled
func createBucketWithObjectLock(t *testing.T, client *s3.Client, bucketName string) {
cleanupLeftoverTestBuckets(t, client)
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
ObjectLockEnabledForBucket: aws.Bool(true),

View File

@@ -68,6 +68,17 @@ func getS3Client(t *testing.T) *s3.Client {
})
}
// allTestBucketPrefixes enumerates every bucket-name prefix this test suite
// generates. cleanupLeftoverTestBuckets/cleanupAllTestBuckets sweep any bucket
// whose name starts with one of these, so stale buckets from prior tests or
// interrupted runs are reclaimed. Register the prefix here whenever a new test
// introduces one, or its leftovers will never be cleaned up.
var allTestBucketPrefixes = []string{
	"bucket-defaults-",
	"normal-test-",
	"object-lock-config-",
	"object-lock-test-",
	"test-retention-",
}
// getNewBucketName generates a unique bucket name
func getNewBucketName() string {
timestamp := time.Now().UnixNano()
@@ -77,6 +88,7 @@ func getNewBucketName() string {
// createBucket creates a new bucket for testing with Object Lock enabled
// Object Lock is required for retention and legal hold functionality per AWS S3 specification
func createBucket(t *testing.T, client *s3.Client, bucketName string) {
cleanupLeftoverTestBuckets(t, client)
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
ObjectLockEnabledForBucket: aws.Bool(true),
@@ -87,6 +99,7 @@ func createBucket(t *testing.T, client *s3.Client, bucketName string) {
// createBucketWithoutObjectLock creates a new bucket without Object Lock enabled
// Use this only for tests that specifically need to verify non-Object-Lock bucket behavior
func createBucketWithoutObjectLock(t *testing.T, client *s3.Client, bucketName string) {
cleanupLeftoverTestBuckets(t, client)
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
@@ -264,24 +277,38 @@ func putObject(t *testing.T, client *s3.Client, bucketName, key, content string)
return resp
}
// cleanupAllTestBuckets cleans up any leftover test buckets
// cleanupAllTestBuckets cleans up any leftover test buckets matching any prefix this
// suite uses. Called both at test-suite teardown and before each new bucket creation
// so a single `weed mini` data node does not exhaust its volume slots after many tests.
func cleanupAllTestBuckets(t *testing.T, client *s3.Client) {
// List all buckets
listResp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
if err != nil {
t.Logf("Warning: failed to list buckets for cleanup: %v", err)
return
}
// Delete buckets that match our test prefix
for _, bucket := range listResp.Buckets {
if bucket.Name != nil && strings.HasPrefix(*bucket.Name, defaultConfig.BucketPrefix) {
t.Logf("Cleaning up leftover test bucket: %s", *bucket.Name)
deleteBucket(t, client, *bucket.Name)
if bucket.Name == nil {
continue
}
for _, prefix := range allTestBucketPrefixes {
if strings.HasPrefix(*bucket.Name, prefix) {
t.Logf("Cleaning up leftover test bucket: %s", *bucket.Name)
deleteBucket(t, client, *bucket.Name)
break
}
}
}
}
// cleanupLeftoverTestBuckets is invoked from the createBucket* helpers so each new bucket
// starts with a clean slate even when a prior test panicked, was interrupted, or its
// volumes have not yet been reclaimed by the master. Without this the WORM suite
// accumulates ~3 reserved volumes per bucket and the data node hits its volume cap.
//
// It is a deliberate named alias for cleanupAllTestBuckets: the distinct name documents
// the call sites' intent (pre-create sweep) versus the suite-teardown sweep.
func cleanupLeftoverTestBuckets(t *testing.T, client *s3.Client) {
	cleanupAllTestBuckets(t, client)
}
// TestBasicRetentionWorkflow tests the basic retention functionality
func TestBasicRetentionWorkflow(t *testing.T) {
client := getS3Client(t)