migration: scope key migration to stores (#9005)

This commit is contained in:
Sam Kleinman
2022-07-15 21:14:18 -04:00
committed by GitHub
parent 503ddf1c4d
commit cc07318866
4 changed files with 508 additions and 339 deletions

View File

@@ -34,7 +34,6 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C
// reduce the possibility of the
// ephemeral data overwriting later data
"tx_index",
"peerstore",
"light",
"blockstore",
"state",
@@ -57,7 +56,7 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
if err = keymigrate.Migrate(ctx, dbctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}

View File

@@ -25,5 +25,6 @@ type DBProvider func(*DBContext) (dbm.DB, error)
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

View File

@@ -26,7 +26,7 @@ type (
migrateFunc func(keyID) (keyID, error)
)
func getAllLegacyKeys(db dbm.DB) ([]keyID, error) {
func getAllLegacyKeys(db dbm.DB, storeName string) ([]keyID, error) {
var out []keyID
iter, err := db.Iterator(nil, nil)
@@ -37,9 +37,16 @@ func getAllLegacyKeys(db dbm.DB) ([]keyID, error) {
for ; iter.Valid(); iter.Next() {
k := iter.Key()
// make sure it's a key with a legacy format, and skip
// all other keys, to make it safe to resume the migration.
if !checkKeyType(k).isLegacy() {
// make sure it's a key that we'd expect to see in
// this database, with a legacy format, and skip all
// other keys, to make it safe to resume the
// migration.
kt, err := checkKeyType(k, storeName)
if err != nil {
return nil, err
}
if !kt.isLegacy() {
continue
}
@@ -88,73 +95,377 @@ var prefixes = []struct {
ktype keyType
check func(keyID) bool
}{
{[]byte("consensusParamsKey:"), consensusParamsKey, nil},
{[]byte("abciResponsesKey:"), abciResponsesKey, nil},
{[]byte("validatorsKey:"), validatorsKey, nil},
{[]byte("stateKey"), stateStoreKey, nil},
{[]byte("H:"), blockMetaKey, nil},
{[]byte("P:"), blockPartKey, nil},
{[]byte("C:"), commitKey, nil},
{[]byte("SC:"), seenCommitKey, nil},
{[]byte("BH:"), blockHashKey, nil},
{[]byte("size"), lightSizeKey, nil},
{[]byte("lb/"), lightBlockKey, nil},
{[]byte("\x00"), evidenceCommittedKey, checkEvidenceKey},
{[]byte("\x01"), evidencePendingKey, checkEvidenceKey},
{[]byte(legacyConsensusParamsPrefix), consensusParamsKey, nil},
{[]byte(legacyAbciResponsePrefix), abciResponsesKey, nil},
{[]byte(legacyValidatorPrefix), validatorsKey, nil},
{[]byte(legacyStateKeyPrefix), stateStoreKey, nil},
{[]byte(legacyBlockMetaPrefix), blockMetaKey, nil},
{[]byte(legacyBlockPartPrefix), blockPartKey, nil},
{[]byte(legacyCommitPrefix), commitKey, nil},
{[]byte(legacySeenCommitPrefix), seenCommitKey, nil},
{[]byte(legacyBlockHashPrefix), blockHashKey, nil},
{[]byte(legacyLightSizePrefix), lightSizeKey, nil},
{[]byte(legacyLightBlockPrefix), lightBlockKey, nil},
{[]byte(legacyEvidenceComittedPrefix), evidenceCommittedKey, checkEvidenceKey},
{[]byte(legacyEvidencePendingPrefix), evidencePendingKey, checkEvidenceKey},
}
const (
legacyConsensusParamsPrefix = "consensusParamsKey:"
legacyAbciResponsePrefix = "abciResponsesKey:"
legacyValidatorPrefix = "validatorsKey:"
legacyStateKeyPrefix = "stateKey"
legacyBlockMetaPrefix = "H:"
legacyBlockPartPrefix = "P:"
legacyCommitPrefix = "C:"
legacySeenCommitPrefix = "SC:"
legacyBlockHashPrefix = "BH:"
legacyLightSizePrefix = "size"
legacyLightBlockPrefix = "lb/"
legacyEvidenceComittedPrefix = "\x00"
legacyEvidencePendingPrefix = "\x01"
)
type migrationDefinition struct {
name string
storeName string
prefix []byte
ktype keyType
check func(keyID) bool
transform migrateFunc
}
var migrations = []migrationDefinition{
{
name: "consensus-params",
storeName: "state",
prefix: []byte(legacyConsensusParamsPrefix),
ktype: consensusParamsKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[19:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(6), int64(val))
},
},
{
name: "abci-responses",
storeName: "state",
prefix: []byte(legacyAbciResponsePrefix),
ktype: abciResponsesKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[17:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(7), int64(val))
},
},
{
name: "validators",
storeName: "state",
prefix: []byte(legacyValidatorPrefix),
ktype: validatorsKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[14:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(5), int64(val))
},
},
{
name: "tendermint-state",
storeName: "state",
prefix: []byte(legacyStateKeyPrefix),
ktype: stateStoreKey,
transform: func(key keyID) (keyID, error) {
return orderedcode.Append(nil, int64(8))
},
},
{
name: "block-meta",
storeName: "blockstore",
prefix: []byte(legacyBlockMetaPrefix),
ktype: blockMetaKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[2:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(0), int64(val))
},
},
{
name: "block-part",
storeName: "blockstore",
prefix: []byte(legacyBlockPartPrefix),
ktype: blockPartKey,
transform: func(key keyID) (keyID, error) {
parts := bytes.Split(key[2:], []byte(":"))
if len(parts) != 2 {
return nil, fmt.Errorf("block parts key has %d rather than 2 components",
len(parts))
}
valOne, err := strconv.Atoi(string(parts[0]))
if err != nil {
return nil, err
}
valTwo, err := strconv.Atoi(string(parts[1]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo))
},
},
{
name: "commit",
storeName: "blockstore",
prefix: []byte(legacyCommitPrefix),
ktype: commitKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[2:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(2), int64(val))
},
},
{
name: "seen-commit",
storeName: "blockstore",
prefix: []byte(legacySeenCommitPrefix),
ktype: seenCommitKey,
transform: func(key keyID) (keyID, error) {
val, err := strconv.Atoi(string(key[3:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(3), int64(val))
},
},
{
name: "block-hash",
storeName: "blockstore",
prefix: []byte(legacyBlockHashPrefix),
ktype: blockHashKey,
transform: func(key keyID) (keyID, error) {
hash := string(key[3:])
if len(hash)%2 == 1 {
hash = "0" + hash
}
val, err := hex.DecodeString(hash)
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(4), string(val))
},
},
{
name: "light-size",
storeName: "light",
prefix: []byte(legacyLightSizePrefix),
ktype: lightSizeKey,
transform: func(key keyID) (keyID, error) {
return orderedcode.Append(nil, int64(12))
},
},
{
name: "light-block",
storeName: "light",
prefix: []byte(legacyLightBlockPrefix),
ktype: lightBlockKey,
transform: func(key keyID) (keyID, error) {
if len(key) < 24 {
return nil, fmt.Errorf("light block evidence %q in invalid format", string(key))
}
val, err := strconv.Atoi(string(key[len(key)-20:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(11), int64(val))
},
},
{
name: "evidence-pending",
storeName: "evidence",
prefix: []byte(legacyEvidencePendingPrefix),
ktype: evidencePendingKey,
transform: func(key keyID) (keyID, error) {
return convertEvidence(key, 10)
},
},
{
name: "evidence-committed",
storeName: "evidence",
prefix: []byte(legacyEvidenceComittedPrefix),
ktype: evidenceCommittedKey,
transform: func(key keyID) (keyID, error) {
return convertEvidence(key, 9)
},
},
{
name: "event-tx",
storeName: "tx_index",
prefix: nil,
ktype: txHeightKey,
transform: func(key keyID) (keyID, error) {
parts := bytes.Split(key, []byte("/"))
if len(parts) != 4 {
return nil, fmt.Errorf("key has %d parts rather than 4", len(parts))
}
parts = parts[1:] // drop prefix
elems := make([]interface{}, 0, len(parts)+1)
elems = append(elems, "tx.height")
for idx, pt := range parts {
val, err := strconv.Atoi(string(pt))
if err != nil {
return nil, err
}
if idx == 0 {
elems = append(elems, fmt.Sprintf("%d", val))
} else {
elems = append(elems, int64(val))
}
}
return orderedcode.Append(nil, elems...)
},
},
{
name: "event-abci",
storeName: "tx_index",
prefix: nil,
ktype: abciEventKey,
transform: func(key keyID) (keyID, error) {
parts := bytes.Split(key, []byte("/"))
elems := make([]interface{}, 0, 4)
if len(parts) == 4 {
elems = append(elems, string(parts[0]), string(parts[1]))
val, err := strconv.Atoi(string(parts[2]))
if err != nil {
return nil, err
}
elems = append(elems, int64(val))
val2, err := strconv.Atoi(string(parts[3]))
if err != nil {
return nil, err
}
elems = append(elems, int64(val2))
} else {
elems = append(elems, string(parts[0]))
parts = parts[1:]
val, err := strconv.Atoi(string(parts[len(parts)-1]))
if err != nil {
return nil, err
}
val2, err := strconv.Atoi(string(parts[len(parts)-2]))
if err != nil {
return nil, err
}
appKey := bytes.Join(parts[:len(parts)-3], []byte("/"))
elems = append(elems, string(appKey), int64(val), int64(val2))
}
return orderedcode.Append(nil, elems...)
},
},
{
name: "event-tx-hash",
storeName: "tx_index",
prefix: nil,
ktype: txHashKey,
transform: func(key keyID) (keyID, error) {
return orderedcode.Append(nil, "tx.hash", string(key))
},
},
}
// checkKeyType classifies a candidate key based on its structure.
func checkKeyType(key keyID) keyType {
for _, p := range prefixes {
if bytes.HasPrefix(key, p.prefix) {
if p.check == nil || p.check(key) {
return p.ktype
func checkKeyType(key keyID, storeName string) (keyType, error) {
var migrations []migrationDefinition
for _, m := range migrations {
if m.storeName != storeName {
continue
}
if m.prefix == nil && storeName == "tx_index" {
// A legacy event key has the form:
//
// <name> / <value> / <height> / <index>
//
// Transaction hashes are stored as a raw binary hash with no prefix.
//
// Note, though, that nothing prevents event names or values from containing
// additional "/" separators, so the parse has to be forgiving.
parts := bytes.Split(key, []byte("/"))
if len(parts) >= 4 {
// Special case for tx.height.
if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) {
return txHeightKey, nil
}
// The name cannot be empty, but we don't know where the name ends and
// the value begins, so insist that there be something.
var n int
for _, part := range parts[:len(parts)-2] {
n += len(part)
}
// Check whether the last two fields could be .../height/index.
if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) {
return abciEventKey, nil
}
}
}
}
// A legacy event key has the form:
//
// <name> / <value> / <height> / <index>
//
// Transaction hashes are stored as a raw binary hash with no prefix.
//
// Because a hash can contain any byte, it is possible (though unlikely)
// that a hash could have the correct form for an event key, in which case
// we would translate it incorrectly. To reduce the likelihood of an
// incorrect interpretation, we parse candidate event keys and check for
// some structural properties before making a decision.
//
// Note, though, that nothing prevents event names or values from containing
// additional "/" separators, so the parse has to be forgiving.
parts := bytes.Split(key, []byte("/"))
if len(parts) >= 4 {
// Special case for tx.height.
if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) {
return txHeightKey
}
// The name cannot be empty, but we don't know where the name ends and
// the value begins, so insist that there be something.
var n int
for _, part := range parts[:len(parts)-2] {
n += len(part)
}
// Check whether the last two fields could be .../height/index.
if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) {
return abciEventKey
// If we get here, it's not an event key. Treat it as a hash if it is the
// right length. Note that it IS possible this could collide with the
// translation of some other key (though not a hash, since encoded hashes
// will be longer). The chance of that is small, but there is nothing we can
// do to detect it.
if len(key) == 32 {
return txHashKey, nil
}
} else if bytes.HasPrefix(key, m.prefix) {
if m.check == nil || m.check(key) {
return m.ktype, nil
}
// we have an expected legacy prefix but that
// didn't pass the check. This probably means
// the evidence data is currupt (based on the
// defined migrations) best to error here.
return -1, fmt.Errorf("in store %q, key %q exists but is not a valid key of type %q", storeName, key, m.ktype)
}
// if we get here, the key in question is either
// migrated or of a different type. We can't break
// here because there are more than one key type in a
// specific database, so we have to keep iterating.
}
// if we've looked at every migration and not identified a key
// type, then the key has been migrated *or* we (possibly, but
// very unlikely have data that is in the wrong place or the
// sign of corruption.) In either case we should not attempt
// more migrations at this point
// If we get here, it's not an event key. Treat it as a hash if it is the
// right length. Note that it IS possible this could collide with the
// translation of some other key (though not a hash, since encoded hashes
// will be longer). The chance of that is small, but there is nothing we can
// do to detect it.
if len(key) == 32 {
return txHashKey
}
return nonLegacyKey
return nonLegacyKey, nil
}
// isDecimal reports whether buf is a non-empty sequence of Unicode decimal
@@ -168,161 +479,21 @@ func isDecimal(buf []byte) bool {
return len(buf) != 0
}
func migrateKey(key keyID) (keyID, error) {
switch checkKeyType(key) {
case blockMetaKey:
val, err := strconv.Atoi(string(key[2:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(0), int64(val))
case blockPartKey:
parts := bytes.Split(key[2:], []byte(":"))
if len(parts) != 2 {
return nil, fmt.Errorf("block parts key has %d rather than 2 components",
len(parts))
}
valOne, err := strconv.Atoi(string(parts[0]))
if err != nil {
return nil, err
}
valTwo, err := strconv.Atoi(string(parts[1]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo))
case commitKey:
val, err := strconv.Atoi(string(key[2:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(2), int64(val))
case seenCommitKey:
val, err := strconv.Atoi(string(key[3:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(3), int64(val))
case blockHashKey:
hash := string(key[3:])
if len(hash)%2 == 1 {
hash = "0" + hash
}
val, err := hex.DecodeString(hash)
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(4), string(val))
case validatorsKey:
val, err := strconv.Atoi(string(key[14:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(5), int64(val))
case consensusParamsKey:
val, err := strconv.Atoi(string(key[19:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(6), int64(val))
case abciResponsesKey:
val, err := strconv.Atoi(string(key[17:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(7), int64(val))
case stateStoreKey:
return orderedcode.Append(nil, int64(8))
case evidenceCommittedKey:
return convertEvidence(key, 9)
case evidencePendingKey:
return convertEvidence(key, 10)
case lightBlockKey:
if len(key) < 24 {
return nil, fmt.Errorf("light block evidence %q in invalid format", string(key))
}
val, err := strconv.Atoi(string(key[len(key)-20:]))
if err != nil {
return nil, err
}
return orderedcode.Append(nil, int64(11), int64(val))
case lightSizeKey:
return orderedcode.Append(nil, int64(12))
case txHeightKey:
parts := bytes.Split(key, []byte("/"))
if len(parts) != 4 {
return nil, fmt.Errorf("key has %d parts rather than 4", len(parts))
}
parts = parts[1:] // drop prefix
elems := make([]interface{}, 0, len(parts)+1)
elems = append(elems, "tx.height")
for idx, pt := range parts {
val, err := strconv.Atoi(string(pt))
if err != nil {
return nil, err
}
if idx == 0 {
elems = append(elems, fmt.Sprintf("%d", val))
} else {
elems = append(elems, int64(val))
}
}
return orderedcode.Append(nil, elems...)
case abciEventKey:
parts := bytes.Split(key, []byte("/"))
elems := make([]interface{}, 0, 4)
if len(parts) == 4 {
elems = append(elems, string(parts[0]), string(parts[1]))
val, err := strconv.Atoi(string(parts[2]))
if err != nil {
return nil, err
}
elems = append(elems, int64(val))
val2, err := strconv.Atoi(string(parts[3]))
if err != nil {
return nil, err
}
elems = append(elems, int64(val2))
} else {
elems = append(elems, string(parts[0]))
parts = parts[1:]
val, err := strconv.Atoi(string(parts[len(parts)-1]))
if err != nil {
return nil, err
}
val2, err := strconv.Atoi(string(parts[len(parts)-2]))
if err != nil {
return nil, err
}
appKey := bytes.Join(parts[:len(parts)-3], []byte("/"))
elems = append(elems, string(appKey), int64(val), int64(val2))
}
return orderedcode.Append(nil, elems...)
case txHashKey:
return orderedcode.Append(nil, "tx.hash", string(key))
default:
return nil, fmt.Errorf("key %q is in the wrong format", string(key))
func migrateKey(key keyID, storeName string) (keyID, error) {
kt, err := checkKeyType(key, storeName)
if err != nil {
return nil, err
}
for _, migration := range migrations {
if migration.storeName != storeName {
continue
}
if kt == migration.ktype {
return migration.transform(key)
}
}
return nil, fmt.Errorf("key %q is in the wrong format", string(key))
}
func convertEvidence(key keyID, newPrefix int64) ([]byte, error) {
@@ -374,7 +545,29 @@ func isHex(data []byte) bool {
return len(data) != 0
}
func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error {
func getMigrationFunc(storeName string, key keyID) (*migrationDefinition, error) {
for idx := range migrations {
migration := migrations[idx]
if migration.storeName == storeName {
if migration.prefix == nil {
return &migration, nil
}
if bytes.HasPrefix(migration.prefix, key) {
return &migration, nil
}
}
}
return nil, fmt.Errorf("no migration defined for data store %q and key %q", storeName, key)
}
func replaceKey(db dbm.DB, storeName string, key keyID) error {
migration, err := getMigrationFunc(storeName, key)
if err != nil {
return err
}
gooseFn := migration.transform
exists, err := db.Has(key)
if err != nil {
return err
@@ -433,8 +626,8 @@ func replaceKey(db dbm.DB, key keyID, gooseFn migrateFunc) error {
// The context allows for a safe termination of the operation
// (e.g connected to a singal handler,) to abort the operation
// in-between migration operations.
func Migrate(ctx context.Context, db dbm.DB) error {
keys, err := getAllLegacyKeys(db)
func Migrate(ctx context.Context, storeName string, db dbm.DB) error {
keys, err := getAllLegacyKeys(db, storeName)
if err != nil {
return err
}
@@ -451,7 +644,7 @@ func Migrate(ctx context.Context, db dbm.DB) error {
if err := ctx.Err(); err != nil {
return err
}
return replaceKey(db, key, migrateKey)
return replaceKey(db, storeName, key)
})
}
if g.Wait() != nil {

View File

@@ -1,16 +1,12 @@
package keymigrate
import (
"context"
"errors"
"fmt"
"math"
"strings"
"testing"
"github.com/google/orderedcode"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
)
func makeKey(t *testing.T, elems ...interface{}) []byte {
@@ -78,30 +74,6 @@ func getNewPrefixKeys(t *testing.T, val int) map[string][]byte {
}
}
func getLegacyDatabase(t *testing.T) (int, dbm.DB) {
db := dbm.NewMemDB()
batch := db.NewBatch()
ct := 0
generated := []map[string][]byte{
getLegacyPrefixKeys(8),
getLegacyPrefixKeys(9001),
getLegacyPrefixKeys(math.MaxInt32 << 1),
getLegacyPrefixKeys(math.MaxInt64 - 8),
}
// populate database
for _, km := range generated {
for _, key := range km {
ct++
require.NoError(t, batch.Set(key, []byte(fmt.Sprintf(`{"value": %d}`, ct))))
}
}
require.NoError(t, batch.WriteSync())
require.NoError(t, batch.Close())
return ct - (2 * len(generated)) + 2, db
}
func TestMigration(t *testing.T) {
t.Run("Idempotency", func(t *testing.T) {
// we want to make sure that the key space for new and
@@ -113,37 +85,12 @@ func TestMigration(t *testing.T) {
require.Equal(t, len(legacyPrefixes), len(newPrefixes))
t.Run("Legacy", func(t *testing.T) {
for kind, le := range legacyPrefixes {
require.True(t, checkKeyType(le).isLegacy(), kind)
}
})
t.Run("New", func(t *testing.T) {
for kind, ne := range newPrefixes {
require.False(t, checkKeyType(ne).isLegacy(), kind)
}
})
t.Run("Conversion", func(t *testing.T) {
for kind, le := range legacyPrefixes {
nk, err := migrateKey(le)
require.NoError(t, err, kind)
require.False(t, checkKeyType(nk).isLegacy(), kind)
}
})
t.Run("Hashes", func(t *testing.T) {
t.Run("NewKeysAreNotHashes", func(t *testing.T) {
for _, key := range getNewPrefixKeys(t, 9001) {
require.True(t, len(key) != 32)
}
})
t.Run("ContrivedLegacyKeyDetection", func(t *testing.T) {
// length 32: should appear to be a hash
require.Equal(t, txHashKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")))
// length ≠ 32: should not appear to be a hash
require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx--")))
require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")))
})
})
})
t.Run("Migrations", func(t *testing.T) {
@@ -171,72 +118,101 @@ func TestMigration(t *testing.T) {
"UserKey3": []byte("foo/bar/baz/1.2/4"),
}
for kind, key := range table {
out, err := migrateKey(key)
out, err := migrateKey(key, "")
// TODO probably these error at the
// moment because of store missmatches
require.Error(t, err, kind)
require.Nil(t, out, kind)
}
})
t.Run("Replacement", func(t *testing.T) {
t.Run("MissingKey", func(t *testing.T) {
db := dbm.NewMemDB()
require.NoError(t, replaceKey(db, keyID("hi"), nil))
})
t.Run("ReplacementFails", func(t *testing.T) {
db := dbm.NewMemDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {
return nil, errors.New("hi")
}))
})
t.Run("KeyDisappears", func(t *testing.T) {
db := dbm.NewMemDB()
key := keyID("hi")
require.NoError(t, db.Set(key, []byte("world")))
require.Error(t, replaceKey(db, key, func(k keyID) (keyID, error) {
require.NoError(t, db.Delete(key))
return keyID("wat"), nil
}))
exists, err := db.Has(key)
require.NoError(t, err)
require.False(t, exists)
exists, err = db.Has(keyID("wat"))
require.NoError(t, err)
require.False(t, exists)
})
})
})
t.Run("Integration", func(t *testing.T) {
t.Run("KeyDiscovery", func(t *testing.T) {
size, db := getLegacyDatabase(t)
keys, err := getAllLegacyKeys(db)
require.NoError(t, err)
require.Equal(t, size, len(keys))
legacyKeys := 0
for _, k := range keys {
if checkKeyType(k).isLegacy() {
legacyKeys++
}
}
require.Equal(t, size, legacyKeys)
})
t.Run("KeyIdempotency", func(t *testing.T) {
for _, key := range getNewPrefixKeys(t, 84) {
require.False(t, checkKeyType(key).isLegacy())
}
})
t.Run("Migrate", func(t *testing.T) {
_, db := getLegacyDatabase(t)
ctx := context.Background()
err := Migrate(ctx, db)
require.NoError(t, err)
keys, err := getAllLegacyKeys(db)
require.NoError(t, err)
require.Equal(t, 0, len(keys))
})
})
}
func TestGlobalDataStructuresForRefactor(t *testing.T) {
defer func() {
if t.Failed() {
t.Log("number of migrations:", len(migrations))
}
}()
const unPrefixedLegacyKeys = 3
t.Run("MigrationsAreDefined", func(t *testing.T) {
if len(prefixes)+unPrefixedLegacyKeys != len(migrations) {
t.Fatal("migrationse are not correctly defined",
"prefixes", len(prefixes),
"migrations", len(migrations))
}
})
t.Run("AllMigrationsHavePrefixDefined", func(t *testing.T) {
for _, m := range migrations {
if m.prefix == nil && m.storeName != "tx_index" {
t.Errorf("migration named %q for store %q does not have a prefix defined", m.name, m.storeName)
}
}
})
t.Run("Deduplication", func(t *testing.T) {
t.Run("Prefixes", func(t *testing.T) {
set := map[string]struct{}{}
for _, prefix := range prefixes {
set[string(prefix.prefix)] = struct{}{}
}
if len(set) != len(prefixes) {
t.Fatal("duplicate prefix definition",
"set", len(set),
"values", set)
}
})
t.Run("MigrationName", func(t *testing.T) {
set := map[string]struct{}{}
for _, migration := range migrations {
set[migration.name] = struct{}{}
}
if len(set) != len(migrations) {
t.Fatal("duplicate migration name defined",
"set", len(set),
"values", set)
}
})
t.Run("MigrationPrefix", func(t *testing.T) {
set := map[string]struct{}{}
for _, migration := range migrations {
set[string(migration.prefix)] = struct{}{}
}
// three keys don't have prefixes in the
// legacy system; this is fine but it means
// the set will have 1 less than expected
// (well 2 less, but the empty key takes one
// of the slots):
expectedDupl := unPrefixedLegacyKeys - 1
if len(set) != len(migrations)-expectedDupl {
t.Fatal("duplicate migration prefix defined",
"set", len(set),
"expected", len(migrations)-expectedDupl,
"values", set)
}
})
t.Run("MigrationStoreName", func(t *testing.T) {
set := map[string]struct{}{}
for _, migration := range migrations {
set[migration.storeName] = struct{}{}
}
if len(set) != 5 {
t.Fatal("duplicate migration store name defined",
"set", len(set),
"values", set)
}
if _, ok := set[""]; ok {
t.Fatal("empty store name defined")
}
})
})
t.Run("NilPrefix", func(t *testing.T) {
_, err := getMigrationFunc("tx_index", []byte("fooo"))
if err != nil {
t.Fatal("should find an index for tx", err)
}
})
}