mirror of
https://github.com/tendermint/tendermint.git
synced 2026-04-23 17:20:33 +00:00
store: move package to internal (#6978)
This commit is contained in:
@@ -10,11 +10,11 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/consensus"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -18,10 +18,10 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
sf "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -24,11 +24,11 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
sf "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
tmstore "github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
tmstore "github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -19,11 +19,11 @@ import (
|
||||
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
tmbytes "github.com/tendermint/tendermint/libs/bytes"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -32,7 +33,6 @@ import (
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/internal/mempool"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -26,11 +26,11 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
statemocks "github.com/tendermint/tendermint/internal/state/mocks"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -15,10 +15,10 @@ import (
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -29,13 +29,13 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
sf "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -18,9 +18,9 @@ import (
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -16,9 +16,9 @@ import (
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
smmocks "github.com/tendermint/tendermint/internal/state/mocks"
|
||||
sf "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/state/indexer"
|
||||
"github.com/tendermint/tendermint/internal/state/indexer/sink"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmstrings "github.com/tendermint/tendermint/libs/strings"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/state/mocks"
|
||||
sf "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
@@ -17,11 +17,11 @@ import (
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/state/mocks"
|
||||
statefactory "github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
testfactory "github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -16,12 +16,12 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/internal/proxy"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/light"
|
||||
"github.com/tendermint/tendermint/light/provider"
|
||||
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
@@ -20,12 +20,12 @@ import (
|
||||
proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks"
|
||||
smmocks "github.com/tendermint/tendermint/internal/state/mocks"
|
||||
"github.com/tendermint/tendermint/internal/statesync/mocks"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/light/provider"
|
||||
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
||||
631
internal/store/store.go
Normal file
631
internal/store/store.go
Normal file
@@ -0,0 +1,631 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/google/orderedcode"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
/*
BlockStore is a simple low level store for blocks.

There are three types of information stored:
  - BlockMeta:   Meta information about each block
  - Block part:  Parts of each block, aggregated w/ PartSet
  - Commit:      The commit part of each block, for gossiping precommit votes

Currently the precommit signatures are duplicated in the Block parts as
well as the Commit. In the future this may change, perhaps by moving
the Commit data outside the Block. (TODO)

The store can be assumed to contain all contiguous blocks between base and height (inclusive).

// NOTE: BlockStore methods will panic if they encounter errors
// deserializing loaded data, indicating probable corruption on disk.
*/
type BlockStore struct {
	// db is the sole backing store; all records share it, disambiguated by
	// the key prefixes declared at the bottom of this file.
	db dbm.DB
}
|
||||
|
||||
// NewBlockStore returns a new BlockStore with the given DB,
|
||||
// initialized to the last height that was committed to the DB.
|
||||
func NewBlockStore(db dbm.DB) *BlockStore {
|
||||
return &BlockStore{db}
|
||||
}
|
||||
|
||||
// Base returns the first known contiguous block height, or 0 for empty block stores.
|
||||
func (bs *BlockStore) Base() int64 {
|
||||
iter, err := bs.db.Iterator(
|
||||
blockMetaKey(1),
|
||||
blockMetaKey(1<<63-1),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer iter.Close()
|
||||
|
||||
if iter.Valid() {
|
||||
height, err := decodeBlockMetaKey(iter.Key())
|
||||
if err == nil {
|
||||
return height
|
||||
}
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Height returns the last known contiguous block height, or 0 for empty block stores.
|
||||
func (bs *BlockStore) Height() int64 {
|
||||
iter, err := bs.db.ReverseIterator(
|
||||
blockMetaKey(1),
|
||||
blockMetaKey(1<<63-1),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer iter.Close()
|
||||
|
||||
if iter.Valid() {
|
||||
height, err := decodeBlockMetaKey(iter.Key())
|
||||
if err == nil {
|
||||
return height
|
||||
}
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Size returns the number of blocks in the block store.
|
||||
func (bs *BlockStore) Size() int64 {
|
||||
height := bs.Height()
|
||||
if height == 0 {
|
||||
return 0
|
||||
}
|
||||
return height + 1 - bs.Base()
|
||||
}
|
||||
|
||||
// LoadBase atomically loads the base block meta, or returns nil if no base is found.
|
||||
func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta {
|
||||
iter, err := bs.db.Iterator(
|
||||
blockMetaKey(1),
|
||||
blockMetaKey(1<<63-1),
|
||||
)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer iter.Close()
|
||||
|
||||
if iter.Valid() {
|
||||
var pbbm = new(tmproto.BlockMeta)
|
||||
err = proto.Unmarshal(iter.Value(), pbbm)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err))
|
||||
}
|
||||
|
||||
blockMeta, err := types.BlockMetaFromProto(pbbm)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto blockMeta: %w", err))
|
||||
}
|
||||
|
||||
return blockMeta
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadBlock returns the block with the given height.
|
||||
// If no block is found for that height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
var blockMeta = bs.LoadBlockMeta(height)
|
||||
if blockMeta == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pbb := new(tmproto.Block)
|
||||
buf := []byte{}
|
||||
for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ {
|
||||
part := bs.LoadBlockPart(height, i)
|
||||
// If the part is missing (e.g. since it has been deleted after we
|
||||
// loaded the block meta) we consider the whole block to be missing.
|
||||
if part == nil {
|
||||
return nil
|
||||
}
|
||||
buf = append(buf, part.Bytes...)
|
||||
}
|
||||
err := proto.Unmarshal(buf, pbb)
|
||||
if err != nil {
|
||||
// NOTE: The existence of meta should imply the existence of the
|
||||
// block. So, make sure meta is only saved after blocks are saved.
|
||||
panic(fmt.Sprintf("Error reading block: %v", err))
|
||||
}
|
||||
|
||||
block, err := types.BlockFromProto(pbb)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto block: %w", err))
|
||||
}
|
||||
|
||||
return block
|
||||
}
|
||||
|
||||
// LoadBlockByHash returns the block with the given hash.
|
||||
// If no block is found for that hash, it returns nil.
|
||||
// Panics if it fails to parse height associated with the given hash.
|
||||
func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
|
||||
bz, err := bs.db.Get(blockHashKey(hash))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s := string(bz)
|
||||
height, err := strconv.ParseInt(s, 10, 64)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to extract height from %s: %v", s, err))
|
||||
}
|
||||
return bs.LoadBlock(height)
|
||||
}
|
||||
|
||||
// LoadBlockPart returns the Part at the given index
|
||||
// from the block at the given height.
|
||||
// If no part is found for the given height and index, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
|
||||
var pbpart = new(tmproto.Part)
|
||||
|
||||
bz, err := bs.db.Get(blockPartKey(height, index))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = proto.Unmarshal(bz, pbpart)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unmarshal to tmproto.Part failed: %w", err))
|
||||
}
|
||||
part, err := types.PartFromProto(pbpart)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error reading block part: %v", err))
|
||||
}
|
||||
|
||||
return part
|
||||
}
|
||||
|
||||
// LoadBlockMeta returns the BlockMeta for the given height.
|
||||
// If no block is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
var pbbm = new(tmproto.BlockMeta)
|
||||
bz, err := bs.db.Get(blockMetaKey(height))
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = proto.Unmarshal(bz, pbbm)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err))
|
||||
}
|
||||
|
||||
blockMeta, err := types.BlockMetaFromProto(pbbm)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto blockMeta: %w", err))
|
||||
}
|
||||
|
||||
return blockMeta
|
||||
}
|
||||
|
||||
// LoadBlockCommit returns the Commit for the given height.
|
||||
// This commit consists of the +2/3 and other Precommit-votes for block at `height`,
|
||||
// and it comes from the block.LastCommit for `height+1`.
|
||||
// If no commit is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
var pbc = new(tmproto.Commit)
|
||||
bz, err := bs.db.Get(blockCommitKey(height))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
err = proto.Unmarshal(bz, pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error reading block commit: %w", err))
|
||||
}
|
||||
commit, err := types.CommitFromProto(pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error reading block commit: %v", err))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
|
||||
// LoadSeenCommit returns the last locally seen Commit before being
|
||||
// cannonicalized. This is useful when we've seen a commit, but there
|
||||
// has not yet been a new block at `height + 1` that includes this
|
||||
// commit in its block.LastCommit.
|
||||
func (bs *BlockStore) LoadSeenCommit() *types.Commit {
|
||||
var pbc = new(tmproto.Commit)
|
||||
bz, err := bs.db.Get(seenCommitKey())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
err = proto.Unmarshal(bz, pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("error reading block seen commit: %v", err))
|
||||
}
|
||||
|
||||
commit, err := types.CommitFromProto(pbc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error from proto commit: %w", err))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
|
||||
// PruneBlocks removes block up to (but not including) a height. It returns the number of blocks pruned.
func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) {
	if height <= 0 {
		return 0, fmt.Errorf("height must be greater than 0")
	}

	if height > bs.Height() {
		return 0, fmt.Errorf("height must be equal to or less than the latest height %d", bs.Height())
	}

	// when removing the block meta, use the hash to remove the hash key at the same time
	removeBlockHash := func(key, value []byte, batch dbm.Batch) error {
		// unmarshal block meta
		var pbbm = new(tmproto.BlockMeta)
		err := proto.Unmarshal(value, pbbm)
		if err != nil {
			return fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err)
		}

		blockMeta, err := types.BlockMetaFromProto(pbbm)
		if err != nil {
			return fmt.Errorf("error from proto blockMeta: %w", err)
		}

		// delete the hash key corresponding to the block meta's hash
		if err := batch.Delete(blockHashKey(blockMeta.BlockID.Hash)); err != nil {
			return fmt.Errorf("failed to delete hash key: %X: %w", blockHashKey(blockMeta.BlockID.Hash), err)
		}

		return nil
	}

	// remove block meta first as this is used to indicate whether the block exists.
	// For this reason, we also use only block meta as a measure of the amount of blocks pruned
	pruned, err := bs.pruneRange(blockMetaKey(0), blockMetaKey(height), removeBlockHash)
	if err != nil {
		return pruned, err
	}

	// Parts and commits are pruned after the metas; their counts are
	// deliberately ignored in the returned total (one block = one meta).
	if _, err := bs.pruneRange(blockPartKey(0, 0), blockPartKey(height, 0), nil); err != nil {
		return pruned, err
	}

	if _, err := bs.pruneRange(blockCommitKey(0), blockCommitKey(height), nil); err != nil {
		return pruned, err
	}

	return pruned, nil
}
|
||||
|
||||
// pruneRange is a generic function for deleting a range of values based on the lowest
// height up to but excluding retainHeight. For each key/value pair, an optional hook can be
// executed before the deletion itself is made. pruneRange will use batch delete to delete
// keys in batches of at most 1000 keys.
func (bs *BlockStore) pruneRange(
	start []byte,
	end []byte,
	preDeletionHook func(key, value []byte, batch dbm.Batch) error,
) (uint64, error) {
	var (
		err         error
		pruned      uint64
		totalPruned uint64 = 0
	)

	batch := bs.db.NewBatch()
	defer batch.Close()

	// Delete the first chunk. batchDelete returns the key to resume from,
	// which equals end once the whole range has been consumed.
	pruned, start, err = bs.batchDelete(batch, start, end, preDeletionHook)
	if err != nil {
		return totalPruned, err
	}

	// loop until we have finished iterating over all the keys by writing, opening a new batch
	// and incrementing through the next range of keys.
	for !bytes.Equal(start, end) {
		if err := batch.Write(); err != nil {
			return totalPruned, err
		}

		totalPruned += pruned

		if err := batch.Close(); err != nil {
			return totalPruned, err
		}

		batch = bs.db.NewBatch()

		pruned, start, err = bs.batchDelete(batch, start, end, preDeletionHook)
		if err != nil {
			return totalPruned, err
		}
	}

	// once we looped over all keys we do a final flush to disk
	if err := batch.WriteSync(); err != nil {
		return totalPruned, err
	}
	totalPruned += pruned
	return totalPruned, nil
}
|
||||
|
||||
// batchDelete runs an iterator over a set of keys, first performing a pre deletion hook before adding it to the batch.
// The function ends when either 1000 keys have been added to the batch or the iterator has reached the end.
// It returns the number of keys queued for deletion and the key to resume
// from (end when the range is exhausted).
func (bs *BlockStore) batchDelete(
	batch dbm.Batch,
	start, end []byte,
	preDeletionHook func(key, value []byte, batch dbm.Batch) error,
) (uint64, []byte, error) {
	var pruned uint64 = 0
	iter, err := bs.db.Iterator(start, end)
	if err != nil {
		return pruned, start, err
	}
	defer iter.Close()

	for ; iter.Valid(); iter.Next() {
		key := iter.Key()
		if preDeletionHook != nil {
			if err := preDeletionHook(key, iter.Value(), batch); err != nil {
				return 0, start, fmt.Errorf("pruning error at key %X: %w", iter.Key(), err)
			}
		}

		if err := batch.Delete(key); err != nil {
			return 0, start, fmt.Errorf("pruning error at key %X: %w", iter.Key(), err)
		}

		pruned++
		// Cap the batch at 1000 deletions; the caller resumes from this key.
		if pruned == 1000 {
			return pruned, iter.Key(), iter.Error()
		}
	}

	return pruned, end, iter.Error()
}
|
||||
|
||||
// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db.
// blockParts: Must be parts of the block
// seenCommit: The +2/3 precommits that were seen which committed at height.
// If all the nodes restart after committing a block,
// we need this to reload the precommits to catch-up nodes to the
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
	if block == nil {
		panic("BlockStore can only save a non-nil block")
	}

	batch := bs.db.NewBatch()

	height := block.Height
	hash := block.Hash()

	// Enforce contiguity: the new block must be exactly one above the
	// current head (skipped when the store is empty, i.e. Base() == 0).
	if g, w := height, bs.Height()+1; bs.Base() > 0 && g != w {
		panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
	}
	if !blockParts.IsComplete() {
		panic("BlockStore can only save complete block part sets")
	}

	// Save block parts. This must be done before the block meta, since callers
	// typically load the block meta first as an indication that the block exists
	// and then go on to load block parts - we must make sure the block is
	// complete as soon as the block meta is written.
	for i := 0; i < int(blockParts.Total()); i++ {
		part := blockParts.GetPart(i)
		bs.saveBlockPart(height, i, part, batch)
	}

	blockMeta := types.NewBlockMeta(block, blockParts)
	pbm := blockMeta.ToProto()
	if pbm == nil {
		panic("nil blockmeta")
	}

	metaBytes := mustEncode(pbm)
	if err := batch.Set(blockMetaKey(height), metaBytes); err != nil {
		panic(err)
	}

	// Hash index maps the block hash to its height as a decimal string
	// (parsed back in LoadBlockByHash).
	if err := batch.Set(blockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil {
		panic(err)
	}

	// block.LastCommit is the canonical commit for height-1.
	pbc := block.LastCommit.ToProto()
	blockCommitBytes := mustEncode(pbc)
	if err := batch.Set(blockCommitKey(height-1), blockCommitBytes); err != nil {
		panic(err)
	}

	// Save seen commit (seen +2/3 precommits for block)
	pbsc := seenCommit.ToProto()
	seenCommitBytes := mustEncode(pbsc)
	if err := batch.Set(seenCommitKey(), seenCommitBytes); err != nil {
		panic(err)
	}

	if err := batch.WriteSync(); err != nil {
		panic(err)
	}

	if err := batch.Close(); err != nil {
		panic(err)
	}
}
|
||||
|
||||
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch) {
|
||||
pbp, err := part.ToProto()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unable to make part into proto: %w", err))
|
||||
}
|
||||
partBytes := mustEncode(pbp)
|
||||
if err := batch.Set(blockPartKey(height, index), partBytes); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node.
|
||||
func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error {
|
||||
pbc := seenCommit.ToProto()
|
||||
seenCommitBytes, err := proto.Marshal(pbc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to marshal commit: %w", err)
|
||||
}
|
||||
return bs.db.Set(seenCommitKey(), seenCommitBytes)
|
||||
}
|
||||
|
||||
func (bs *BlockStore) SaveSignedHeader(sh *types.SignedHeader, blockID types.BlockID) error {
|
||||
// first check that the block store doesn't already have the block
|
||||
bz, err := bs.db.Get(blockMetaKey(sh.Height))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bz != nil {
|
||||
return fmt.Errorf("block at height %d already saved", sh.Height)
|
||||
}
|
||||
|
||||
// FIXME: saving signed headers although necessary for proving evidence,
|
||||
// doesn't have complete parity with block meta's thus block size and num
|
||||
// txs are filled with negative numbers. We should aim to find a solution to
|
||||
// this.
|
||||
blockMeta := &types.BlockMeta{
|
||||
BlockID: blockID,
|
||||
BlockSize: -1,
|
||||
Header: *sh.Header,
|
||||
NumTxs: -1,
|
||||
}
|
||||
|
||||
batch := bs.db.NewBatch()
|
||||
|
||||
pbm := blockMeta.ToProto()
|
||||
metaBytes := mustEncode(pbm)
|
||||
if err := batch.Set(blockMetaKey(sh.Height), metaBytes); err != nil {
|
||||
return fmt.Errorf("unable to save block meta: %w", err)
|
||||
}
|
||||
|
||||
pbc := sh.Commit.ToProto()
|
||||
blockCommitBytes := mustEncode(pbc)
|
||||
if err := batch.Set(blockCommitKey(sh.Height), blockCommitBytes); err != nil {
|
||||
return fmt.Errorf("unable to save commit: %w", err)
|
||||
}
|
||||
|
||||
if err := batch.WriteSync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return batch.Close()
|
||||
}
|
||||
|
||||
//---------------------------------- KEY ENCODING -----------------------------------------
|
||||
|
||||
// key prefixes
// Each prefix namespaces one record kind inside the shared block store DB;
// see the key-constructor functions below for the full key layouts.
const (
	// prefixes are unique across all tm db's
	prefixBlockMeta   = int64(0) // block meta, keyed by height
	prefixBlockPart   = int64(1) // block parts, keyed by (height, part index)
	prefixBlockCommit = int64(2) // canonical commit, keyed by height
	prefixSeenCommit  = int64(3) // single latest locally-seen commit (no key suffix)
	prefixBlockHash   = int64(4) // block hash -> height index
)
|
||||
|
||||
func blockMetaKey(height int64) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixBlockMeta, height)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func decodeBlockMetaKey(key []byte) (height int64, err error) {
|
||||
var prefix int64
|
||||
remaining, err := orderedcode.Parse(string(key), &prefix, &height)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(remaining) != 0 {
|
||||
return -1, fmt.Errorf("expected complete key but got remainder: %s", remaining)
|
||||
}
|
||||
if prefix != prefixBlockMeta {
|
||||
return -1, fmt.Errorf("incorrect prefix. Expected %v, got %v", prefixBlockMeta, prefix)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func blockPartKey(height int64, partIndex int) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixBlockPart, height, int64(partIndex))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func blockCommitKey(height int64) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixBlockCommit, height)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func seenCommitKey() []byte {
|
||||
key, err := orderedcode.Append(nil, prefixSeenCommit)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func blockHashKey(hash []byte) []byte {
|
||||
key, err := orderedcode.Append(nil, prefixBlockHash, string(hash))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// mustEncode proto encodes a proto.message and panics if fails
|
||||
func mustEncode(pb proto.Message) []byte {
|
||||
bz, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unable to marshal: %w", err))
|
||||
}
|
||||
return bz
|
||||
}
|
||||
555
internal/store/store_test.go
Normal file
555
internal/store/store_test.go
Normal file
@@ -0,0 +1,555 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
sm "github.com/tendermint/tendermint/internal/state"
|
||||
"github.com/tendermint/tendermint/internal/state/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
// A cleanupFunc cleans up any config / test files created for a particular
// test. It is returned by fixture constructors and invoked by TestMain.
type cleanupFunc func()
|
||||
|
||||
// make a Commit with a single vote containing just the height and a timestamp
|
||||
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
|
||||
commitSigs := []types.CommitSig{{
|
||||
BlockIDFlag: types.BlockIDFlagCommit,
|
||||
ValidatorAddress: tmrand.Bytes(crypto.AddressSize),
|
||||
Timestamp: timestamp,
|
||||
Signature: []byte("Signature"),
|
||||
}}
|
||||
return types.NewCommit(
|
||||
height,
|
||||
0,
|
||||
types.BlockID{
|
||||
Hash: crypto.CRandBytes(32),
|
||||
PartSetHeader: types.PartSetHeader{Hash: crypto.CRandBytes(32), Total: 2},
|
||||
},
|
||||
commitSigs)
|
||||
}
|
||||
|
||||
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
|
||||
cfg := config.ResetTestRoot("blockchain_reactor_test")
|
||||
blockDB := dbm.NewMemDB()
|
||||
state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) }
|
||||
}
|
||||
|
||||
func freshBlockStore() (*BlockStore, dbm.DB) {
|
||||
db := dbm.NewMemDB()
|
||||
return NewBlockStore(db), db
|
||||
}
|
||||
|
||||
// Shared test fixtures, populated once in TestMain and read by the tests
// below.
var (
	state       sm.State       // genesis state built from the test config
	block       *types.Block   // block at height 1 built from state
	partSet     *types.PartSet // block split into two parts
	part1       *types.Part    // first part of partSet
	part2       *types.Part    // second part of partSet
	seenCommit1 *types.Commit  // commit at height 10 with one placeholder signature
)
|
||||
|
||||
// TestMain initializes the package-level fixtures above, runs the suite, and
// then removes the temporary config root before exiting.
func TestMain(m *testing.M) {
	var cleanup cleanupFunc
	state, _, cleanup = makeStateAndBlockStore(log.NewNopLogger())
	block = factory.MakeBlock(state, 1, new(types.Commit))
	partSet = block.MakePartSet(2)
	part1 = partSet.GetPart(0)
	part2 = partSet.GetPart(1)
	seenCommit1 = makeTestCommit(10, tmtime.Now())
	code := m.Run()
	// Clean up explicitly: os.Exit does not run deferred calls.
	cleanup()
	os.Exit(code)
}
|
||||
|
||||
// TODO: This test should be simplified ...
|
||||
func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger())
|
||||
defer cleanup()
|
||||
require.Equal(t, bs.Base(), int64(0), "initially the base should be zero")
|
||||
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
|
||||
|
||||
// check there are no blocks at various heights
|
||||
noBlockHeights := []int64{0, -1, 100, 1000, 2}
|
||||
for i, height := range noBlockHeights {
|
||||
if g := bs.LoadBlock(height); g != nil {
|
||||
t.Errorf("#%d: height(%d) got a block; want nil", i, height)
|
||||
}
|
||||
}
|
||||
|
||||
// save a block
|
||||
block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit))
|
||||
validPartSet := block.MakePartSet(2)
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed")
|
||||
require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed")
|
||||
|
||||
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
|
||||
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
|
||||
_, err := uncontiguousPartSet.AddPart(part2)
|
||||
require.Error(t, err)
|
||||
|
||||
header1 := types.Header{
|
||||
Version: version.Consensus{Block: version.BlockProtocol},
|
||||
Height: 1,
|
||||
ChainID: "block_test",
|
||||
Time: tmtime.Now(),
|
||||
ProposerAddress: tmrand.Bytes(crypto.AddressSize),
|
||||
}
|
||||
|
||||
// End of setup, test data
|
||||
commitAtH10 := makeTestCommit(10, tmtime.Now())
|
||||
tuples := []struct {
|
||||
block *types.Block
|
||||
parts *types.PartSet
|
||||
seenCommit *types.Commit
|
||||
wantPanic string
|
||||
wantErr bool
|
||||
|
||||
corruptBlockInDB bool
|
||||
corruptCommitInDB bool
|
||||
corruptSeenCommitInDB bool
|
||||
eraseCommitInDB bool
|
||||
eraseSeenCommitInDB bool
|
||||
}{
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
},
|
||||
|
||||
{
|
||||
block: nil,
|
||||
wantPanic: "only save a non-nil block",
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock( // New block at height 5 in empty block store is fine
|
||||
types.Header{
|
||||
Version: version.Consensus{Block: version.BlockProtocol},
|
||||
Height: 5,
|
||||
ChainID: "block_test",
|
||||
Time: tmtime.Now(),
|
||||
ProposerAddress: tmrand.Bytes(crypto.AddressSize)},
|
||||
makeTestCommit(5, tmtime.Now()),
|
||||
),
|
||||
parts: validPartSet,
|
||||
seenCommit: makeTestCommit(5, tmtime.Now()),
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: incompletePartSet,
|
||||
wantPanic: "only save complete block", // incomplete parts
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
corruptCommitInDB: true, // Corrupt the DB's commit entry
|
||||
wantPanic: "error reading block commit",
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
wantPanic: "unmarshal to tmproto.BlockMeta",
|
||||
corruptBlockInDB: true, // Corrupt the DB's block entry
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
// Expecting no error and we want a nil back
|
||||
eraseSeenCommitInDB: true,
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
corruptSeenCommitInDB: true,
|
||||
wantPanic: "error reading block seen commit",
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
// Expecting no error and we want a nil back
|
||||
eraseCommitInDB: true,
|
||||
},
|
||||
}
|
||||
|
||||
type quad struct {
|
||||
block *types.Block
|
||||
commit *types.Commit
|
||||
meta *types.BlockMeta
|
||||
|
||||
seenCommit *types.Commit
|
||||
}
|
||||
|
||||
for i, tuple := range tuples {
|
||||
tuple := tuple
|
||||
bs, db := freshBlockStore()
|
||||
// SaveBlock
|
||||
res, err, panicErr := doFn(func() (interface{}, error) {
|
||||
bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit)
|
||||
if tuple.block == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if tuple.corruptBlockInDB {
|
||||
err := db.Set(blockMetaKey(tuple.block.Height), []byte("block-bogus"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
bBlock := bs.LoadBlock(tuple.block.Height)
|
||||
bBlockMeta := bs.LoadBlockMeta(tuple.block.Height)
|
||||
|
||||
if tuple.eraseSeenCommitInDB {
|
||||
err := db.Delete(seenCommitKey())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tuple.corruptSeenCommitInDB {
|
||||
err := db.Set(seenCommitKey(), []byte("bogus-seen-commit"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
bSeenCommit := bs.LoadSeenCommit()
|
||||
|
||||
commitHeight := tuple.block.Height - 1
|
||||
if tuple.eraseCommitInDB {
|
||||
err := db.Delete(blockCommitKey(commitHeight))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tuple.corruptCommitInDB {
|
||||
err := db.Set(blockCommitKey(commitHeight), []byte("foo-bogus"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
bCommit := bs.LoadBlockCommit(commitHeight)
|
||||
return &quad{block: bBlock, seenCommit: bSeenCommit, commit: bCommit,
|
||||
meta: bBlockMeta}, nil
|
||||
})
|
||||
|
||||
if subStr := tuple.wantPanic; subStr != "" {
|
||||
if panicErr == nil {
|
||||
t.Errorf("#%d: want a non-nil panic", i)
|
||||
} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
|
||||
t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tuple.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("#%d: got nil error", i)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Nil(t, panicErr, "#%d: unexpected panic", i)
|
||||
assert.Nil(t, err, "#%d: expecting a non-nil error", i)
|
||||
qua, ok := res.(*quad)
|
||||
if !ok || qua == nil {
|
||||
t.Errorf("#%d: got nil quad back; gotType=%T", i, res)
|
||||
continue
|
||||
}
|
||||
if tuple.eraseSeenCommitInDB {
|
||||
assert.Nil(t, qua.seenCommit,
|
||||
"erased the seenCommit in the DB hence we should get back a nil seenCommit")
|
||||
}
|
||||
if tuple.eraseCommitInDB {
|
||||
assert.Nil(t, qua.commit,
|
||||
"erased the commit in the DB hence we should get back a nil commit")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBaseMeta(t *testing.T) {
|
||||
cfg := config.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
bs := NewBlockStore(dbm.NewMemDB())
|
||||
|
||||
for h := int64(1); h <= 10; h++ {
|
||||
block := factory.MakeBlock(state, h, new(types.Commit))
|
||||
partSet := block.MakePartSet(2)
|
||||
seenCommit := makeTestCommit(h, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
}
|
||||
|
||||
pruned, err := bs.PruneBlocks(4)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 3, pruned)
|
||||
|
||||
baseBlock := bs.LoadBaseMeta()
|
||||
assert.EqualValues(t, 4, baseBlock.Header.Height)
|
||||
assert.EqualValues(t, 4, bs.Base())
|
||||
}
|
||||
|
||||
// TestLoadBlockPart checks LoadBlockPart against an empty store, a corrupted
// DB entry (expected to panic), and a properly encoded part.
func TestLoadBlockPart(t *testing.T) {
	bs, db := freshBlockStore()
	height, index := int64(10), 1
	loadPart := func() (interface{}, error) {
		part := bs.LoadBlockPart(height, index)
		return part, nil
	}

	// Initially no contents.
	// 1. Requesting for a non-existent block shouldn't fail
	res, _, panicErr := doFn(loadPart)
	require.Nil(t, panicErr, "a non-existent block part shouldn't cause a panic")
	require.Nil(t, res, "a non-existent block part should return nil")

	// 2. Next save a corrupted block then try to load it
	err := db.Set(blockPartKey(height, index), []byte("Tendermint"))
	require.NoError(t, err)
	res, _, panicErr = doFn(loadPart)
	require.NotNil(t, panicErr, "expecting a non-nil panic")
	require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed")

	// 3. A good block serialized and saved to the DB should be retrievable
	pb1, err := part1.ToProto()
	require.NoError(t, err)
	err = db.Set(blockPartKey(height, index), mustEncode(pb1))
	require.NoError(t, err)
	gotPart, _, panicErr := doFn(loadPart)
	require.Nil(t, panicErr, "an existent and proper block should not panic")
	// NOTE(review): res here is left over from step 2 (nil, since doFn's
	// result stays unset when the closure panics), so this assertion is
	// vacuous; the real retrieval check is the Equal below.
	require.Nil(t, res, "a properly saved block should return a proper block")
	require.Equal(t, gotPart.(*types.Part), part1,
		"expecting successful retrieval of previously saved block")
}
|
||||
|
||||
// TestPruneBlocks exercises PruneBlocks across batch boundaries (more than
// 1000 blocks), no-op prunes at or below the base, and the error cases at
// both ends of the valid range.
func TestPruneBlocks(t *testing.T) {
	cfg := config.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(cfg.RootDir)
	state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
	require.NoError(t, err)
	db := dbm.NewMemDB()
	bs := NewBlockStore(db)
	assert.EqualValues(t, 0, bs.Base())
	assert.EqualValues(t, 0, bs.Height())
	assert.EqualValues(t, 0, bs.Size())

	// Pruning an empty store to height 0 must fail.
	_, err = bs.PruneBlocks(0)
	require.Error(t, err)

	// make more than 1000 blocks, to test batch deletions
	for h := int64(1); h <= 1500; h++ {
		block := factory.MakeBlock(state, h, new(types.Commit))
		partSet := block.MakePartSet(2)
		seenCommit := makeTestCommit(h, tmtime.Now())
		bs.SaveBlock(block, partSet, seenCommit)
	}

	assert.EqualValues(t, 1, bs.Base())
	assert.EqualValues(t, 1500, bs.Height())
	assert.EqualValues(t, 1500, bs.Size())

	// Keep a handle on a soon-to-be-pruned block so the hash index can be
	// probed after pruning.
	prunedBlock := bs.LoadBlock(1199)

	// Check that basic pruning works
	pruned, err := bs.PruneBlocks(1200)
	require.NoError(t, err)
	assert.EqualValues(t, 1199, pruned)
	assert.EqualValues(t, 1200, bs.Base())
	assert.EqualValues(t, 1500, bs.Height())
	assert.EqualValues(t, 301, bs.Size())

	// Everything below the new base is gone; the base itself remains.
	require.NotNil(t, bs.LoadBlock(1200))
	require.Nil(t, bs.LoadBlock(1199))
	require.Nil(t, bs.LoadBlockByHash(prunedBlock.Hash()))
	require.Nil(t, bs.LoadBlockCommit(1199))
	require.Nil(t, bs.LoadBlockMeta(1199))
	require.Nil(t, bs.LoadBlockPart(1199, 1))

	for i := int64(1); i < 1200; i++ {
		require.Nil(t, bs.LoadBlock(i))
	}
	for i := int64(1200); i <= 1500; i++ {
		require.NotNil(t, bs.LoadBlock(i))
	}

	// Pruning below the current base should not error
	_, err = bs.PruneBlocks(1199)
	require.NoError(t, err)

	// Pruning to the current base should work
	pruned, err = bs.PruneBlocks(1200)
	require.NoError(t, err)
	assert.EqualValues(t, 0, pruned)

	// Pruning again should work
	pruned, err = bs.PruneBlocks(1300)
	require.NoError(t, err)
	assert.EqualValues(t, 100, pruned)
	assert.EqualValues(t, 1300, bs.Base())

	// Pruning beyond the current height should error
	_, err = bs.PruneBlocks(1501)
	require.Error(t, err)

	// Pruning to the current height should work
	pruned, err = bs.PruneBlocks(1500)
	require.NoError(t, err)
	assert.EqualValues(t, 200, pruned)
	assert.Nil(t, bs.LoadBlock(1499))
	assert.NotNil(t, bs.LoadBlock(1500))
	assert.Nil(t, bs.LoadBlock(1501))
}
|
||||
|
||||
// TestLoadBlockMeta checks LoadBlockMeta against an empty store, a corrupted
// DB entry (expected to panic), and a properly encoded entry.
func TestLoadBlockMeta(t *testing.T) {
	bs, db := freshBlockStore()
	height := int64(10)
	loadMeta := func() (interface{}, error) {
		meta := bs.LoadBlockMeta(height)
		return meta, nil
	}

	// Initially no contents.
	// 1. Requesting for a non-existent blockMeta shouldn't fail
	res, _, panicErr := doFn(loadMeta)
	require.Nil(t, panicErr, "a non-existent blockMeta shouldn't cause a panic")
	require.Nil(t, res, "a non-existent blockMeta should return nil")

	// 2. Next save a corrupted blockMeta then try to load it
	err := db.Set(blockMetaKey(height), []byte("Tendermint-Meta"))
	require.NoError(t, err)
	res, _, panicErr = doFn(loadMeta)
	require.NotNil(t, panicErr, "expecting a non-nil panic")
	require.Contains(t, panicErr.Error(), "unmarshal to tmproto.BlockMeta")

	// 3. A good blockMeta serialized and saved to the DB should be retrievable
	meta := &types.BlockMeta{Header: types.Header{
		Version: version.Consensus{
			Block: version.BlockProtocol, App: 0}, Height: 1, ProposerAddress: tmrand.Bytes(crypto.AddressSize)}}
	pbm := meta.ToProto()
	err = db.Set(blockMetaKey(height), mustEncode(pbm))
	require.NoError(t, err)
	gotMeta, _, panicErr := doFn(loadMeta)
	require.Nil(t, panicErr, "an existent and proper block should not panic")
	// NOTE(review): res is left over from step 2 (nil after the panic), so
	// this assertion is vacuous; the real check is the encoding comparison
	// at the bottom.
	require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ")
	// NOTE(review): pbmeta duplicates pbm computed above; could be reused.
	pbmeta := meta.ToProto()
	if gmeta, ok := gotMeta.(*types.BlockMeta); ok {
		pbgotMeta := gmeta.ToProto()
		require.Equal(t, mustEncode(pbmeta), mustEncode(pbgotMeta),
			"expecting successful retrieval of previously saved blockMeta")
	}
}
|
||||
|
||||
func TestBlockFetchAtHeight(t *testing.T) {
|
||||
state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger())
|
||||
defer cleanup()
|
||||
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
|
||||
block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit))
|
||||
|
||||
partSet := block.MakePartSet(2)
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
blockAtHeight := bs.LoadBlock(bs.Height())
|
||||
b1, err := block.ToProto()
|
||||
require.NoError(t, err)
|
||||
b2, err := blockAtHeight.ToProto()
|
||||
require.NoError(t, err)
|
||||
bz1 := mustEncode(b1)
|
||||
bz2 := mustEncode(b2)
|
||||
require.Equal(t, bz1, bz2)
|
||||
require.Equal(t, block.Hash(), blockAtHeight.Hash(),
|
||||
"expecting a successful load of the last saved block")
|
||||
|
||||
blockAtHeightPlus1 := bs.LoadBlock(bs.Height() + 1)
|
||||
require.Nil(t, blockAtHeightPlus1, "expecting an unsuccessful load of Height()+1")
|
||||
blockAtHeightPlus2 := bs.LoadBlock(bs.Height() + 2)
|
||||
require.Nil(t, blockAtHeightPlus2, "expecting an unsuccessful load of Height()+2")
|
||||
}
|
||||
|
||||
// TestSeenAndCanonicalCommit saves blocks at heights 3..5 and verifies that
// each SaveBlock persists the seen commit for the current height (replacing
// the previous one) and the canonical block commit for the previous height,
// while no canonical commit exists yet at the new height itself.
func TestSeenAndCanonicalCommit(t *testing.T) {
	bs, _ := freshBlockStore()
	loadCommit := func() (interface{}, error) {
		meta := bs.LoadSeenCommit()
		return meta, nil
	}

	// Initially no contents.
	// 1. Requesting for a non-existent blockMeta shouldn't fail
	res, _, panicErr := doFn(loadCommit)
	require.Nil(t, panicErr, "a non-existent blockMeta shouldn't cause a panic")
	require.Nil(t, res, "a non-existent blockMeta should return nil")

	// produce a few blocks and check that the correct seen and canonical commits
	// are persisted.
	for h := int64(3); h <= 5; h++ {
		blockCommit := makeTestCommit(h-1, tmtime.Now())
		block := factory.MakeBlock(state, h, blockCommit)
		partSet := block.MakePartSet(2)
		seenCommit := makeTestCommit(h, tmtime.Now())
		bs.SaveBlock(block, partSet, seenCommit)
		// The stored seen commit is now the one for height h.
		c3 := bs.LoadSeenCommit()
		require.NotNil(t, c3)
		require.Equal(t, h, c3.Height)
		require.Equal(t, seenCommit.Hash(), c3.Hash())
		// No canonical commit exists yet at the block's own height...
		c5 := bs.LoadBlockCommit(h)
		require.Nil(t, c5)
		// ...but the block's LastCommit was stored for the previous height.
		c6 := bs.LoadBlockCommit(h - 1)
		require.Equal(t, blockCommit.Hash(), c6.Hash())
	}

}
|
||||
|
||||
// doFn invokes fn and returns its result and error; if fn panics, the panic
// is recovered and surfaced through panicErr instead of unwinding the test.
// Error and string panic values are converted directly; anything else falls
// back to its Stringer form or, failing that, a stack trace.
func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		switch v := r.(type) {
		case error:
			panicErr = v
		case string:
			panicErr = fmt.Errorf("%s", v)
		case fmt.Stringer:
			panicErr = fmt.Errorf("%s", v)
		default:
			panicErr = fmt.Errorf("%s", debug.Stack())
		}
	}()

	res, err = fn()
	return res, err, panicErr
}
|
||||
|
||||
func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
|
||||
return &types.Block{
|
||||
Header: hdr,
|
||||
LastCommit: lastCommit,
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user