package blocksync

import (
	"fmt"
	"os"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/test"
	"github.com/tendermint/tendermint/libs/log"
	mpmocks "github.com/tendermint/tendermint/mempool/mocks"
	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)
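
// config is set by each test via test.ResetTestRoot before any reactor is built.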
var config *cfg.Config
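
// randGenesisDoc builds a genesis document with numValidators random
// validators (and their matching private validators), sorted by address.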
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     test.DefaultTestChainID,
		Validators:  validators,
	}, privValidators
}
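
// ReactorPair couples a blocksync Reactor with the proxy app connections
// backing it, so tests can shut both down together.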
type ReactorPair struct {
	reactor *Reactor
	app     proxy.AppConns
}
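
// newReactor builds a blocksync reactor whose in-memory block store is
// pre-populated with maxBlockHeight blocks committed by the single
// private validator.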
func newReactor(
	t *testing.T,
	logger log.Logger,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) ReactorPair {
	if len(privVals) != 1 {
		panic("only one validator is supported")
	}
	app := abci.NewBaseApplication()
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
	err := proxyApp.Start()
	if err != nil {
		panic(fmt.Errorf("error starting app: %w", err))
	}

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	blockStore := store.NewBlockStore(blockDB)

	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
	}
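
	// Stub out the mempool: blocksync only needs Lock/Unlock, FlushAppConn,
	// and Update to succeed, so every call is a no-op.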
	mp := &mpmocks.Mempool{}
	mp.On("Lock").Return()
	mp.On("Unlock").Return()
	mp.On("FlushAppConn", mock.Anything).Return(nil)
	mp.On("Update",
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything).Return(nil)

	// Make the Reactor itself.
	// NOTE we have to create and commit the blocks first because
	// pool.height is determined from the store.
	fastSync := true
	db := dbm.NewMemDB()
	stateStore = sm.NewStore(db, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
		mp, sm.EmptyEvidencePool{}, blockStore)
	if err = stateStore.Save(state); err != nil {
		panic(err)
	}

	// The commit we are building for the current height.
	seenExtCommit := &types.ExtendedCommit{}

	// let's add some blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastExtCommit := seenExtCommit.Clone()

		thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address)
		thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		require.NoError(t, err)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		// Simulate a commit for the current height
		pubKey, err := privVals[0].GetPubKey()
		if err != nil {
			panic(err)
		}
		addr := pubKey.Address()
		idx, _ := state.Validators.GetByAddress(addr)
		vote, err := types.MakeVote(
			privVals[0],
			thisBlock.Header.ChainID,
			idx,
			thisBlock.Header.Height,
			0,
			tmproto.PrecommitType,
			blockID,
			time.Now(),
		)
		if err != nil {
			panic(err)
		}
		// This vote becomes the LastCommit of the block at the next height.
		seenExtCommit = &types.ExtendedCommit{
			Height:             vote.Height,
			Round:              vote.Round,
			BlockID:            blockID,
			ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()},
		}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(fmt.Errorf("error applying block: %w", err))
		}

		blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit)
	}

	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
	bcReactor.SetLogger(logger.With("module", "blocksync"))

	return ReactorPair{bcReactor, proxyApp}
}
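
// TestNoBlockResponse syncs an empty reactor from a fully populated peer and
// then checks that stored heights load while out-of-range heights do not.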
func TestNoBlockResponse(t *testing.T) {
	config = test.ResetTestRoot("blocksync_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(65)

	reactorPairs := make([]ReactorPair, 2)
	reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
		return s
	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			err := r.reactor.Stop()
			require.NoError(t, err)
			err = r.app.Stop()
			require.NoError(t, err)
		}
	}()
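
	// Heights we expect (or do not expect) to find in the synced store.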
	tests := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{100, false},
	}
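
	// Wait until the empty reactor has caught up with its peer.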
	for {
		if reactorPairs[1].reactor.pool.IsCaughtUp() {
			break
		}

		time.Sleep(10 * time.Millisecond)
	}

	assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())

	for _, tt := range tests {
		block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
		if tt.existent {
			assert.True(t, block != nil)
		} else {
			assert.True(t, block == nil)
		}
	}
}

// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
	config = test.ResetTestRoot("blocksync_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(148)

	// Other chain needs a different validator set
	otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
	otherChain := newReactor(t, log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)

	defer func() {
		// otherChain's reactor is never started, so stopping it is expected to fail.
		err := otherChain.reactor.Stop()
		require.Error(t, err)
		err = otherChain.app.Stop()
		require.NoError(t, err)
	}()

	reactorPairs := make([]ReactorPair, 4)
	reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
	reactorPairs[2] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
	reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

	switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
		return s
	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			err := r.reactor.Stop()
			require.NoError(t, err)
			err = r.app.Stop()
			require.NoError(t, err)
		}
	}()
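
	// Wait until all four reactors have caught up with one another.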
	for {
		time.Sleep(1 * time.Second)
		caughtUp := true
		for _, r := range reactorPairs {
			if !r.reactor.pool.IsCaughtUp() {
				caughtUp = false
			}
		}

		if caughtUp {
			break
		}
	}

	// At this point, reactors[0-3] are all caught up to the latest height.
	assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())

	// Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a
	// mutex is a data race, but it can't be easily avoided.
	reactorPairs[3].reactor.store = otherChain.reactor.store
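
	// Connect a fresh reactor to the existing peers; it should drop any peer
	// that serves it a bad block, i.e. reactorPairs[3], whose store now holds
	// the other chain's blocks.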
	lastReactorPair := newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor)
		return s
	}, p2p.Connect2Switches)...)

	for i := 0; i < len(reactorPairs)-1; i++ {
		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
	}

	for {
		if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
			break
		}

		time.Sleep(1 * time.Second)
	}

	assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
}