Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-07 13:55:17 +00:00)
pubsub 2.0 (#3227)
* green pubsub tests :OK:
* get rid of clientToQueryMap
* Subscribe and SubscribeUnbuffered
* start adapting other pkgs to new pubsub
* nope
* rename MsgAndTags to Message
* remove TagMap
it does not bring any additional benefits
* bring back EventSubscriber
* fix test
* fix data race in TestStartNextHeightCorrectly
```
Write at 0x00c0001c7418 by goroutine 796:
github.com/tendermint/tendermint/consensus.TestStartNextHeightCorrectly()
/go/src/github.com/tendermint/tendermint/consensus/state_test.go:1296 +0xad
testing.tRunner()
/usr/local/go/src/testing/testing.go:827 +0x162
Previous read at 0x00c0001c7418 by goroutine 858:
github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote()
/go/src/github.com/tendermint/tendermint/consensus/state.go:1631 +0x1366
github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote()
/go/src/github.com/tendermint/tendermint/consensus/state.go:1476 +0x8f
github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg()
/go/src/github.com/tendermint/tendermint/consensus/state.go:667 +0xa1e
github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine()
/go/src/github.com/tendermint/tendermint/consensus/state.go:628 +0x794
Goroutine 796 (running) created at:
testing.(*T).Run()
/usr/local/go/src/testing/testing.go:878 +0x659
testing.runTests.func1()
/usr/local/go/src/testing/testing.go:1119 +0xa8
testing.tRunner()
/usr/local/go/src/testing/testing.go:827 +0x162
testing.runTests()
/usr/local/go/src/testing/testing.go:1117 +0x4ee
testing.(*M).Run()
/usr/local/go/src/testing/testing.go:1034 +0x2ee
main.main()
_testmain.go:214 +0x332
Goroutine 858 (running) created at:
github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines()
/go/src/github.com/tendermint/tendermint/consensus/state.go:334 +0x221
github.com/tendermint/tendermint/consensus.startTestRound()
/go/src/github.com/tendermint/tendermint/consensus/common_test.go:122 +0x63
github.com/tendermint/tendermint/consensus.TestStateFullRound1()
/go/src/github.com/tendermint/tendermint/consensus/state_test.go:255 +0x397
testing.tRunner()
/usr/local/go/src/testing/testing.go:827 +0x162
```
* fixes after my own review
* fix formatting
* wait 100ms before kicking a subscriber out
+ a test for indexer_service
* fixes after my second review
* no timeout
* add changelog entries
* fix merge conflicts
* fix typos after Thane's review
Co-Authored-By: melekes <anton.kalyaev@gmail.com>
* reformat code
* rewrite indexer service in the attempt to fix failing test
https://github.com/tendermint/tendermint/pull/3227/#issuecomment-462316527
* Revert "rewrite indexer service in the attempt to fix failing test"
This reverts commit 0d9107a098.
* another attempt to fix indexer
* fixes after Ethan's review
* use unbuffered channel when indexing transactions
Refs https://github.com/tendermint/tendermint/pull/3227#discussion_r258786716
* add a comment for EventBus#SubscribeUnbuffered
* format code
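Not part of the change set: a minimal usage sketch of the new subscription API, based on the call sites updated in the diff below. `Subscribe` now returns a `types.Subscription` instead of filling a caller-supplied channel, and events arrive wrapped in pubsub `Message` values. The package name, `exampleConsumer`, and the `"example"` subscriber string are illustrative only.

```go
// Sketch only, assuming the API shapes visible in the diff below.
package example

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func exampleConsumer(eventBus *types.EventBus) error {
	// Old API: the caller allocated and passed in its own channel, e.g.
	//   ch := make(chan interface{}, 1)
	//   err := eventBus.Subscribe(ctx, "example", types.EventQueryNewBlock, ch)
	// New API: Subscribe returns a types.Subscription with Out() and Cancelled().
	ctx := context.Background()
	sub, err := eventBus.Subscribe(ctx, "example", types.EventQueryNewBlock)
	if err != nil {
		return err
	}
	for {
		select {
		case msg := <-sub.Out():
			// Payloads are wrapped in a pubsub Message; Data() still carries
			// the familiar concrete event types.
			block := msg.Data().(types.EventDataNewBlock)
			fmt.Println("new block at height", block.Block.Height)
		case <-sub.Cancelled():
			// Slow buffered subscribers are kicked out (after the 100ms grace
			// period mentioned above) and observe Cancelled().
			return fmt.Errorf("subscription cancelled")
		}
	}
}
```

SubscribeUnbuffered (used for transaction indexing) presumably follows the same shape but, being backed by an unbuffered channel, trades dropping messages for blocking delivery; see the EventBus#SubscribeUnbuffered comment added in this PR.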
Committed by: Ethan Buchman
Parent: 67fd428354
Commit: 2137ecc130
@@ -46,7 +46,7 @@ func TestByzantine(t *testing.T) {
switches[i].SetLogger(p2pLogger.With("validator", i))
}

eventChans := make([]chan interface{}, N)
blocksSubs := make([]types.Subscription, N)
reactors := make([]p2p.Reactor, N)
for i := 0; i < N; i++ {
// make first val byzantine
@@ -65,8 +65,8 @@ func TestByzantine(t *testing.T) {
eventBus := css[i].eventBus
eventBus.SetLogger(logger.With("module", "events", "validator", i))

eventChans[i] = make(chan interface{}, 1)
err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
var err error
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)

conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
@@ -131,7 +131,7 @@ func TestByzantine(t *testing.T) {
p2p.Connect2Switches(switches, ind1, ind2)

// wait for someone in the big partition (B) to make a block
<-eventChans[ind2]
<-blocksSubs[ind2].Out()

t.Log("A block has been committed. Healing partition")
p2p.Connect2Switches(switches, ind0, ind1)
@@ -143,7 +143,7 @@ func TestByzantine(t *testing.T) {
wg.Add(2)
for i := 1; i < N-1; i++ {
go func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
wg.Done()
}(i)
}

@@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"sync"
"testing"
@@ -25,6 +24,7 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
@@ -227,24 +227,22 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
cs.mtx.Unlock()
}

// genesis
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := make(chan interface{})
err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0)
func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
}
voteCh := make(chan interface{})
ch := make(chan tmpubsub.Message)
go func() {
for v := range voteCh0 {
vote := v.(types.EventDataVote)
for msg := range votesSub.Out() {
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
ch <- msg
}
}
}()
return voteCh
return ch
}

//-------------------------------------------------------------------------------
@@ -321,7 +319,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {

//-------------------------------------------------------------------------------

func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration,
func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
errorMessage string) {
select {
case <-time.After(timeout):
@@ -331,28 +329,28 @@ func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration,
}
}

func ensureNoNewEventOnChannel(ch <-chan interface{}) {
func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) {
ensureNoNewEvent(
ch,
ensureTimeout,
"We should be stuck waiting, not receiving new event on the channel")
}

func ensureNoNewRoundStep(stepCh <-chan interface{}) {
func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) {
ensureNoNewEvent(
stepCh,
ensureTimeout,
"We should be stuck waiting, not receiving NewRoundStep event")
}

func ensureNoNewUnlock(unlockCh <-chan interface{}) {
func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
ensureNoNewEvent(
unlockCh,
ensureTimeout,
"We should be stuck waiting, not receiving Unlock event")
}

func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) {
func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
ensureNoNewEvent(
stepCh,
@@ -360,142 +358,157 @@ func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) {
"We should be stuck waiting, not receiving NewTimeout event")
}

func ensureNewEvent(
ch <-chan interface{},
height int64,
round int,
timeout time.Duration,
errorMessage string) {

func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int, timeout time.Duration, errorMessage string) {
select {
case <-time.After(timeout):
panic(errorMessage)
case ev := <-ch:
rs, ok := ev.(types.EventDataRoundState)
case msg := <-ch:
roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
if !ok {
panic(
fmt.Sprintf(
"expected a EventDataRoundState, got %v.Wrong subscription channel?",
reflect.TypeOf(rs)))
panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?",
msg.Data()))
}
if rs.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
if roundStateEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height))
}
if rs.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
if roundStateEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round))
}
// TODO: We could check also for a step at this point!
}
}

func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewRound event")
case ev := <-roundCh:
rs, ok := ev.(types.EventDataNewRound)
case msg := <-roundCh:
newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
if !ok {
panic(
fmt.Sprintf(
"expected a EventDataNewRound, got %v.Wrong subscription channel?",
reflect.TypeOf(rs)))
panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?",
msg.Data()))
}
if rs.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
if newRoundEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height))
}
if rs.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
if newRoundEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round))
}
}
}

func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}

func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) {
func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case ev := <-proposalCh:
rs, ok := ev.(types.EventDataCompleteProposal)
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
panic(
fmt.Sprintf(
"expected a EventDataCompleteProposal, got %v.Wrong subscription channel?",
reflect.TypeOf(rs)))
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
}
if rs.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
if proposalEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
}
if rs.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
if proposalEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
}
}
}

func ensureNewValidBlock(validBlockCh <-chan interface{}, height int64, round int) {
func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int) {
ensureNewEvent(validBlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewValidBlock event")
}

func ensureNewBlock(blockCh <-chan interface{}, height int64) {
func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlock event")
case ev := <-blockCh:
block, ok := ev.(types.EventDataNewBlock)
case msg := <-blockCh:
blockEvent, ok := msg.Data().(types.EventDataNewBlock)
if !ok {
panic(fmt.Sprintf("expected a *types.EventDataNewBlock, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(block)))
panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?",
msg.Data()))
}
if block.Block.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, block.Block.Height))
if blockEvent.Block.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height))
}
}
}

func ensureNewBlockHeader(blockCh <-chan interface{}, height int64, blockHash cmn.HexBytes) {
func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash cmn.HexBytes) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlockHeader event")
case ev := <-blockCh:
blockHeader, ok := ev.(types.EventDataNewBlockHeader)
case msg := <-blockCh:
blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
if !ok {
panic(fmt.Sprintf("expected a *types.EventDataNewBlockHeader, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(blockHeader)))
panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?",
msg.Data()))
}
if blockHeader.Header.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockHeader.Header.Height))
if blockHeaderEvent.Header.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height))
}
if !bytes.Equal(blockHeader.Header.Hash(), blockHash) {
panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeader.Header.Hash()))
if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) {
panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash()))
}
}
}

func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) {
func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int) {
ensureNewEvent(unlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewUnlock event")
}

func ensureVote(voteCh <-chan interface{}, height int64, round int,
func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int, propID types.BlockID) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
}
if proposalEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
}
if proposalEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
}
if !proposalEvent.BlockID.Equals(propID) {
panic("Proposed block does not match expected block")
}
}
}

func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int) {
ensureVote(voteCh, height, round, types.PrecommitType)
}

func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int) {
ensureVote(voteCh, height, round, types.PrevoteType)
}

func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int,
voteType types.SignedMsgType) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
case v := <-voteCh:
edv, ok := v.(types.EventDataVote)
case msg := <-voteCh:
voteEvent, ok := msg.Data().(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a *types.Vote, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(v)))
panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?",
msg.Data()))
}
vote := edv.Vote
vote := voteEvent.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
@@ -508,39 +521,7 @@ func ensureVote(voteCh <-chan interface{}, height int64, round int,
}
}

func ensureProposal(proposalCh <-chan interface{}, height int64, round int, propId types.BlockID) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case ev := <-proposalCh:
rs, ok := ev.(types.EventDataCompleteProposal)
if !ok {
panic(
fmt.Sprintf(
"expected a EventDataCompleteProposal, got %v.Wrong subscription channel?",
reflect.TypeOf(rs)))
}
if rs.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
}
if rs.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
}
if !rs.BlockID.Equals(propId) {
panic("Proposed block does not match expected block")
}
}
}

func ensurePrecommit(voteCh <-chan interface{}, height int64, round int) {
ensureVote(voteCh, height, round, types.PrecommitType)
}

func ensurePrevote(voteCh <-chan interface{}, height int64, round int) {
ensureVote(voteCh, height, round, types.PrevoteType)
}

func ensureNewEventOnChannel(ch <-chan interface{}) {
func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for new activity on the channel")

@@ -117,9 +117,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
for nTxs := 0; nTxs < NTxs; {
ticker := time.NewTicker(time.Second * 30)
select {
case b := <-newBlockCh:
evt := b.(types.EventDataNewBlock)
nTxs += int(evt.Block.Header.NumTxs)
case msg := <-newBlockCh:
blockEvent := msg.Data().(types.EventDataNewBlock)
nTxs += int(blockEvent.Block.Header.NumTxs)
case <-ticker.C:
panic("Timed out waiting to commit blocks with transactions")
}

@@ -30,9 +30,13 @@ import (
//----------------------------------------------
// in-process testnets

func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, []*types.EventBus) {
func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
[]*ConsensusReactor,
[]types.Subscription,
[]*types.EventBus,
) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, N)
for i := 0; i < N; i++ {
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
@@ -44,9 +48,9 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*Consensus
eventBuses[i] = css[i].eventBus
reactors[i].SetEventBus(eventBuses[i])

eventChans[i] = make(chan interface{}, 1)
err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
blocksSubs = append(blocksSubs, blocksSub)
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
@@ -63,7 +67,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*Consensus
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s, 0)
}
return reactors, eventChans, eventBuses
return reactors, blocksSubs, eventBuses
}

func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
@@ -84,11 +88,11 @@ func TestReactorBasic(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)
}

@@ -161,20 +165,20 @@ func TestReactorWithEvidence(t *testing.T) {
css[i] = cs
}

reactors, eventChans, eventBuses := startConsensusNet(t, css, nValidators)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// wait till everyone makes the first new block with no evidence
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
msg := <-blocksSubs[j].Out()
block := msg.Data().(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) == 0)
}, css)

// second block should have evidence
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
msg := <-blocksSubs[j].Out()
block := msg.Data().(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) > 0)
}, css)
}
@@ -221,7 +225,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
c.Consensus.CreateEmptyBlocks = false
})
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// send a tx
@@ -231,7 +235,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)
}

@@ -240,12 +244,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)

// Get peer
@@ -265,7 +269,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
logger := log.TestingLogger()
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
defer stopConsensusNet(logger, reactors, eventBuses)

// map of active validators
@@ -277,7 +281,7 @@ func TestReactorVotingPowerChange(t *testing.T) {

// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)

//---------------------------------------------------------------------------
@@ -288,10 +292,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -300,10 +304,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -312,10 +316,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -329,7 +333,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
defer cleanup()
logger := log.TestingLogger()

reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
defer stopConsensusNet(logger, reactors, eventBuses)

// map of active validators
@@ -341,7 +345,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {

// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)

//---------------------------------------------------------------------------
@@ -354,22 +358,22 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// wait till everyone makes block 2
// ensure the commit includes all validators
// send newValTx to change vals in block 3
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

// wait till everyone makes block 3.
// it includes the commit for block 2, which is by the original validator set
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx1)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

// wait till everyone makes block 4.
// it includes the commit for block 3, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

// the commits for block 4 should be with the updated validator set
activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

// wait till everyone makes block 5
// it includes the commit for block 4, which should have the updated validator set
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

//---------------------------------------------------------------------------
logger.Info("---------------------------- Testing changing the voting power of one validator")
@@ -379,10 +383,10 @@ func TestReactorValidatorSetChanges(t *testing.T) {
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
@@ -399,12 +403,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

//---------------------------------------------------------------------------
logger.Info("---------------------------- Testing removing two validators at once")
@@ -412,12 +416,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
delete(activeVals, string(newValidatorPubKey2.Address()))
delete(activeVals, string(newValidatorPubKey3.Address()))
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
@@ -430,23 +434,27 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
css[i].config.SkipTimeoutCommit = false
}

reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1)
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(j int) {
<-eventChans[j]
<-blocksSubs[j].Out()
}, css)
}

func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
func waitForAndValidateBlock(
t *testing.T,
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
txs ...[]byte,
) {
timeoutWaitGroup(t, n, func(j int) {
css[j].Logger.Debug("waitForAndValidateBlock")
newBlockI, ok := <-eventChans[j]
if !ok {
return
}
newBlock := newBlockI.(types.EventDataNewBlock).Block
msg := <-blocksSubs[j].Out()
newBlock := msg.Data().(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
@@ -457,17 +465,21 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
}, css)
}

func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
func waitForAndValidateBlockWithTx(
t *testing.T,
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
txs ...[]byte,
) {
timeoutWaitGroup(t, n, func(j int) {
ntxs := 0
BLOCK_TX_LOOP:
for {
css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
newBlockI, ok := <-eventChans[j]
if !ok {
return
}
newBlock := newBlockI.(types.EventDataNewBlock).Block
msg := <-blocksSubs[j].Out()
newBlock := msg.Data().(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
@@ -488,18 +500,21 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) {
func waitForBlockWithUpdatedValsAndValidateIt(
t *testing.T,
n int,
updatedVals map[string]struct{},
blocksSubs []types.Subscription,
css []*ConsensusState,
) {
timeoutWaitGroup(t, n, func(j int) {

var newBlock *types.Block
LOOP:
for {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
newBlockI, ok := <-eventChans[j]
if !ok {
return
}
newBlock = newBlockI.(types.EventDataNewBlock).Block
msg := <-blocksSubs[j].Out()
newBlock = msg.Data().(types.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
break LOOP

@@ -42,7 +42,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli)
// Unmarshal and apply a single message to the consensus state as if it were
// received in receiveRoutine. Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running.
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
// Skip meta messages which exist for demarcating boundaries.
if _, ok := msg.Msg.(EndHeightMessage); ok {
return nil
@@ -54,15 +54,17 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
ticker := time.After(time.Second * 2)
if newStepCh != nil {
if newStepSub != nil {
select {
case mi := <-newStepCh:
m2 := mi.(types.EventDataRoundState)
case stepMsg := <-newStepSub.Out():
m2 := stepMsg.Data().(types.EventDataRoundState)
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("RoundState mismatch. Got %v; Expected %v", m2, m)
}
case <-newStepSub.Cancelled():
return fmt.Errorf("Failed to read off newStepSub.Out(). newStepSub was cancelled")
case <-ticker:
return fmt.Errorf("Failed to read off newStepCh")
return fmt.Errorf("Failed to read off newStepSub.Out()")
}
}
case msgInfo:

@@ -51,25 +51,13 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
cs.startForReplay()

// ensure all new step events are regenerated as expected
newStepCh := make(chan interface{}, 1)

ctx := context.Background()
err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
if err != nil {
return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
}
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()
defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)

// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
@@ -94,7 +82,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
return err
}

if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil {
return err
}

@@ -131,7 +119,7 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.S
}

// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
func (pb *playback) replayReset(count int, newStepSub types.Subscription) error {
pb.cs.Stop()
pb.cs.Wait()

@@ -161,7 +149,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
} else if err != nil {
return err
}
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil {
return err
}
pb.count++
@@ -225,27 +213,15 @@ func (pb *playback) replayConsoleLoop() int {

ctx := context.Background()
// ensure all new step events are regenerated as expected
newStepCh := make(chan interface{}, 1)

err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
if err != nil {
cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
}
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()
defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)

if len(tokens) == 1 {
if err := pb.replayReset(1, newStepCh); err != nil {
if err := pb.replayReset(1, newStepSub); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
} else {
@@ -255,7 +231,7 @@ func (pb *playback) replayConsoleLoop() int {
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
if err := pb.replayReset(i, newStepCh); err != nil {
if err := pb.replayReset(i, newStepSub); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
}

@@ -78,13 +78,14 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
// block, or else something is wrong.
newBlockCh := make(chan interface{}, 1)
err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)
newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
select {
case <-newBlockCh:
case <-newBlockSub.Out():
case <-newBlockSub.Cancelled():
t.Fatal("newBlockSub was cancelled")
case <-time.After(120 * time.Second):
t.Fatalf("Timed out waiting for new block (see trace above)")
t.Fatal("Timed out waiting for new block (see trace above)")
}
}

@@ -1620,11 +1620,10 @@ func TestStateOutputVoteStats(t *testing.T) {
}

// subscribe subscribes test client to the given query and returns a channel with cap = 1.
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
out := make(chan interface{}, 1)
err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
}
return out
return sub.Out()
}