Compare commits

...

23 Commits

Author SHA1 Message Date
William Banfield
48a4e2163d metrics: add metric for proposal timestamp difference 2022-01-12 18:51:58 -05:00
William Banfield
d9792cdb21 proto: rename timing params to synchrony params (#7554) 2022-01-12 12:02:01 -05:00
Anca Zamfir
2617a5cf33 Prevote nil if proposal is not timely (#7415)
* Prevote nil if not timely

* William's suggestion to get the proposal from the proposer instead of
generating it.

* Don't check rhs for genesis block

* Update IsTimely to match the specification

* Fix proposal tests

* Add more timely tests and check votes

* Mark proposal invalid in SetProposal, fix in the future test

* save proposal time on roundstate

* received -> receive

* always reset proposal time

* Add IsTimely test for genesis proposal

* Check timely before ValidateBlock

* Review comments from Daniel

Co-authored-by: William Banfield <wbanfield@gmail.com>
2022-01-11 11:44:07 +01:00
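The "timely" rule referenced in the commit above comes from the proposer-based timestamps synchrony bounds: a proposal received at local time `recvTime` is timely if it falls within `[proposalTime - Precision, proposalTime + Precision + MessageDelay]`. Below is a minimal, self-contained sketch of that rule; the `SynchronyParams` fields and the `isTimely` helper are illustrative names, not the repository's exact API, and the genesis special case mentioned in the bullets ("Don't check rhs for genesis block") is omitted.

```go
package main

import (
	"fmt"
	"time"
)

// SynchronyParams mirrors the two consensus parameters the timely check
// relies on: the assumed clock precision and the maximum message delay.
// Field names here are illustrative.
type SynchronyParams struct {
	Precision    time.Duration
	MessageDelay time.Duration
}

// isTimely reports whether a proposal carrying proposalTime in its header,
// received locally at recvTime, satisfies the PBTS bound:
//
//	proposalTime - Precision <= recvTime <= proposalTime + Precision + MessageDelay
func isTimely(proposalTime, recvTime time.Time, sp SynchronyParams) bool {
	lhs := proposalTime.Add(-sp.Precision)
	rhs := proposalTime.Add(sp.Precision).Add(sp.MessageDelay)
	return !recvTime.Before(lhs) && !recvTime.After(rhs)
}

func main() {
	sp := SynchronyParams{Precision: 10 * time.Millisecond, MessageDelay: 500 * time.Millisecond}
	proposed := time.Now()
	fmt.Println(isTimely(proposed, proposed.Add(100*time.Millisecond), sp)) // true: within the bound
	fmt.Println(isTimely(proposed, proposed.Add(2*time.Second), sp))        // false: too late, prevote nil
}
```

A validator that finds the proposal untimely prevotes nil, as the commit title says.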
William Banfield
9ade45d81b consensus: check that proposal is non-nil before voting (#7480) 2021-12-22 11:09:57 -05:00
Anca Zamfir
e6d8b7c043 Fix pbts tests (#7413)
* Allow nil block ID check in ensureProposalWithTimeout

* William's suggestion to get the proposal from the proposer instead of
generating it.

* Remove error check on service stop
2021-12-21 20:39:11 +01:00
Anca Zamfir
e8a37cefb2 Merge commit 'b03dced9a' into wb/proposer-based-timestamps 2021-12-18 02:27:36 +01:00
William Banfield
56a20056ec internal/consensus: prevote nil if proposal timestamp does not match (#7391)
This change updates the proposal logic to use the block's timestamp in the proposal message. It also adds validation logic to the prevote step to check that the block's timestamp matches the proposal message's timestamp.
2021-12-15 14:46:55 -05:00
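As a rough illustration of the validation described in the commit message above, the prevote step can reject a proposal whose message timestamp disagrees with the proposed block header's time. The types and helper below are simplified stand-ins, not the actual internal/consensus code:

```go
package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the proposal message and the proposed block header.
type proposal struct{ Timestamp time.Time }
type blockHeader struct{ Time time.Time }

// shouldPrevoteNil returns true when the prevote step should vote nil
// because the proposal message timestamp does not match the block time.
func shouldPrevoteNil(p proposal, h blockHeader) bool {
	return !p.Timestamp.Equal(h.Time)
}

func main() {
	now := time.Now()
	fmt.Println(shouldPrevoteNil(proposal{now}, blockHeader{now}))                  // false: timestamps match
	fmt.Println(shouldPrevoteNil(proposal{now}, blockHeader{now.Add(time.Second)})) // true: mismatch, prevote nil
}
```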
William Banfield
e4598b1de1 internal/consensus: remove proposal wait time (#7418) 2021-12-09 17:18:41 -05:00
Anca Zamfir
b03dced9af Fix compilation 2021-12-09 08:13:49 -05:00
Anca Zamfir
8f204cf5c7 Remove MedianTime, set block time to Now() (#7382)
* Remove MedianTime, set block time to Now()

* Fix goimports

* Fix import ordering
2021-12-09 05:01:27 +01:00
William Banfield
e91bac3565 internal/consensus: proposer waits for previous block time (#7376)
This change introduces the logic to have the proposer wait until the previous block time has passed before attempting to propose the next block.

The change achieves this by adding a new clause to the enterPropose state machine method. The method now checks whether the validator is the proposer and whether its clock is behind the previous block's time. If the clock is behind the previous block time, it schedules a timeout to re-enter enterPropose after enough time has passed.
2021-12-08 11:23:33 -05:00
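A minimal sketch of the waiting rule described above, assuming only that the proposer compares its local clock with the previous block's timestamp; `proposerWaitTime` and its signature are illustrative, not the method added in #7376:

```go
package main

import (
	"fmt"
	"time"
)

// proposerWaitTime returns how long the proposer should wait before
// entering the propose step: if the local clock is still behind the
// previous block's timestamp, wait out the difference; otherwise
// propose immediately.
func proposerWaitTime(now, previousBlockTime time.Time) time.Duration {
	if now.Before(previousBlockTime) {
		return previousBlockTime.Sub(now)
	}
	return 0
}

func main() {
	now := time.Now()
	fmt.Println(proposerWaitTime(now, now.Add(50*time.Millisecond))) // ~50ms: schedule a timeout and re-enter propose
	fmt.Println(proposerWaitTime(now, now.Add(-time.Second)))        // 0: previous block time already passed
}
```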
William Banfield
a9b2bbd70d types: add new consensus params from proto (#7354)
This change adds the new TimingParams proto messages. These new messages were built using the wb/proposer-based-timestamps branch on the spec repo.
This change also adds validation that these values are positive when parsed and adds the new parameters into the existing tests.
2021-12-05 19:20:37 -05:00
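A small sketch of the positivity validation the commit describes for the new parameters; the struct and method names are illustrative, not the generated proto types:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// timingParams mirrors the two new consensus parameters added by this change.
type timingParams struct {
	Precision    time.Duration
	MessageDelay time.Duration
}

// validate rejects non-positive values, matching the validation the
// commit message describes for the parsed parameters.
func (p timingParams) validate() error {
	if p.Precision <= 0 {
		return errors.New("timing params: precision must be greater than 0")
	}
	if p.MessageDelay <= 0 {
		return errors.New("timing params: message delay must be greater than 0")
	}
	return nil
}

func main() {
	fmt.Println(timingParams{Precision: 10 * time.Millisecond, MessageDelay: 500 * time.Millisecond}.validate()) // <nil>
	fmt.Println(timingParams{Precision: 0, MessageDelay: 500 * time.Millisecond}.validate())                     // error
}
```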
William Banfield
a9aab99b41 internal/consensus: refactor ensure functions to use a common function (#7373)
* internal/consensus: refactor the common_test functions to use a single timeout function

* remove ensurePrecommit

* Update internal/consensus/common_test.go

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>

* join lines for fatal messages

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-12-02 19:09:02 -05:00
Sam Kleinman
884e4e99ca tools: remove tm-signer-harness (#7370) 2021-12-02 15:36:20 -05:00
William Banfield
591cc87669 types: remove accuracy from timestamp params (#7341) 2021-11-30 11:52:51 -05:00
William Banfield
1d68f340a6 consensus: ensure proposal receipt waits for maxWaitingTime (#7307)
* consensus: ensure proposal receipt waits for maxWaitingTime

* rebase fixups

* lint++

* lint++

* register result chan separately

* lint++
2021-11-30 09:45:36 -05:00
William Banfield
6be5efaaa9 consensus: add calculation for proposal step waits from pbts (#7290)
* initial proposerWaitsUntil implementation

* switch to duration for easier use with timeout scheduling

* add proposal step waiting time with tests

* minor aesthetic change to IsTimely

* minor language fix

* Update internal/consensus/state.go

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>

* reword comment

* change accuracy to precision

* move tests to separate pbts test file

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-11-24 18:47:34 -05:00
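One plausible shape of the "proposal step waiting time" this commit adds, heavily simplified: a validator allows extra time for the proposal to arrive because the proposer may hold off until the previous block time, and the message may then take up to the synchrony message delay (plus clock precision) to reach it. The function name, signature, and exact formula below are assumptions for illustration, not the code from #7290:

```go
package main

import (
	"fmt"
	"time"
)

// proposalStepWaitingTime sketches the extra time a validator allows for a
// proposal to arrive: the proposer may hold off until the previous block
// time, and the message may then take up to messageDelay (plus clock
// precision) to arrive. The real calculation lives in internal/consensus;
// this is only an approximation.
func proposalStepWaitingTime(now, previousBlockTime time.Time, precision, messageDelay time.Duration) time.Duration {
	deadline := previousBlockTime.Add(precision).Add(messageDelay)
	if deadline.Before(now) {
		return 0
	}
	return deadline.Sub(now)
}

func main() {
	now := time.Now()
	prev := now.Add(200 * time.Millisecond) // previous block time still in the future
	fmt.Println(proposalStepWaitingTime(now, prev, 10*time.Millisecond, 500*time.Millisecond))
}
```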
William Banfield
24c40b5b7b consensus: refactor the fake validator to take a clock source (#7300) 2021-11-24 18:46:46 -05:00
William Banfield
d4e712e4f1 factory: simplify validator and genesis factory functions (#7305) 2021-11-24 18:27:30 -05:00
William Banfield
15d6aefaf9 state: add an 'IsTimely' method to implement the 'timely' check for proposer-based timestamps (#7170)
* state: add an IsTimely function to implement the check for timely in proposer-based timestamps

* move time checks into block.go and add time source mechanism

* timestamp params comment

* add todo related to pbts spec and timestamp params

* remove old istimely

* switch to using built in before function

* lint++

* wip

* move into proposal and create a default set of params

* defer using default cons params for now
2021-11-24 17:59:25 -05:00
William Banfield
0c3401c5a4 consensus: update state to prevote nil when proposal block does not match locked block. (#6986)
* add failing test

* tweak comments in failing test

* failing test comment

* initial attempt at removing prevote locked block logic

* comment out broken function

* undo reset on prevotes

* fixing TestProposeValidBlock test

* update test for completed POL update

* comment updates

* further unlock testing

* update comments

* Update internal/consensus/state.go

* spacing nit

* comment cleanup

* nil check in addVote

* update unlock description

* update precommit on relock comment

* add ensure new timeout back

* rename IsZero to IsNil and replace uses of block len check with helper

* add testing.T to new assertions

* begin removing unlock condition

* fix TestStateProposerSelection2 to precommit for nil correctly

* remove erroneous sleep

* update TestStatePOL comment

* update relock test to be more clear

* add _ into test names

* rename slashing

* update no relock function to be cleaner

* do not relock on old proposal test cleanup

* con state name update

* remove all references to unlock

* update test comments to include new

* add relock test

* add ensureRelock to common_test

* remove all event unlock

* remove unlock checks

* no lint add space

* lint ++

* add test for nil prevote on different proposal

* fix prevote nil condition

* fix defaultDoPrevote

* state_test.go fixes to accommodate prevoting for nil

* add failing test for POL from previous round case

* update prevote logic to prevote POL from previous round

* state.go comment fixes

* update validatePrevotes to correctly look for nil

* update new test name and comment

* update POLFromPreviousRound test

* fixes post merge

* fix spacing

* make the linter happy

* change prevote log message

* update prevote nil debug line

* update enterPrevote comment

* lint

* Update internal/consensus/state.go

Co-authored-by: Dev Ojha <ValarDragon@users.noreply.github.com>

* Update internal/consensus/state.go

Co-authored-by: Dev Ojha <ValarDragon@users.noreply.github.com>

* add english description of alg rules

* Update internal/consensus/state.go

Co-authored-by: Dev Ojha <ValarDragon@users.noreply.github.com>

* comment fixes from review

* fix comment

* fix comment

Co-authored-by: Dev Ojha <ValarDragon@users.noreply.github.com>
2021-11-24 17:58:24 -05:00
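The rule this commit moves the state machine to can be restated roughly as follows: prevote the proposal only if we are not locked, the proposal matches our locked block, or the proposal carries a proof-of-lock (POL) round at or after our locked round; otherwise prevote nil. The sketch below is a simplified restatement of that rule, not the actual defaultDoPrevote implementation:

```go
package main

import (
	"bytes"
	"fmt"
)

// shouldPrevoteProposal is a simplified restatement of the prevote rule:
// prevote the proposed block only when we are not locked on a different
// block, or when the proposal carries a POL round that is not older than
// our locked round. Otherwise the validator prevotes nil.
func shouldPrevoteProposal(proposalHash, lockedHash []byte, proposalPOLRound, lockedRound int32) bool {
	if lockedHash == nil {
		return true // not locked: prevote any valid proposal
	}
	if bytes.Equal(proposalHash, lockedHash) {
		return true // proposal matches our locked block
	}
	// Locked on a different block: only prevote if the proposal has a
	// proof-of-lock from a round at or after our locked round.
	return proposalPOLRound >= 0 && proposalPOLRound >= lockedRound
}

func main() {
	a, b := []byte{0x01}, []byte{0x02}
	fmt.Println(shouldPrevoteProposal(a, nil, -1, -1)) // true: not locked
	fmt.Println(shouldPrevoteProposal(a, b, -1, 0))    // false: locked on a different block, no newer POL -> prevote nil
	fmt.Println(shouldPrevoteProposal(a, b, 1, 0))     // true: POL from round >= locked round
}
```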
William Banfield
d42addfe9c consensus: remove logic to unlock block on 2/3 prevote for nil (#6954) 2021-11-24 17:33:12 -05:00
William Banfield
765e1e313c consensus: remove panics from test helper functions (#6969) 2021-11-24 16:51:08 -05:00
58 changed files with 3220 additions and 2293 deletions

View File

@@ -41,6 +41,7 @@ Special thanks to external contributors on this release:
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of a non-deterministic app hash or reverting an upgrade.
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
- [consensus] \#7376 Update the proposal logic per the Proposer-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield)
### IMPROVEMENTS

View File

@@ -569,6 +569,10 @@ var testGenesisFmt = `{
"max_gas": "-1",
"time_iota_ms": "10"
},
"synchrony": {
"message_delay": "500000000",
"precision": "10000000"
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",

View File

@@ -12,7 +12,6 @@ Tendermint has some tools that are associated with it for:
- [Debugging](./debugging/pro.md)
- [Benchmarking](#benchmarking)
- [Testnets](#testnets)
- [Validation of remote signers](./remote-signer-validation.md)
## Benchmarking

View File

@@ -1,156 +0,0 @@
# Remote Signer
Located under the `tools/tm-signer-harness` folder in the [Tendermint
repository](https://github.com/tendermint/tendermint).
The Tendermint remote signer test harness facilitates integration testing
between Tendermint and remote signers such as
[tmkms](https://github.com/iqlusioninc/tmkms). Such remote signers allow for signing
of important Tendermint messages using
[HSMs](https://en.wikipedia.org/wiki/Hardware_security_module), providing
additional security.
When executed, `tm-signer-harness`:
1. Runs a listener (either TCP or Unix sockets).
2. Waits for a connection from the remote signer.
3. Upon connection from the remote signer, executes a number of automated tests
to ensure compatibility.
4. Upon successful validation, the harness process exits with a 0 exit code.
Upon validation failure, it exits with a particular exit code related to the
error.
## Prerequisites
Requires the same prerequisites as for building
[Tendermint](https://github.com/tendermint/tendermint).
## Building
From the `tools/tm-signer-harness` directory in your Tendermint source
repository, simply run:
```bash
make
# To have global access to this executable
make install
```
## Docker Image
To build a Docker image containing the `tm-signer-harness`, also from the
`tools/tm-signer-harness` directory of your Tendermint source repo, simply run:
```bash
make docker-image
```
## Running against KMS
As an example of how to use `tm-signer-harness`, the following instructions show
you how to execute its tests against [tmkms](https://github.com/iqlusioninc/tmkms).
For this example, we will make use of the **software signing module in KMS**, as
the hardware signing module requires a physical
[YubiHSM](https://www.yubico.com/products/yubihsm/) device.
### Step 1: Install KMS on your local machine
See the [tmkms repo](https://github.com/iqlusioninc/tmkms) for details on how to set
KMS up on your local machine.
If you have [Rust](https://www.rust-lang.org/) installed on your local machine,
you can simply install KMS by:
```bash
cargo install tmkms
```
### Step 2: Make keys for KMS
The KMS software signing module needs a key with which to sign messages. In our
example, we will simply export a signing key from our local Tendermint instance.
```bash
# Will generate all necessary Tendermint configuration files, including:
# - ~/.tendermint/config/priv_validator_key.json
# - ~/.tendermint/data/priv_validator_state.json
tendermint init validator
# Extract the signing key from our local Tendermint instance
tm-signer-harness extract_key \ # Use the "extract_key" command
-tmhome ~/.tendermint \ # Where to find the Tendermint home directory
-output ./signing.key # Where to write the key
```
Also, because we want KMS to connect to `tm-signer-harness`, we will need to
provide a secret connection key from KMS' side:
```bash
tmkms keygen secret_connection.key
```
### Step 3: Configure and run KMS
KMS needs some configuration to tell it to use the software signing module as well
as the `signing.key` file we just generated. Save the following to a file called
`tmkms.toml`:
```toml
[[validator]]
addr = "tcp://127.0.0.1:61219" # This is where we will find tm-signer-harness.
chain_id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (found in ~/.tendermint/config/genesis.json).
reconnect = true # true is the default
secret_key = "./secret_connection.key" # Where to find our secret connection key.
[[providers.softsign]]
id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (same as validator.chain_id above).
path = "./signing.key" # The signing key we extracted earlier.
```
Then run KMS with this configuration:
```bash
tmkms start -c tmkms.toml
```
This will start KMS, which will repeatedly try to connect to
`tcp://127.0.0.1:61219` until it is successful.
### Step 4: Run tm-signer-harness
Now we get to run the signer test harness:
```bash
tm-signer-harness run \ # The "run" command executes the tests
-addr tcp://127.0.0.1:61219 \ # The address we promised KMS earlier
-tmhome ~/.tendermint # Where to find our Tendermint configuration/data files.
```
If the current versions of Tendermint and KMS are compatible, `tm-signer-harness`
should now exit with a 0 exit code. If they are somehow not compatible, it
should exit with a meaningful non-zero exit code (see the exit codes below).
### Step 5: Shut down KMS
Simply hit Ctrl+Break on your KMS instance (or use the `kill` command in Linux)
to terminate it gracefully.
## Exit Code Meanings
The following list shows the various exit codes from `tm-signer-harness` and
their meanings:
| Exit Code | Description |
| --- | --- |
| 0 | Success! |
| 1 | Invalid command line parameters supplied to `tm-signer-harness` |
| 2 | Maximum number of accept retries reached (the `-accept-retries` parameter) |
| 3 | Failed to load `${TMHOME}/config/genesis.json` |
| 4 | Failed to create listener specified by `-addr` parameter |
| 5 | Failed to start listener |
| 6 | Interrupted by `SIGINT` (e.g. when hitting Ctrl+Break or Ctrl+C) |
| 7 | Other unknown error |
| 8 | Test 1 failed: public key mismatch |
| 9 | Test 2 failed: signing of proposals failed |
| 10 | Test 3 failed: signing of votes failed |

View File

@@ -200,7 +200,8 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
valSet, privVals := factory.ValidatorSet(1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
maxBlockHeight := int64(64)
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -239,7 +240,8 @@ func TestReactor_SyncTime(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
valSet, privVals := factory.ValidatorSet(1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
maxBlockHeight := int64(101)
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -264,10 +266,10 @@ func TestReactor_NoBlockResponse(t *testing.T) {
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
valSet, privVals := factory.ValidatorSet(1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
maxBlockHeight := int64(65)
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -319,7 +321,8 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
defer os.RemoveAll(cfg.RootDir)
maxBlockHeight := int64(48)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
valSet, privVals := factory.ValidatorSet(1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
@@ -353,7 +356,8 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
//
// XXX: This causes a potential race condition.
// See: https://github.com/tendermint/tendermint/issues/6005
otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30)
valSet, otherPrivVals := factory.ValidatorSet(1, 30)
otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{
MaxPeers: uint16(len(rts.nodes) + 1),
MaxConnected: uint16(len(rts.nodes) + 1),

View File

@@ -7,6 +7,7 @@ import (
"path"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -42,7 +43,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore
genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
valSet, privVals := factory.ValidatorSet(nValidators, 30)
genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil)
states := make([]*State, nValidators)
for i := 0; i < nValidators; i++ {
@@ -59,7 +61,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -212,7 +214,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID)
proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID, block.Header.Time)
p := proposal.ToProto()
if err := lazyNodeState.privValidator.SignProposal(ctx, lazyNodeState.state.ChainID, p); err == nil {
proposal.Signature = p.Signature

View File

@@ -3,6 +3,7 @@ package consensus
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
@@ -73,9 +74,10 @@ func configSetup(t *testing.T) *config.Config {
return cfg
}
func ensureDir(dir string, mode os.FileMode) {
func ensureDir(t *testing.T, dir string, mode os.FileMode) {
t.Helper()
if err := tmos.EnsureDir(dir, mode); err != nil {
panic(err)
t.Fatalf("error opening directory: %s", err)
}
}
@@ -90,6 +92,7 @@ type validatorStub struct {
Index int32 // Validator index. NOTE: we don't assume validator set changes.
Height int64
Round int32
clock tmtime.Source
types.PrivValidator
VotingPower int64
lastVote *types.Vote
@@ -102,16 +105,15 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
Index: valIndex,
PrivValidator: privValidator,
VotingPower: testMinPower,
clock: tmtime.DefaultSource{},
}
}
func (vs *validatorStub) signVote(
ctx context.Context,
cfg *config.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
) (*types.Vote, error) {
chainID string,
blockID types.BlockID) (*types.Vote, error) {
pubKey, err := vs.PrivValidator.GetPubKey(ctx)
if err != nil {
@@ -123,12 +125,12 @@ func (vs *validatorStub) signVote(
ValidatorAddress: pubKey.Address(),
Height: vs.Height,
Round: vs.Round,
Timestamp: tmtime.Now(),
Timestamp: vs.clock.Now(),
Type: voteType,
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
BlockID: blockID,
}
v := vote.ToProto()
if err := vs.PrivValidator.SignVote(ctx, cfg.ChainID(), v); err != nil {
if err := vs.PrivValidator.SignVote(ctx, chainID, v); err != nil {
return nil, fmt.Errorf("sign vote failed: %w", err)
}
@@ -148,13 +150,11 @@ func (vs *validatorStub) signVote(
func signVote(
ctx context.Context,
vs *validatorStub,
cfg *config.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
) *types.Vote {
chainID string,
blockID types.BlockID) *types.Vote {
v, err := vs.signVote(ctx, cfg, voteType, hash, header)
v, err := vs.signVote(ctx, voteType, chainID, blockID)
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
}
@@ -166,14 +166,13 @@ func signVote(
func signVotes(
ctx context.Context,
cfg *config.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
chainID string,
blockID types.BlockID,
vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(ctx, vs, cfg, voteType, hash, header)
votes[i] = signVote(ctx, vs, voteType, chainID, blockID)
}
return votes
}
@@ -231,26 +230,28 @@ func startTestRound(ctx context.Context, cs *State, height int64, round int32) {
// Create proposal block from cs1 but sign it with vs.
func decideProposal(
ctx context.Context,
t *testing.T,
cs1 *State,
vs *validatorStub,
height int64,
round int32,
) (proposal *types.Proposal, block *types.Block) {
t.Helper()
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
validRound := cs1.ValidRound
chainID := cs1.state.ChainID
cs1.mtx.Unlock()
if block == nil {
panic("Failed to createProposalBlock. Did you forget to add commit for previous block?")
t.Fatal("Failed to createProposalBlock. Did you forget to add commit for previous block?")
}
// Make proposal
polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
proposal = types.NewProposal(height, round, polRound, propBlockID, block.Header.Time)
p := proposal.ToProto()
if err := vs.SignProposal(ctx, chainID, p); err != nil {
panic(err)
t.Fatalf("error signing proposal: %s", err)
}
proposal.Signature = p.Signature
@@ -266,54 +267,49 @@ func addVotes(to *State, votes ...*types.Vote) {
func signAddVotes(
ctx context.Context,
cfg *config.Config,
to *State,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
chainID string,
blockID types.BlockID,
vss ...*validatorStub,
) {
addVotes(to, signVotes(ctx, cfg, voteType, hash, header, vss...)...)
addVotes(to, signVotes(ctx, voteType, chainID, blockID, vss...)...)
}
func validatePrevote(
ctx context.Context,
t *testing.T,
cs *State,
round int32,
privVal *validatorStub,
blockHash []byte,
) {
// nolint: lll
func validatePrevote(ctx context.Context, t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) {
t.Helper()
prevotes := cs.Votes.Prevotes(round)
pubKey, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
address := pubKey.Address()
var vote *types.Vote
if vote = prevotes.GetByAddress(address); vote == nil {
panic("Failed to find prevote from validator")
t.Fatalf("Failed to find prevote from validator")
}
if blockHash == nil {
if vote.BlockID.Hash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
t.Fatalf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)
}
} else {
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
t.Fatalf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)
}
}
}
func validateLastPrecommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) {
t.Helper()
votes := cs.LastCommit
pv, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
address := pv.Address()
var vote *types.Vote
if vote = votes.GetByAddress(address); vote == nil {
panic("Failed to find precommit from validator")
t.Fatalf("Failed to find precommit from validator")
}
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
t.Fatalf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash)
}
}
@@ -327,41 +323,42 @@ func validatePrecommit(
votedBlockHash,
lockedBlockHash []byte,
) {
t.Helper()
precommits := cs.Votes.Precommits(thisRound)
pv, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
address := pv.Address()
var vote *types.Vote
if vote = precommits.GetByAddress(address); vote == nil {
panic("Failed to find precommit from validator")
t.Fatalf("Failed to find precommit from validator")
}
if votedBlockHash == nil {
if vote.BlockID.Hash != nil {
panic("Expected precommit to be for nil")
t.Fatalf("Expected precommit to be for nil")
}
} else {
if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
panic("Expected precommit to be for proposal block")
t.Fatalf("Expected precommit to be for proposal block")
}
}
if lockedBlockHash == nil {
if cs.LockedRound != lockRound || cs.LockedBlock != nil {
panic(fmt.Sprintf(
t.Fatalf(
"Expected to be locked on nil at round %d. Got locked at round %d with block %v",
lockRound,
cs.LockedRound,
cs.LockedBlock))
cs.LockedBlock)
}
} else {
if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
panic(fmt.Sprintf(
t.Fatalf(
"Expected block to be locked on round %d, got %d. Got locked block %X, expected %X",
lockRound,
cs.LockedRound,
cs.LockedBlock.Hash(),
lockedBlockHash))
lockedBlockHash)
}
}
}
@@ -376,6 +373,7 @@ func validatePrevoteAndPrecommit(
votedBlockHash,
lockedBlockHash []byte,
) {
t.Helper()
// verify the prevote
validatePrevote(ctx, t, cs, thisRound, privVal, votedBlockHash)
// verify precommit
@@ -401,6 +399,35 @@ func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte)
return ch
}
func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message {
t.Helper()
votesSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
ClientID: testSubscriber,
Query: types.EventQueryVote,
Limit: 10})
if err != nil {
t.Fatalf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)
}
ch := make(chan tmpubsub.Message, 10)
go func() {
for {
msg, err := votesSub.Next(ctx)
if err != nil {
if !errors.Is(err, tmpubsub.ErrTerminated) && !errors.Is(err, context.Canceled) {
t.Errorf("error terminating pubsub %s", err)
}
return
}
vote := msg.Data().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
}
}
}()
return ch
}
//-------------------------------------------------------------------------------
// consensus states
@@ -479,26 +506,25 @@ func newStateWithConfigAndBlockStore(
return cs
}
func loadPrivValidator(cfg *config.Config) *privval.FilePV {
privValidatorKeyFile := cfg.PrivValidator.KeyFile()
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
privValidatorStateFile := cfg.PrivValidator.StateFile()
func loadPrivValidator(t *testing.T, config *config.Config) *privval.FilePV {
t.Helper()
privValidatorKeyFile := config.PrivValidator.KeyFile()
ensureDir(t, filepath.Dir(privValidatorKeyFile), 0700)
privValidatorStateFile := config.PrivValidator.StateFile()
privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
if err != nil {
panic(err)
t.Fatalf("error generating validator file: %s", err)
}
privValidator.Reset()
return privValidator
}
func randState(
ctx context.Context,
cfg *config.Config,
logger log.Logger,
nValidators int,
) (*State, []*validatorStub, error) {
// nolint: lll
func makeState(ctx context.Context, cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub, error) {
// Get State
state, privVals := randGenesisState(cfg, nValidators, false, 10)
state, privVals := makeGenesisState(cfg, genesisStateArgs{
Validators: nValidators,
})
vss := make([]*validatorStub, nValidators)
@@ -518,222 +544,211 @@ func randState(
//-------------------------------------------------------------------------------
func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
func ensureNoMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, timeout time.Duration,
errorMessage string) {
t.Helper()
select {
case <-time.After(timeout):
break
case <-ch:
panic(errorMessage)
t.Fatalf("unexpected event: %s", errorMessage)
}
}
func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) {
ensureNoNewEvent(
func ensureNoNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) {
t.Helper()
ensureNoMessageBeforeTimeout(
t,
ch,
ensureTimeout,
"We should be stuck waiting, not receiving new event on the channel")
}
func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) {
ensureNoNewEvent(
func ensureNoNewRoundStep(t *testing.T, stepCh <-chan tmpubsub.Message) {
t.Helper()
ensureNoMessageBeforeTimeout(
t,
stepCh,
ensureTimeout,
"We should be stuck waiting, not receiving NewRoundStep event")
}
func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
ensureNoNewEvent(
unlockCh,
ensureTimeout,
"We should be stuck waiting, not receiving Unlock event")
}
func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
func ensureNoNewTimeout(t *testing.T, stepCh <-chan tmpubsub.Message, timeout int64) {
t.Helper()
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
ensureNoNewEvent(
ensureNoMessageBeforeTimeout(
t,
stepCh,
timeoutDuration,
"We should be stuck waiting, not receiving NewTimeout event")
}
func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration, errorMessage string) {
select {
case <-time.After(timeout):
panic(errorMessage)
case msg := <-ch:
roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
if !ok {
panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?",
msg.Data()))
}
if roundStateEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height))
}
if roundStateEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round))
}
// TODO: We could check also for a step at this point!
func ensureNewEvent(t *testing.T, ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration, errorMessage string) { // nolint: lll
t.Helper()
msg := ensureMessageBeforeTimeout(t, ch, ensureTimeout)
roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
if !ok {
t.Fatalf("expected a EventDataRoundState, got %T. Wrong subscription channel?", msg.Data())
}
if roundStateEvent.Height != height {
t.Fatalf("expected height %v, got %v", height, roundStateEvent.Height)
}
if roundStateEvent.Round != round {
t.Fatalf("expected round %v, got %v", round, roundStateEvent.Round)
}
// TODO: We could check also for a step at this point!
}
func ensureNewRound(t *testing.T, roundCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
msg := ensureMessageBeforeTimeout(t, roundCh, ensureTimeout)
newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
if !ok {
t.Fatalf("expected a EventDataNewRound, got %T. Wrong subscription channel?", msg.Data())
}
if newRoundEvent.Height != height {
t.Fatalf("expected height %v, got %v", height, newRoundEvent.Height)
}
if newRoundEvent.Round != round {
t.Fatalf("expected round %v, got %v", round, newRoundEvent.Round)
}
}
func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewRound event")
case msg := <-roundCh:
newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?",
msg.Data()))
}
if newRoundEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height))
}
if newRoundEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round))
}
}
}
func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) {
func ensureNewTimeout(t *testing.T, timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) {
t.Helper()
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
ensureNewEvent(t, timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}
func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
}
if proposalEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
}
if proposalEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
}
func ensureNewProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
msg := ensureMessageBeforeTimeout(t, proposalCh, ensureTimeout)
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
t.Fatalf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", msg.Data())
}
if proposalEvent.Height != height {
t.Fatalf("expected height %v, got %v", height, proposalEvent.Height)
}
if proposalEvent.Round != round {
t.Fatalf("expected round %v, got %v", round, proposalEvent.Round)
}
}
func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int32) {
ensureNewEvent(validBlockCh, height, round, ensureTimeout,
func ensureNewValidBlock(t *testing.T, validBlockCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
ensureNewEvent(t, validBlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewValidBlock event")
}
func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlock event")
case msg := <-blockCh:
blockEvent, ok := msg.Data().(types.EventDataNewBlock)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?",
msg.Data()))
}
if blockEvent.Block.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height))
}
func ensureNewBlock(t *testing.T, blockCh <-chan tmpubsub.Message, height int64) {
t.Helper()
msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout)
blockEvent, ok := msg.Data().(types.EventDataNewBlock)
if !ok {
t.Fatalf("expected a EventDataNewBlock, got %T. Wrong subscription channel?", msg.Data())
}
if blockEvent.Block.Height != height {
t.Fatalf("expected height %v, got %v", height, blockEvent.Block.Height)
}
}
func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlockHeader event")
case msg := <-blockCh:
blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?",
msg.Data()))
}
if blockHeaderEvent.Header.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height))
}
if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) {
panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash()))
}
func ensureNewBlockHeader(t *testing.T, blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) {
t.Helper()
msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout)
blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
if !ok {
t.Fatalf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?", msg.Data())
}
if blockHeaderEvent.Header.Height != height {
t.Fatalf("expected height %v, got %v", height, blockHeaderEvent.Header.Height)
}
if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) {
t.Fatalf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash())
}
}
func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32) {
ensureNewEvent(unlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewUnlock event")
func ensureLock(t *testing.T, lockCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
ensureNewEvent(t, lockCh, height, round, ensureTimeout,
"Timeout expired while waiting for LockValue event")
}
func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
}
if proposalEvent.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
}
if proposalEvent.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
}
if !proposalEvent.BlockID.Equals(propID) {
panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID))
}
func ensureRelock(t *testing.T, relockCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
ensureNewEvent(t, relockCh, height, round, ensureTimeout,
"Timeout expired while waiting for RelockValue event")
}
func ensureProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) {
ensureProposalWithTimeout(t, proposalCh, height, round, &propID, ensureTimeout)
}
// nolint: lll
func ensureProposalWithTimeout(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID *types.BlockID, timeout time.Duration) {
t.Helper()
msg := ensureMessageBeforeTimeout(t, proposalCh, timeout)
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
if !ok {
t.Fatalf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data())
}
if proposalEvent.Height != height {
t.Fatalf("expected height %v, got %v", height, proposalEvent.Height)
}
if proposalEvent.Round != round {
t.Fatalf("expected round %v, got %v", round, proposalEvent.Round)
}
if propID != nil && !proposalEvent.BlockID.Equals(*propID) {
t.Fatalf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, *propID)
}
}
func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int32) {
ensureVote(voteCh, height, round, tmproto.PrecommitType)
func ensurePrecommit(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
ensureVote(t, voteCh, height, round, tmproto.PrecommitType)
}
func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
ensureVote(voteCh, height, round, tmproto.PrevoteType)
func ensurePrevote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) {
t.Helper()
ensureVote(t, voteCh, height, round, tmproto.PrevoteType)
}
func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
func ensureVote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32,
voteType tmproto.SignedMsgType) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
case msg := <-voteCh:
voteEvent, ok := msg.Data().(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?",
msg.Data()))
}
vote := voteEvent.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
if vote.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
}
if vote.Type != voteType {
panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type))
}
t.Helper()
msg := ensureMessageBeforeTimeout(t, voteCh, ensureTimeout)
voteEvent, ok := msg.Data().(types.EventDataVote)
if !ok {
t.Fatalf("expected a EventDataVote, got %T. Wrong subscription channel?", msg.Data())
}
vote := voteEvent.Vote
if vote.Height != height {
t.Fatalf("expected height %v, got %v", height, vote.Height)
}
if vote.Round != round {
t.Fatalf("expected round %v, got %v", round, vote.Round)
}
if vote.Type != voteType {
t.Fatalf("expected type %v, got %v", voteType, vote.Type)
}
}
func ensurePrecommitTimeout(ch <-chan tmpubsub.Message) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for the Precommit to Timeout")
case <-ch:
}
func ensureNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) {
t.Helper()
ensureMessageBeforeTimeout(t, ch, ensureTimeout)
}
func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
func ensureMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, to time.Duration) tmpubsub.Message {
t.Helper()
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for new activity on the channel")
case <-ch:
case <-time.After(to):
t.Fatalf("Timeout expired while waiting for message")
case msg := <-ch:
return msg
}
panic("unreachable")
}
//-------------------------------------------------------------------------------
@@ -745,7 +760,7 @@ func consensusLogger() log.Logger {
return log.TestingLogger().With("module", "consensus")
}
func randConsensusState(
func makeConsensusState(
ctx context.Context,
t *testing.T,
cfg *config.Config,
@@ -755,8 +770,10 @@ func randConsensusState(
appFunc func() abci.Application,
configOpts ...func(*config.Config),
) ([]*State, cleanupFunc) {
t.Helper()
genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, 30)
valSet, privVals := factory.ValidatorSet(nValidators, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
css := make([]*State, nValidators)
logger := consensusLogger()
@@ -777,7 +794,7 @@ func randConsensusState(
opt(thisConfig)
}
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
@@ -806,6 +823,7 @@ func randConsensusState(
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(
ctx context.Context,
t *testing.T,
cfg *config.Config,
nValidators,
nPeers int,
@@ -813,8 +831,10 @@ func randConsensusNetWithPeers(
tickerFunc func() TimeoutTicker,
appFunc func(string) abci.Application,
) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) {
genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, testMinPower)
valSet, privVals := factory.ValidatorSet(nValidators, testMinPower)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
css := make([]*State, nPeers)
t.Helper()
logger := consensusLogger()
var peer0Config *config.Config
@@ -827,7 +847,7 @@ func randConsensusNetWithPeers(
}
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
if i == 0 {
peer0Config = thisConfig
}
@@ -837,16 +857,16 @@ func randConsensusNetWithPeers(
} else {
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
if err != nil {
panic(err)
t.Fatalf("error creating temp file for validator key: %s", err)
}
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
if err != nil {
panic(err)
t.Fatalf("error loading validator state: %s", err)
}
privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "")
if err != nil {
panic(err)
t.Fatalf("error generating validator key: %s", err)
}
}
@@ -869,13 +889,28 @@ func randConsensusNetWithPeers(
}
}
func randGenesisState(
cfg *config.Config,
numValidators int,
randPower bool,
minPower int64) (sm.State, []types.PrivValidator) {
type genesisStateArgs struct {
Validators int
Power int64
Params *types.ConsensusParams
Time time.Time
}
genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, randPower, minPower)
func makeGenesisState(cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) {
if args.Power == 0 {
args.Power = 1
}
if args.Validators == 0 {
args.Validators = 4
}
valSet, privValidators := factory.ValidatorSet(args.Validators, args.Power)
if args.Params == nil {
args.Params = types.DefaultConsensusParams()
}
if args.Time.IsZero() {
args.Time = time.Now()
}
genDoc := factory.GenesisDoc(cfg, args.Time, valSet.Validators, args.Params)
s0, _ := sm.MakeGenesisState(genDoc)
return s0, privValidators
}

View File

@@ -23,7 +23,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(ctx, t,
states, cleanup := makeConsensusState(ctx, t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)

View File

@@ -37,19 +37,21 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(baseConfig, 1, false, 10)
state, privVals := makeGenesisState(baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
startTestRound(ctx, cs, height, round)
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh)
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh)
deliverTxsRange(ctx, cs, 0, 1)
ensureNewEventOnChannel(newBlockCh) // commit txs
ensureNewEventOnChannel(newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(newBlockCh)
ensureNewEventOnChannel(t, newBlockCh) // commit txs
ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(t, newBlockCh)
}
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
@@ -62,7 +64,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(baseConfig, 1, false, 10)
state, privVals := makeGenesisState(baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
@@ -70,9 +74,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
startTestRound(ctx, cs, cs.Height, cs.Round)
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ...
ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh) // then we dont make a block ...
ensureNewEventOnChannel(t, newBlockCh) // until the CreateEmptyBlocksInterval has passed
}
func TestMempoolProgressInHigherRound(t *testing.T) {
@@ -85,7 +89,9 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(baseConfig, 1, false, 10)
state, privVals := makeGenesisState(baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
@@ -103,19 +109,19 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
}
startTestRound(ctx, cs, height, round)
ensureNewRound(newRoundCh, height, round) // first round at first height
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNewRound(t, newRoundCh, height, round) // first round at first height
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
height++ // moving to the next height
round = 0
ensureNewRound(newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
ensureNewRound(t, newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
round++ // moving to the next round
ensureNewRound(newRoundCh, height, round) // wait for the next round
ensureNewEventOnChannel(newBlockCh) // now we can commit the block
round++ // moving to the next round
ensureNewRound(t, newRoundCh, height, round) // wait for the next round
ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block
}
func deliverTxsRange(ctx context.Context, cs *State, start, end int) {
@@ -136,7 +142,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
config := configSetup(t)
logger := log.TestingLogger()
state, privVals := randGenesisState(config, 1, false, 10)
state, privVals := makeGenesisState(config, genesisStateArgs{
Validators: 1,
Power: 10})
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
@@ -168,7 +176,9 @@ func TestMempoolRmBadTx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
state, privVals := randGenesisState(config, 1, false, 10)
state, privVals := makeGenesisState(config, genesisStateArgs{
Validators: 1,
Power: 10})
app := NewCounterApplication()
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())

View File

@@ -64,6 +64,11 @@ type Metrics struct {
// Histogram of time taken per step annotated with reason that the step proceeded.
StepTime metrics.Histogram
// ProposalTimestampDifference is the difference between the timestamp in
// the proposal message and the local time of the validator at the time
// that the validator received the message.
ProposalTimestampDifference metrics.Histogram
}
// PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -196,6 +201,15 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "step_time",
Help: "Time spent per step.",
}, append(labels, "step", "reason")).With(labelsAndValues...),
ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "proposal_timestamp_difference",
Help: "Difference in seconds between the timestamp in the proposal " +
"message and the local time when the message was received. " +
"Only calculated when a new block is proposed.",
Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10},
}, append(labels, "is_timely")).With(labelsAndValues...),
}
}
@@ -219,13 +233,14 @@ func NopMetrics() *Metrics {
BlockIntervalSeconds: discard.NewHistogram(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
ProposalTimestampDifference: discard.NewHistogram(),
}
}
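For reference, a hedged example of how a go-kit histogram shaped like ProposalTimestampDifference might be fed when a proposal arrives; the helper below and its call site are illustrative, not the consensus state machine's actual code, and the sign convention should follow the metric's help text:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
)

// recordProposalTimestampDifference observes the difference in seconds
// between the local receive time and the proposal's timestamp, labeled by
// whether the proposal was timely. The direction of the subtraction here
// is illustrative.
func recordProposalTimestampDifference(h metrics.Histogram, proposalTime, recvTime time.Time, timely bool) {
	diff := recvTime.Sub(proposalTime).Seconds()
	h.With("is_timely", fmt.Sprintf("%t", timely)).Observe(diff)
}

func main() {
	h := discard.NewHistogram() // swap in the Prometheus-backed histogram in real code
	now := time.Now()
	recordProposalTimestampDifference(h, now, now.Add(120*time.Millisecond), true)
}
```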

View File

@@ -0,0 +1,496 @@
package consensus
import (
"bytes"
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmtimemocks "github.com/tendermint/tendermint/libs/time/mocks"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
const (
// blockTimeIota is used in the test harness as the time between
// blocks when not otherwise specified.
blockTimeIota = time.Millisecond
)
// pbtsTestHarness constructs a Tendermint network that can be used for testing the
// implementation of the Proposer-Based timestamps algorithm.
// It runs a series of consensus heights and captures timing of votes and events.
type pbtsTestHarness struct {
// configuration options set by the user of the test harness.
pbtsTestConfiguration
// The Tendermint consensus state machine being run during
// a run of the pbtsTestHarness.
observedState *State
// A stub for signing votes and messages using the key
// from the observedState.
observedValidator *validatorStub
// A list of simulated validators that interact with the observedState and are
// fully controlled by the test harness.
otherValidators []*validatorStub
// The mock time source used by all of the validator stubs in the test harness.
// This mock clock allows the test harness to produce votes and blocks with arbitrary
// timestamps.
validatorClock *tmtimemocks.Source
chainID string
// channels for verifying that the observed validator completes certain actions.
ensureProposalCh, roundCh, blockCh, ensureVoteCh <-chan tmpubsub.Message
// channel of events from the observed validator annotated with the timestamp
// the event was received.
eventCh <-chan timestampedEvent
currentHeight int64
currentRound int32
t *testing.T
ctx context.Context
}
type pbtsTestConfiguration struct {
// The timestamp consensus parameters to be used by the state machine under test.
synchronyParams types.SynchronyParams
// The setting to use for the TimeoutPropose configuration parameter.
timeoutPropose time.Duration
// The timestamp of the first block produced by the network.
genesisTime time.Time
// The time at which the proposal at height 2 should be delivered.
height2ProposalDeliverTime time.Time
// The timestamp of the block proposed at height 2.
height2ProposedBlockTime time.Time
// The timestamp of the block proposed at height 4.
// At height 4, the proposed block time and the deliver time are the same so
// that timely-ness does not affect height 4.
height4ProposedBlockTime time.Time
}
func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfiguration) pbtsTestHarness {
t.Helper()
const validators = 4
cfg := configSetup(t)
clock := new(tmtimemocks.Source)
if tc.height4ProposedBlockTime.IsZero() {
// Set a default height4ProposedBlockTime.
// Use a proposed block time that is greater than the time that the
// block at height 2 was delivered. Height 3 is not relevant for testing
// and always occurs blockTimeIota before height 4. If not otherwise specified,
// height 4 therefore occurs 2*blockTimeIota after height 2.
tc.height4ProposedBlockTime = tc.height2ProposalDeliverTime.Add(2 * blockTimeIota)
}
cfg.Consensus.TimeoutPropose = tc.timeoutPropose
consensusParams := types.DefaultConsensusParams()
consensusParams.Synchrony = tc.synchronyParams
state, privVals := makeGenesisState(cfg, genesisStateArgs{
Params: consensusParams,
Time: tc.genesisTime,
Validators: validators,
})
cs, err := newState(ctx, log.TestingLogger(), state, privVals[0], kvstore.NewApplication())
require.NoError(t, err)
vss := make([]*validatorStub, validators)
for i := 0; i < validators; i++ {
vss[i] = newValidatorStub(privVals[i], int32(i))
}
incrementHeight(vss[1:]...)
for _, vs := range vss {
vs.clock = clock
}
pubKey, err := vss[0].PrivValidator.GetPubKey(ctx)
require.NoError(t, err)
eventCh := timestampedCollector(ctx, t, cs.eventBus)
return pbtsTestHarness{
pbtsTestConfiguration: tc,
observedValidator: vss[0],
observedState: cs,
otherValidators: vss[1:],
validatorClock: clock,
currentHeight: 1,
chainID: cfg.ChainID(),
roundCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound),
ensureProposalCh: subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal),
blockCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock),
ensureVoteCh: subscribeToVoterBuffered(ctx, t, cs, pubKey.Address()),
eventCh: eventCh,
t: t,
ctx: ctx,
}
}
func (p *pbtsTestHarness) observedValidatorProposerHeight(previousBlockTime time.Time) heightResult {
p.validatorClock.On("Now").Return(p.height2ProposedBlockTime).Times(6)
ensureNewRound(p.t, p.roundCh, p.currentHeight, p.currentRound)
timeout := time.Until(previousBlockTime.Add(ensureTimeout))
ensureProposalWithTimeout(p.t, p.ensureProposalCh, p.currentHeight, p.currentRound, nil, timeout)
rs := p.observedState.GetRoundState()
bid := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}
ensurePrevote(p.t, p.ensureVoteCh, p.currentHeight, p.currentRound)
signAddVotes(p.ctx, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...)
signAddVotes(p.ctx, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...)
ensurePrecommit(p.t, p.ensureVoteCh, p.currentHeight, p.currentRound)
ensureNewBlock(p.t, p.blockCh, p.currentHeight)
vk, err := p.observedValidator.GetPubKey(context.Background())
require.NoError(p.t, err)
res := collectHeightResults(p.ctx, p.t, p.eventCh, p.currentHeight, vk.Address())
p.currentHeight++
incrementHeight(p.otherValidators...)
return res
}
func (p *pbtsTestHarness) height2() heightResult {
signer := p.otherValidators[0].PrivValidator
height3BlockTime := p.height2ProposedBlockTime.Add(-blockTimeIota)
return p.nextHeight(signer, p.height2ProposalDeliverTime, p.height2ProposedBlockTime, height3BlockTime)
}
func (p *pbtsTestHarness) intermediateHeights() {
signer := p.otherValidators[1].PrivValidator
blockTimeHeight3 := p.height4ProposedBlockTime.Add(-blockTimeIota)
p.nextHeight(signer, blockTimeHeight3, blockTimeHeight3, p.height4ProposedBlockTime)
signer = p.otherValidators[2].PrivValidator
p.nextHeight(signer, p.height4ProposedBlockTime, p.height4ProposedBlockTime, time.Now())
}
func (p *pbtsTestHarness) height5() heightResult {
return p.observedValidatorProposerHeight(p.height4ProposedBlockTime)
}
// nolint: lll
func (p *pbtsTestHarness) nextHeight(proposer types.PrivValidator, deliverTime, proposedTime, nextProposedTime time.Time) heightResult {
p.validatorClock.On("Now").Return(nextProposedTime).Times(6)
ensureNewRound(p.t, p.roundCh, p.currentHeight, p.currentRound)
b, _ := p.observedState.createProposalBlock()
b.Height = p.currentHeight
b.Header.Height = p.currentHeight
b.Header.Time = proposedTime
k, err := proposer.GetPubKey(context.Background())
require.NoError(p.t, err)
b.Header.ProposerAddress = k.Address()
ps := b.MakePartSet(types.BlockPartSizeBytes)
bid := types.BlockID{Hash: b.Hash(), PartSetHeader: ps.Header()}
prop := types.NewProposal(p.currentHeight, 0, -1, bid, proposedTime)
tp := prop.ToProto()
if err := proposer.SignProposal(context.Background(), p.observedState.state.ChainID, tp); err != nil {
p.t.Fatalf("error signing proposal: %s", err)
}
time.Sleep(time.Until(deliverTime))
prop.Signature = tp.Signature
if err := p.observedState.SetProposalAndBlock(prop, b, ps, "peerID"); err != nil {
p.t.Fatal(err)
}
ensureProposal(p.t, p.ensureProposalCh, p.currentHeight, 0, bid)
ensurePrevote(p.t, p.ensureVoteCh, p.currentHeight, p.currentRound)
signAddVotes(p.ctx, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...)
signAddVotes(p.ctx, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...)
ensurePrecommit(p.t, p.ensureVoteCh, p.currentHeight, p.currentRound)
vk, err := p.observedValidator.GetPubKey(context.Background())
require.NoError(p.t, err)
res := collectHeightResults(p.ctx, p.t, p.eventCh, p.currentHeight, vk.Address())
ensureNewBlock(p.t, p.blockCh, p.currentHeight)
p.currentHeight++
incrementHeight(p.otherValidators...)
return res
}
func timestampedCollector(ctx context.Context, t *testing.T, eb *eventbus.EventBus) <-chan timestampedEvent {
t.Helper()
// Since eventCh is not read until the end of each height, it must be large
// enough to hold all of the events produced during a single height.
eventCh := make(chan timestampedEvent, 100)
if err := eb.Observe(ctx, func(msg tmpubsub.Message) error {
eventCh <- timestampedEvent{
ts: time.Now(),
m: msg,
}
return nil
}, types.EventQueryVote, types.EventQueryCompleteProposal); err != nil {
t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err)
}
return eventCh
}
// nolint: lll
func collectHeightResults(ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, address []byte) heightResult {
t.Helper()
var res heightResult
for event := range eventCh {
switch v := event.m.Data().(type) {
case types.EventDataVote:
if v.Vote.Height > height {
t.Fatalf("received prevote from unexpected height, expected: %d, saw: %d", height, v.Vote.Height)
}
if !bytes.Equal(address, v.Vote.ValidatorAddress) {
continue
}
if v.Vote.Type != tmproto.PrevoteType {
continue
}
res.prevote = v.Vote
res.prevoteIssuedAt = event.ts
case types.EventDataCompleteProposal:
if v.Height > height {
t.Fatalf("received proposal from unexpected height, expected: %d, saw: %d", height, v.Height)
}
res.proposalIssuedAt = event.ts
}
if res.isComplete() {
return res
}
}
t.Fatalf("complete height result never seen for height %d", height)
panic("unreachable")
}
type timestampedEvent struct {
ts time.Time
m tmpubsub.Message
}
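// run drives the harness through five heights. The observed validator proposes at
// heights 1 and 5, while the other validator stubs propose heights 2 through 4, so
// the returned resultSet captures measurements only for the genesis height, height 2,
// and height 5.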
func (p *pbtsTestHarness) run() resultSet {
startTestRound(p.ctx, p.observedState, p.currentHeight, p.currentRound)
r1 := p.observedValidatorProposerHeight(p.genesisTime)
r2 := p.height2()
p.intermediateHeights()
r5 := p.height5()
return resultSet{
genesisHeight: r1,
height2: r2,
height5: r5,
}
}
type resultSet struct {
genesisHeight heightResult
height2 heightResult
height5 heightResult
}
type heightResult struct {
proposalIssuedAt time.Time
prevote *types.Vote
prevoteIssuedAt time.Time
}
func (hr heightResult) isComplete() bool {
return !hr.proposalIssuedAt.IsZero() && !hr.prevoteIssuedAt.IsZero() && hr.prevote != nil
}
// TestProposerWaitsForGenesisTime tests that a proposer will not propose a block
// until after the genesis time has passed. The test sets the genesis time in the
// future and then ensures that the observed validator waits to propose a block.
func TestProposerWaitsForGenesisTime(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a genesis time far (enough) in the future.
initialTime := time.Now().Add(800 * time.Millisecond)
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 10 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 10 * time.Millisecond,
genesisTime: initialTime,
height2ProposalDeliverTime: initialTime.Add(10 * time.Millisecond),
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
}
pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run()
// ensure that the proposal was issued after the genesis time.
assert.True(t, results.genesisHeight.proposalIssuedAt.After(cfg.genesisTime))
}
// TestProposerWaitsForPreviousBlock tests that the proposer of a block waits until
// the block time of the previous height has passed to propose the next block.
// The test harness ensures that the observed validator will be the proposer at
// height 1 and height 5. The test sets the block time of height 4 in the future
// and then verifies that the observed validator waits until after the block time
// of height 4 to propose a block at height 5.
func TestProposerWaitsForPreviousBlock(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initialTime := time.Now().Add(time.Millisecond * 50)
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 100 * time.Millisecond,
MessageDelay: 500 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposalDeliverTime: initialTime.Add(150 * time.Millisecond),
height2ProposedBlockTime: initialTime.Add(100 * time.Millisecond),
height4ProposedBlockTime: initialTime.Add(800 * time.Millisecond),
}
pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run()
// the observed validator is the proposer at height 5.
// ensure that the observed validator did not propose a block until after
// the time configured for height 4.
assert.True(t, results.height5.proposalIssuedAt.After(cfg.height4ProposedBlockTime))
// Ensure that the validator issued a prevote for a non-nil block.
assert.NotNil(t, results.height5.prevote.BlockID.Hash)
}
func TestProposerWaitTime(t *testing.T) {
genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z")
require.NoError(t, err)
testCases := []struct {
name string
previousBlockTime time.Time
localTime time.Time
expectedWait time.Duration
}{
{
name: "block time greater than local time",
previousBlockTime: genesisTime.Add(5 * time.Nanosecond),
localTime: genesisTime.Add(1 * time.Nanosecond),
expectedWait: 4 * time.Nanosecond,
},
{
name: "local time greater than block time",
previousBlockTime: genesisTime.Add(1 * time.Nanosecond),
localTime: genesisTime.Add(5 * time.Nanosecond),
expectedWait: 0,
},
{
name: "both times equal",
previousBlockTime: genesisTime.Add(5 * time.Nanosecond),
localTime: genesisTime.Add(5 * time.Nanosecond),
expectedWait: 0,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
mockSource := new(tmtimemocks.Source)
mockSource.On("Now").Return(testCase.localTime)
ti := proposerWaitTime(mockSource, testCase.previousBlockTime)
assert.Equal(t, testCase.expectedWait, ti)
})
}
}
func TestTimelyProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initialTime := time.Now()
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 10 * time.Millisecond,
MessageDelay: 140 * time.Millisecond,
},
timeoutPropose: 40 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(30 * time.Millisecond),
}
pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run()
assert.NotNil(t, results.height2.prevote.BlockID.Hash)
}
func TestTooFarInThePastProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initialTime := time.Now()
// localtime > proposedBlockTime + MsgDelay + Precision
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 1 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(21 * time.Millisecond),
}
pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run()
time.Sleep(1 * time.Second)
assert.Nil(t, results.height2.prevote.BlockID.Hash)
}
func TestTooFarInTheFutureProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initialTime := time.Now()
// localtime < proposedBlockTime - Precision
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 1 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(100 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(10 * time.Millisecond),
height4ProposedBlockTime: initialTime.Add(150 * time.Millisecond),
}
pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run()
assert.Nil(t, results.height2.prevote.BlockID.Hash)
}


@@ -311,7 +311,7 @@ func TestReactorBasic(t *testing.T) {
cfg := configSetup(t)
n := 4
states, cleanup := randConsensusState(ctx, t,
states, cleanup := makeConsensusState(ctx, t,
cfg, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)
@@ -368,7 +368,8 @@ func TestReactorWithEvidence(t *testing.T) {
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore
genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30)
valSet, privVals := factory.ValidatorSet(n, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
states := make([]*State, n)
logger := consensusLogger()
@@ -382,7 +383,7 @@ func TestReactorWithEvidence(t *testing.T) {
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -470,8 +471,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
cfg := configSetup(t)
n := 4
states, cleanup := randConsensusState(
ctx,
states, cleanup := makeConsensusState(ctx,
t,
cfg,
n,
@@ -527,7 +527,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
cfg := configSetup(t)
n := 4
states, cleanup := randConsensusState(ctx, t,
states, cleanup := makeConsensusState(ctx, t,
cfg, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)
@@ -592,8 +592,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
cfg := configSetup(t)
n := 4
states, cleanup := randConsensusState(
ctx,
states, cleanup := makeConsensusState(ctx,
t,
cfg,
n,
@@ -703,6 +702,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
nVals := 4
states, _, _, cleanup := randConsensusNetWithPeers(
ctx,
t,
cfg,
nVals,
nPeers,


@@ -61,7 +61,7 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
logger := log.TestingLogger()
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
privValidator := loadPrivValidator(t, consensusReplayConfig)
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(
ctx,
@@ -166,7 +166,7 @@ LOOP:
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
privValidator := loadPrivValidator(t, consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
ctx,
logger,
@@ -335,6 +335,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
ctx,
t,
cfg,
nVals,
nPeers,
@@ -359,15 +360,15 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
// start the machine
startTestRound(ctx, css[0], height, round)
incrementHeight(vss...)
ensureNewRound(newRoundCh, height, 0)
ensureNewProposal(proposalCh, height, round)
ensureNewRound(t, newRoundCh, height, 0)
ensureNewProposal(t, proposalCh, height, round)
rs := css[0].GetRoundState()
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
// HEIGHT 2
height++
@@ -383,7 +384,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
proposal := types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time)
p := proposal.ToProto()
if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -394,12 +395,12 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
// HEIGHT 3
height++
@@ -415,7 +416,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
proposal = types.NewProposal(vss[2].Height, round, -1, blockID, propBlock.Header.Time)
p = proposal.ToProto()
if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -426,12 +427,12 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
signAddVotes(ctx, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
// HEIGHT 4
height++
@@ -474,7 +475,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
selfIndex := valIndexFn(0)
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
proposal = types.NewProposal(vss[3].Height, round, -1, blockID, propBlock.Header.Time)
p = proposal.ToProto()
if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -485,7 +486,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{})
@@ -496,12 +497,13 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if i == selfIndex {
continue
}
signAddVotes(ctx, sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
signAddVotes(ctx, css[0],
tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
// HEIGHT 5
height++
@@ -511,17 +513,18 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
newVss[newVssIdx].VotingPower = 25
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
ensureNewProposal(proposalCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(ctx, sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
signAddVotes(ctx, css[0],
tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
// HEIGHT 6
height++
@@ -537,7 +540,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time)
p = proposal.ToProto()
if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -548,17 +551,18 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
ensureNewProposal(t, proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+3; i++ {
if i == selfIndex {
continue
}
signAddVotes(ctx, sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
signAddVotes(ctx, css[0],
tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
ensureNewRound(t, newRoundCh, height+1, 0)
sim.Chain = make([]*types.Block, 0)
sim.Commits = make([]*types.Commit, 0)
@@ -812,7 +816,8 @@ func testHandshakeReplay(
}
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
if err := proxyApp.Start(ctx); err != nil {
@@ -821,7 +826,7 @@ func testHandshakeReplay(
t.Cleanup(func() { cancel(); proxyApp.Wait() })
err := handshaker.Handshake(ctx, proxyApp)
err = handshaker.Handshake(ctx, proxyApp)
if expectError {
require.Error(t, err)
return
@@ -1000,7 +1005,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
require.NoError(t, err)
stateDB, state, store := stateAndStore(cfg, pubKey, appVersion)
stateStore := sm.NewStore(stateDB)
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
blocks := sf.MakeBlocks(3, &state, privVal)
@@ -1273,7 +1279,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
val, _ := factory.RandValidator(true, 10)
votePower := 10 + int64(rand.Uint32())
val, _ := factory.Validator(votePower)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)

View File

@@ -713,6 +713,7 @@ func (cs *State) updateToState(state sm.State) {
cs.Validators = validators
cs.Proposal = nil
cs.ProposalReceiveTime = time.Time{}
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
cs.LockedRound = -1
@@ -1047,6 +1048,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
} else {
logger.Debug("resetting proposal info")
cs.Proposal = nil
cs.ProposalReceiveTime = time.Time{}
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
}
@@ -1069,9 +1071,10 @@ func (cs *State) enterNewRound(height int64, round int32) {
cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round,
cstypes.RoundStepNewRound)
}
} else {
cs.enterPropose(height, round)
return
}
cs.enterPropose(height, round)
}
// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
@@ -1104,6 +1107,16 @@ func (cs *State) enterPropose(height int64, round int32) {
return
}
// If this validator is the proposer of this round, and the previous block time is later than
// our local clock time, wait to propose until our local clock time has passed the block time.
if cs.privValidatorPubKey != nil && cs.isProposer(cs.privValidatorPubKey.Address()) {
proposerWaitTime := proposerWaitTime(tmtime.DefaultSource{}, cs.state.LastBlockTime)
if proposerWaitTime > 0 {
cs.scheduleTimeout(proposerWaitTime, height, round, cstypes.RoundStepNewRound)
return
}
}
logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
@@ -1128,8 +1141,6 @@ func (cs *State) enterPropose(height int64, round int32) {
return
}
logger.Debug("node is a validator")
if cs.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
// miss the opportunity to create a block.
@@ -1137,18 +1148,20 @@ func (cs *State) enterPropose(height int64, round int32) {
return
}
address := cs.privValidatorPubKey.Address()
addr := cs.privValidatorPubKey.Address()
// if not a validator, we're done
if !cs.Validators.HasAddress(address) {
logger.Debug("node is not a validator", "addr", address, "vals", cs.Validators)
if !cs.Validators.HasAddress(addr) {
logger.Debug("node is not a validator", "addr", addr, "vals", cs.Validators)
return
}
if cs.isProposer(address) {
logger.Debug("node is a validator")
if cs.isProposer(addr) {
logger.Debug(
"propose step; our turn to propose",
"proposer", address,
"proposer", addr,
)
cs.decideProposal(height, round)
@@ -1188,7 +1201,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) {
// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID)
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID, block.Header.Time)
p := proposal.ToProto()
// wait the max amount we would wait for a proposal
@@ -1269,8 +1282,11 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa
// Enter: `timeoutPropose` after entering Propose.
// Enter: proposal block and POL is ready.
// Prevote for LockedBlock if we're locked, or ProposalBlock if valid.
// Otherwise vote nil.
// If we received a valid proposal within this round and we are not locked on a block,
// we will prevote for the proposed block.
// Otherwise, if we receive a valid proposal that matches the block we are
// locked on, or that matches a block that received a POL in a round later than our
// locked round, we prevote for the proposal; otherwise, we prevote nil.
func (cs *State) enterPrevote(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
@@ -1297,19 +1313,47 @@ func (cs *State) enterPrevote(height int64, round int32) {
// (so we have more time to try and collect +2/3 prevotes for a single block)
}
func (cs *State) proposalIsTimely() bool {
sp := types.SynchronyParams{
Precision: cs.state.ConsensusParams.Synchrony.Precision,
MessageDelay: cs.state.ConsensusParams.Synchrony.MessageDelay,
}
return cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp, cs.state.InitialHeight)
}
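// For orientation, a hedged sketch of the window this check is expected to enforce,
// inferred from the SynchronyParams fields and from the bounds noted in the PBTS test
// comments ("localtime > proposedBlockTime + MsgDelay + Precision" for a proposal that
// is too far in the past, "localtime < proposedBlockTime - Precision" for one too far
// in the future); the authoritative logic lives in types.Proposal.IsTimely:
//
// proposalReceiveTime >= proposalTimestamp - Precision
// proposalReceiveTime <= proposalTimestamp + MessageDelay + Precision
//
// defaultDoPrevote below only applies this check when the proposal's POLRound is -1
// and the validator is not locked on a block (LockedRound == -1).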
func (cs *State) defaultDoPrevote(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
logger.Debug("prevote step; already locked on a block; prevoting locked block")
cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
// If a proposed block was not received within this round (for example, because we are
// executing this step from a timeout), prevote nil.
if cs.ProposalBlock == nil {
logger.Debug("prevote step: ProposalBlock is nil; prevoting nil")
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
// If ProposalBlock is nil, prevote nil.
if cs.ProposalBlock == nil {
logger.Debug("prevote step: ProposalBlock is nil")
if cs.Proposal == nil {
logger.Debug("prevote step: did not receive proposal; prevoting nil")
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) {
logger.Debug("prevote step: proposal timestamp not equal; prevoting nil")
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() {
logger.Debug("prevote step: Proposal is not timely; prevoting nil",
"proposed", tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano),
"received", tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano),
"msg_delay", cs.state.ConsensusParams.Synchrony.MessageDelay,
"precision", cs.state.ConsensusParams.Synchrony.Precision)
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
@@ -1318,16 +1362,72 @@ func (cs *State) defaultDoPrevote(height int64, round int32) {
err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock)
if err != nil {
// ProposalBlock is invalid, prevote nil.
logger.Error("prevote step: ProposalBlock is invalid", "err", err)
logger.Error("prevote step: ProposalBlock is invalid; prevoting nil", "err", err)
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
// Prevote cs.ProposalBlock
// NOTE: the proposal signature is validated when it is received,
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
logger.Debug("prevote step: ProposalBlock is valid")
cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
/*
22: upon <PROPOSAL, h_p, round_p, v, -1> from proposer(h_p, round_p) while step_p = propose do
23: if valid(v) && (lockedRound_p = -1 || lockedValue_p = v) then
24: broadcast <PREVOTE, h_p, round_p, id(v)>
Here, cs.Proposal.POLRound corresponds to the -1 in the above algorithm rule.
This means that the proposer is producing a new proposal that has not previously
received a 2/3 majority from the network.
If we have already locked on a value different from the proposed value, we prevote nil.
Otherwise, if we are not locked on a block or the proposal matches our locked block,
we prevote for the proposal.
*/
if cs.Proposal.POLRound == -1 {
if cs.LockedRound == -1 {
logger.Debug("prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal")
cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) {
logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal")
cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
}
/*
28: upon <PROPOSAL, h_p, round_p, v, v_r> from proposer(h_p, round_p) AND 2f + 1 <PREVOTE, h_p, v_r, id(v)> while
step_p = propose && (v_r ≥ 0 && v_r < round_p) do
29: if valid(v) && (lockedRound_p ≤ v_r || lockedValue_p = v) then
30: broadcast <PREVOTE, h_p, round_p, id(v)>
This rule is a bit confusing but breaks down as follows:
If we see a proposal in the current round for value 'v' that lists its valid round as 'v_r'
AND this validator saw a 2/3 majority of the voting power prevote 'v' in round 'v_r', then we will
issue a prevote for 'v' in this round if 'v' is valid and either matches our locked value OR
'v_r' is a round greater than or equal to our current locked round.
'v_r' can be a round greater than our current locked round if a 2/3 majority of
the network prevoted a value in round 'v_r' but we did not lock on it, possibly because we
missed the proposal in round 'v_r'.
*/
blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority()
if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round {
if cs.LockedRound <= cs.Proposal.POLRound {
logger.Debug("prevote step: ProposalBlock is valid and received a 2/3 " +
"majority in a round later than the locked round; prevoting the proposal")
cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) {
logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal")
cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
}
logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or " +
"did not receive a more recent majority; prevoting nil")
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
}
// Enter: any +2/3 prevotes at next round.
@@ -1365,7 +1465,6 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
// Enter: `timeoutPrecommit` after any +2/3 precommits.
// Enter: +2/3 precomits for block or nil.
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
func (cs *State) enterPrecommit(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
@@ -1412,47 +1511,50 @@ func (cs *State) enterPrecommit(height int64, round int32) {
panic(fmt.Sprintf("this POLRound should be %v but got %v", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
logger.Debug("precommit step; +2/3 prevoted for nil")
} else {
logger.Debug("precommit step; +2/3 prevoted for nil; unlocking")
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event unlock", "err", err)
}
}
// +2/3 prevoted nil. Precommit nil.
if blockID.IsNil() {
logger.Debug("precommit step: +2/3 prevoted for nil; precommitting nil")
cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}
// At this point, +2/3 prevoted for a particular block.
// If we never received a proposal for this block, we must precommit nil
if cs.Proposal == nil || cs.ProposalBlock == nil {
logger.Debug("precommit step: did not receive proposal; precommitting nil")
cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}
// At this point, +2/3 prevoted for a particular block.
// If the proposal time does not match the block time, precommit nil.
if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) {
logger.Debug("precommit step: proposal timestamp not equal; precommitting nil")
cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}
// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(blockID.Hash) {
logger.Debug("precommit step; +2/3 prevoted locked block; relocking")
logger.Debug("precommit step: +2/3 prevoted locked block; relocking")
cs.LockedRound = round
if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event relock", "err", err)
logger.Error("precommit step: failed publishing event relock", "err", err)
}
cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
return
}
// If +2/3 prevoted for proposal block, stage and precommit it
// If greater than 2/3 of the voting power on the network prevoted for
// the proposed block, update our locked block to this block and issue a
// precommit vote for it.
if cs.ProposalBlock.HashesTo(blockID.Hash) {
logger.Debug("precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash)
logger.Debug("precommit step: +2/3 prevoted proposal block; locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err))
panic(fmt.Sprintf("precommit step: +2/3 prevoted for an invalid block: %v", err))
}
cs.LockedRound = round
@@ -1460,7 +1562,7 @@ func (cs *State) enterPrecommit(height int64, round int32) {
cs.LockedBlockParts = cs.ProposalBlockParts
if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event lock", "err", err)
logger.Error("precommit step: failed publishing event lock", "err", err)
}
cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
@@ -1468,23 +1570,14 @@ func (cs *State) enterPrecommit(height int64, round int32) {
}
// There was a polka in this round for a block we don't have.
// Fetch that block, unlock, and precommit nil.
// The +2/3 prevotes for this round is the POL for our unlock.
logger.Debug("precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID)
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
// Fetch that block, and precommit nil.
logger.Debug("precommit step: +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID)
if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
}
if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event unlock", "err", err)
}
cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
}
@@ -1592,7 +1685,7 @@ func (cs *State) tryFinalizeCommit(height int64) {
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
if !ok || blockID.IsNil() {
logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil")
return
}
@@ -1823,9 +1916,11 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
//-----------------------------------------------------------------------------
func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
recvTime := tmtime.Now()
// Already have one
// TODO: possibly catch double proposals
if cs.Proposal != nil {
if cs.Proposal != nil || proposal == nil {
return nil
}
@@ -1850,6 +1945,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
proposal.Signature = p.Signature
cs.Proposal = proposal
cs.ProposalReceiveTime = recvTime
cs.calculateProposalTimestampDifferenceMetric()
// We don't update cs.ProposalBlockParts if it is already set.
// This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round.
// TODO: We can check if Proposal is for a different block as this is a sign of misbehavior!
@@ -1925,7 +2022,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Debug(
"updating valid block to new proposal block",
@@ -2074,33 +2171,13 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort())
// If +2/3 prevotes for a block or nil for *any* round:
if blockID, ok := prevotes.TwoThirdsMajority(); ok {
// There was a polka!
// If we're locked but this is a recent polka, unlock.
// If it matches our ProposalBlock, update the ValidBlock
// Unlock if `cs.LockedRound < vote.Round <= cs.Round`
// NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round
if (cs.LockedBlock != nil) &&
(cs.LockedRound < vote.Round) &&
(vote.Round <= cs.Round) &&
!cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Debug("unlocking because of POL", "locked_round", cs.LockedRound, "pol_round", vote.Round)
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
return added, err
}
}
// Check to see if >2/3 of the voting power on the network voted for any non-nil block.
if blockID, ok := prevotes.TwoThirdsMajority(); ok && !blockID.IsNil() {
// Greater than 2/3 of the voting power on the network voted for some
// non-nil block
// Update Valid* if we can.
// NOTE: our proposal block may be nil, or it may not be the block that received a polka.
if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) {
if cs.ValidRound < vote.Round && vote.Round == cs.Round {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round)
cs.ValidRound = vote.Round
@@ -2136,7 +2213,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round
blockID, ok := prevotes.TwoThirdsMajority()
if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) {
if ok && (cs.isProposalComplete() || blockID.IsNil()) {
cs.enterPrecommit(height, vote.Round)
} else if prevotes.HasTwoThirdsAny() {
cs.enterPrevoteWait(height, vote.Round)
@@ -2164,7 +2241,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
if len(blockID.Hash) != 0 {
if !blockID.IsNil() {
cs.enterCommit(height, vote.Round)
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
cs.enterNewRound(cs.Height, 0)
@@ -2404,3 +2481,30 @@ func repairWalFile(src, dst string) error {
return nil
}
func (cs *State) calculateProposalTimestampDifferenceMetric() {
if cs.Proposal != nil && cs.Proposal.POLRound == -1 {
tp := types.SynchronyParams{
Precision: cs.state.ConsensusParams.Synchrony.Precision,
MessageDelay: cs.state.ConsensusParams.Synchrony.MessageDelay,
}
isTimely := cs.Proposal.IsTimely(cs.ProposalReceiveTime, tp, cs.state.InitialHeight)
cs.metrics.ProposalTimestampDifference.With("is_timely", fmt.Sprintf("%t", isTimely)).
Observe(cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp).Seconds())
}
}
// proposerWaitTime determines how long the proposer should wait to propose its next block.
// If the result is zero, a block can be proposed immediately.
//
// Block times must be monotonically increasing, so if the previous block's time is
// later than the proposer's current local time, the proposer sleeps until its local
// clock exceeds the previous block time.
func proposerWaitTime(lt tmtime.Source, bt time.Time) time.Duration {
t := lt.Now()
if bt.After(t) {
return bt.Sub(t)
}
return 0
}
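// Worked example (hedged, illustrative values only): if the previous block time is
// 150ms ahead of the proposer's local clock, proposerWaitTime returns 150ms and
// enterPropose schedules a RoundStepNewRound timeout for that duration instead of
// proposing immediately; once the local clock has caught up, the function returns 0
// and the proposal proceeds without delay.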

File diff suppressed because it is too large.


@@ -32,7 +32,7 @@ func TestPeerCatchupRounds(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
valSet, privVals := factory.RandValidatorSet(10, 1)
valSet, privVals := factory.ValidatorSet(10, 1)
hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet)


@@ -71,14 +71,15 @@ type RoundState struct {
StartTime time.Time `json:"start_time"`
// Subjective time when +2/3 precommits for Block at Round were found
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalReceiveTime time.Time `json:"proposal_receive_time"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
// Last known round with POL for non-nil valid block.
ValidRound int32 `json:"valid_round"`


@@ -202,10 +202,6 @@ func (b *EventBus) PublishEventPolka(data types.EventDataRoundState) error {
return b.Publish(types.EventPolkaValue, data)
}
func (b *EventBus) PublishEventUnlock(data types.EventDataRoundState) error {
return b.Publish(types.EventUnlockValue, data)
}
func (b *EventBus) PublishEventRelock(data types.EventDataRoundState) error {
return b.Publish(types.EventRelockValue, data)
}


@@ -382,7 +382,6 @@ func TestEventBusPublish(t *testing.T) {
require.NoError(t, eventBus.PublishEventNewRound(types.EventDataNewRound{}))
require.NoError(t, eventBus.PublishEventCompleteProposal(types.EventDataCompleteProposal{}))
require.NoError(t, eventBus.PublishEventPolka(types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventUnlock(types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventRelock(types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventLock(types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdates{}))
@@ -488,7 +487,6 @@ var events = []string{
types.EventTimeoutProposeValue,
types.EventCompleteProposalValue,
types.EventPolkaValue,
types.EventUnlockValue,
types.EventLockValue,
types.EventRelockValue,
types.EventTimeoutWaitValue,
@@ -509,7 +507,6 @@ var queries = []tmpubsub.Query{
types.EventQueryTimeoutPropose,
types.EventQueryCompleteProposal,
types.EventQueryPolka,
types.EventQueryUnlock,
types.EventQueryLock,
types.EventQueryRelock,
types.EventQueryTimeoutWait,


@@ -38,7 +38,7 @@ func TestEvidencePoolBasic(t *testing.T) {
blockStore = &mocks.BlockStore{}
)
valSet, privVals := factory.RandValidatorSet(1, 10)
valSet, privVals := factory.ValidatorSet(1, 10)
blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}},


@@ -190,7 +190,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
}
func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10)
conflictingVals, conflictingPrivVals := factory.ValidatorSet(5, 10)
conflictingHeader, err := factory.MakeHeader(&types.Header{
ChainID: evidenceChainID,
@@ -285,7 +285,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
var height int64 = 10
conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10)
conflictingVals, conflictingPrivVals := factory.ValidatorSet(5, 10)
conflictingHeader, err := factory.MakeHeader(&types.Header{
ChainID: evidenceChainID,
@@ -474,14 +474,14 @@ func makeLunaticEvidence(
totalVals, byzVals, phantomVals int,
commonTime, attackTime time.Time,
) (ev *types.LightClientAttackEvidence, trusted *types.LightBlock, common *types.LightBlock) {
commonValSet, commonPrivVals := factory.RandValidatorSet(totalVals, defaultVotingPower)
commonValSet, commonPrivVals := factory.ValidatorSet(totalVals, defaultVotingPower)
require.Greater(t, totalVals, byzVals)
// extract out the subset of byzantine validators in the common validator set
byzValSet, byzPrivVals := commonValSet.Validators[:byzVals], commonPrivVals[:byzVals]
phantomValSet, phantomPrivVals := factory.RandValidatorSet(phantomVals, defaultVotingPower)
phantomValSet, phantomPrivVals := factory.ValidatorSet(phantomVals, defaultVotingPower)
conflictingVals := phantomValSet.Copy()
require.NoError(t, conflictingVals.UpdateWithChangeSet(byzValSet))
@@ -537,7 +537,7 @@ func makeLunaticEvidence(
ValidatorSet: commonValSet,
}
trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash())
trustedVals, privVals := factory.RandValidatorSet(totalVals, defaultVotingPower)
trustedVals, privVals := factory.ValidatorSet(totalVals, defaultVotingPower)
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals)
trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime)
require.NoError(t, err)


@@ -3,6 +3,7 @@ package state_test
import (
"bytes"
"fmt"
"math/rand"
"time"
dbm "github.com/tendermint/tm-db"
@@ -246,7 +247,7 @@ func makeRandomStateFromValidatorSet(
func makeRandomStateFromConsensusParams(consensusParams *types.ConsensusParams,
height, lastHeightConsensusParamsChanged int64) sm.State {
val, _ := factory.RandValidator(true, 10)
val, _ := factory.Validator(10 + int64(rand.Uint32()))
valSet := types.NewValidatorSet([]*types.Validator{val})
return sm.State{
LastBlockHeight: height - 1,


@@ -102,7 +102,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) {
func setupStateStore(t *testing.T, height int64) state.Store {
stateStore := state.NewStore(dbm.NewMemDB())
valSet, _ := factory.RandValidatorSet(5, 10)
valSet, _ := factory.ValidatorSet(5, 10)
params := types.DefaultConsensusParams()
params.Version.AppVersion = 10


@@ -268,7 +268,7 @@ func (state State) MakeBlock(
if height == state.InitialHeight {
timestamp = state.LastBlockTime // genesis time
} else {
timestamp = MedianTime(commit, state.LastValidators)
timestamp = time.Now()
}
// Fill rest of header with state data.
@@ -283,29 +283,6 @@ func (state State) MakeBlock(
return block, block.MakePartSet(types.BlockPartSizeBytes)
}
// MedianTime computes a median time for a given Commit (based on the Timestamp field of its vote
// messages) and the corresponding validator set. The computed time is always between the
// timestamps of the votes sent by honest processes, i.e., faulty processes cannot arbitrarily
// increase or decrease the computed value.
func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time {
weightedTimes := make([]*weightedTime, len(commit.Signatures))
totalVotingPower := int64(0)
for i, commitSig := range commit.Signatures {
if commitSig.Absent() {
continue
}
_, validator := validators.GetByAddress(commitSig.ValidatorAddress)
// If there's no condition, TestValidateBlockCommit panics; not needed normally.
if validator != nil {
totalVotingPower += validator.VotingPower
weightedTimes[i] = newWeightedTime(commitSig.Timestamp, validator.VotingPower)
}
}
return weightedMedian(weightedTimes, totalVotingPower)
}
//------------------------------------------------------------------------
// Genesis


@@ -2,6 +2,7 @@ package state_test
import (
"fmt"
"math/rand"
"os"
"testing"
@@ -28,9 +29,9 @@ const (
func TestStoreBootstrap(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
val, _ := factory.RandValidator(true, 10)
val2, _ := factory.RandValidator(true, 10)
val3, _ := factory.RandValidator(true, 10)
val, _ := factory.Validator(10 + int64(rand.Uint32()))
val2, _ := factory.Validator(10 + int64(rand.Uint32()))
val3, _ := factory.Validator(10 + int64(rand.Uint32()))
vals := types.NewValidatorSet([]*types.Validator{val, val2, val3})
bootstrapState := makeRandomStateFromValidatorSet(vals, 100, 100)
err := stateStore.Bootstrap(bootstrapState)
@@ -54,9 +55,9 @@ func TestStoreBootstrap(t *testing.T) {
func TestStoreLoadValidators(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
val, _ := factory.RandValidator(true, 10)
val2, _ := factory.RandValidator(true, 10)
val3, _ := factory.RandValidator(true, 10)
val, _ := factory.Validator(10 + int64(rand.Uint32()))
val2, _ := factory.Validator(10 + int64(rand.Uint32()))
val3, _ := factory.Validator(10 + int64(rand.Uint32()))
vals := types.NewValidatorSet([]*types.Validator{val, val2, val3})
// 1) LoadValidators loads validators using a height where they were last changed


@@ -114,13 +114,6 @@ func validateBlock(state State, block *types.Block) error {
state.LastBlockTime,
)
}
medianTime := MedianTime(block.LastCommit, state.LastValidators)
if !block.Time.Equal(medianTime) {
return fmt.Errorf("invalid block time. Expected %v, got %v",
medianTime,
block.Time,
)
}
case block.Height == state.InitialHeight:
genesisTime := state.LastBlockTime


@@ -64,7 +64,6 @@ func TestValidateBlockHeader(t *testing.T) {
{"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }},
{"Height wrong", func(block *types.Block) { block.Height += 10 }},
{"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }},
{"Time wrong 2", func(block *types.Block) { block.Time = block.Time.Add(time.Second * 1) }},
{"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartSetHeader.Total += 10 }},
{"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }},


@@ -274,7 +274,7 @@ loop:
}
func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse {
vals, pv := factory.RandValidatorSet(3, 10)
vals, pv := factory.ValidatorSet(3, 10)
_, _, lb := mockLB(t, height, time, factory.MakeBlockID(), vals, pv)
return lightBlockResponse{
block: lb,


@@ -423,7 +423,7 @@ func TestReactor_LightBlockResponse(t *testing.T) {
h := factory.MakeRandomHeader()
h.Height = height
blockID := factory.MakeBlockIDWithHash(h.Hash())
vals, pv := factory.RandValidatorSet(1, 10)
vals, pv := factory.ValidatorSet(1, 10)
vote, err := factory.MakeVote(pv[0], h.ChainID, 0, h.Height, 0, 2,
blockID, factory.DefaultTestTime)
require.NoError(t, err)
@@ -714,7 +714,7 @@ func handleLightBlockRequests(
} else {
switch errorCount % 3 {
case 0: // send a different block
vals, pv := factory.RandValidatorSet(3, 10)
vals, pv := factory.ValidatorSet(3, 10)
_, _, lb := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv)
differntLB, err := lb.ToProto()
require.NoError(t, err)
@@ -782,7 +782,7 @@ func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime ti
chain := make(map[int64]*types.LightBlock, toHeight-fromHeight)
lastBlockID := factory.MakeBlockID()
blockTime := startTime.Add(time.Duration(fromHeight-toHeight) * time.Minute)
vals, pv := factory.RandValidatorSet(3, 10)
vals, pv := factory.ValidatorSet(3, 10)
for height := fromHeight; height < toHeight; height++ {
vals, pv, chain[height] = mockLB(t, height, blockTime, lastBlockID, vals, pv)
lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash())
@@ -800,7 +800,7 @@ func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockI
Time: time,
})
require.NoError(t, err)
nextVals, nextPrivVals := factory.RandValidatorSet(3, 10)
nextVals, nextPrivVals := factory.ValidatorSet(3, 10)
header.ValidatorsHash = currentVals.Hash()
header.NextValidatorsHash = nextVals.Hash()
header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams()


@@ -55,7 +55,7 @@ func MakeHeader(h *types.Header) (*types.Header, error) {
if h.Height == 0 {
h.Height = 1
}
if h.LastBlockID.IsZero() {
if h.LastBlockID.IsNil() {
h.LastBlockID = MakeBlockID()
}
if h.ChainID == "" {


@@ -1,35 +1,33 @@
package factory
import (
"sort"
"time"
"github.com/tendermint/tendermint/config"
tmtime "github.com/tendermint/tendermint/libs/time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"
)
func RandGenesisDoc(
cfg *config.Config,
numValidators int,
randPower bool,
minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
func GenesisDoc(
config *cfg.Config,
time time.Time,
validators []*types.Validator,
consensusParams *types.ConsensusParams,
) *types.GenesisDoc {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
genesisValidators := make([]types.GenesisValidator, len(validators))
for i := range validators {
genesisValidators[i] = types.GenesisValidator{
Power: validators[i].VotingPower,
PubKey: validators[i].PubKey,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
InitialHeight: 1,
ChainID: cfg.ChainID(),
Validators: validators,
}, privValidators
GenesisTime: time,
InitialHeight: 1,
ChainID: config.ChainID(),
Validators: genesisValidators,
ConsensusParams: consensusParams,
}
}


@@ -3,35 +3,29 @@ package factory
import (
"context"
"fmt"
"math/rand"
"sort"
"github.com/tendermint/tendermint/types"
)
func RandValidator(randPower bool, minPower int64) (*types.Validator, types.PrivValidator) {
func Validator(votingPower int64) (*types.Validator, types.PrivValidator) {
privVal := types.NewMockPV()
votePower := minPower
if randPower {
// nolint:gosec // G404: Use of weak random number generator
votePower += int64(rand.Uint32())
}
pubKey, err := privVal.GetPubKey(context.Background())
if err != nil {
panic(fmt.Errorf("could not retrieve pubkey %w", err))
}
val := types.NewValidator(pubKey, votePower)
val := types.NewValidator(pubKey, votingPower)
return val, privVal
}
func RandValidatorSet(numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) {
func ValidatorSet(numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) {
var (
valz = make([]*types.Validator, numValidators)
privValidators = make([]types.PrivValidator, numValidators)
)
for i := 0; i < numValidators; i++ {
val, privValidator := RandValidator(false, votingPower)
val, privValidator := Validator(votingPower)
valz[i] = val
privValidators[i] = privValidator
}

libs/time/mocks/source.go (new file)

@@ -0,0 +1,28 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
time "time"
mock "github.com/stretchr/testify/mock"
)
// Source is an autogenerated mock type for the Source type
type Source struct {
mock.Mock
}
// Now provides a mock function with given fields:
func (_m *Source) Now() time.Time {
ret := _m.Called()
var r0 time.Time
if rf, ok := ret.Get(0).(func() time.Time); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}


@@ -15,3 +15,17 @@ func Now() time.Time {
func Canonical(t time.Time) time.Time {
return t.Round(0).UTC()
}
//go:generate ../../scripts/mockery_generate.sh Source
// Source is an interface that defines a way to fetch the current time.
type Source interface {
Now() time.Time
}
// DefaultSource implements the Source interface using the system clock provided by the standard library.
type DefaultSource struct{}
func (DefaultSource) Now() time.Time {
return Now()
}
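
Because the generated mock embeds testify's mock.Mock, tests can pin the clock anywhere a Source is accepted. A minimal sketch (the variable name and the chosen timestamp are illustrative):

src := new(mocks.Source)
src.On("Now").Return(time.Unix(1000000, 0).UTC())
now := src.Now() // returns the stubbed time instead of the system clock
_ = now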


@@ -112,7 +112,7 @@ func TestValidateTrustOptions(t *testing.T) {
func TestClient_SequentialVerification(t *testing.T) {
newKeys := genPrivKeys(4)
newVals := newKeys.ToValidators(10, 1)
differentVals, _ := factory.RandValidatorSet(10, 100)
differentVals, _ := factory.ValidatorSet(10, 100)
testCases := []struct {
name string
@@ -865,7 +865,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
}
func TestClient_TrustedValidatorSet(t *testing.T) {
differentVals, _ := factory.RandValidatorSet(10, 100)
differentVals, _ := factory.ValidatorSet(10, 100)
mockBadValSetNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,


@@ -183,7 +183,7 @@ func Test_Concurrency(t *testing.T) {
}
func randLightBlock(height int64) *types.LightBlock {
vals, _ := factory.RandValidatorSet(2, 1)
vals, _ := factory.ValidatorSet(2, 1)
return &types.LightBlock{
SignedHeader: &types.SignedHeader{
Header: &types.Header{


@@ -737,7 +737,8 @@ func loadStatefromGenesis(t *testing.T) sm.State {
require.NoError(t, err)
require.True(t, loadedState.IsEmpty())
genDoc, _ := factory.RandGenesisDoc(cfg, 0, false, 10)
valSet, _ := factory.ValidatorSet(0, 10)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
state, err := loadStateFromDBOrGenesisDocProvider(
stateStore,


@@ -7,9 +7,9 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
func TestValidateMsg(t *testing.T) {
@@ -186,10 +186,30 @@ func TestStateSyncVectors(t *testing.T) {
{
"ParamsResponse",
&ssproto.ParamsResponse{
Height: 9001,
ConsensusParams: types.DefaultConsensusParams().ToProto(),
Height: 9001,
ConsensusParams: tmproto.ConsensusParams{
Block: &tmproto.BlockParams{
MaxBytes: 10,
MaxGas: 20,
},
Evidence: &tmproto.EvidenceParams{
MaxAgeNumBlocks: 10,
MaxAgeDuration: 300,
MaxBytes: 100,
},
Validator: &tmproto.ValidatorParams{
PubKeyTypes: []string{ed25519.KeyType},
},
Version: &tmproto.VersionParams{
AppVersion: 11,
},
Synchrony: &tmproto.SynchronyParams{
MessageDelay: 550,
Precision: 90,
},
},
},
"423408a946122f0a10088080c00a10ffffffffffffffffff01120e08a08d0612040880c60a188080401a090a07656432353531392200",
"423008a946122b0a04080a10141209080a120310ac0218641a090a07656432353531392202080b2a090a0310a6041202105a",
},
}


@@ -34,6 +34,7 @@ type ConsensusParams struct {
Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"`
Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"`
Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"`
}
func (m *ConsensusParams) Reset() { *m = ConsensusParams{} }
@@ -97,6 +98,13 @@ func (m *ConsensusParams) GetVersion() *VersionParams {
return nil
}
func (m *ConsensusParams) GetSynchrony() *SynchronyParams {
if m != nil {
return m.Synchrony
}
return nil
}
// BlockParams contains limits on the block size.
type BlockParams struct {
// Max block size, in bytes.
@@ -373,6 +381,58 @@ func (m *HashedParams) GetBlockMaxGas() int64 {
return 0
}
type SynchronyParams struct {
MessageDelay time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay"`
Precision time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision"`
}
func (m *SynchronyParams) Reset() { *m = SynchronyParams{} }
func (m *SynchronyParams) String() string { return proto.CompactTextString(m) }
func (*SynchronyParams) ProtoMessage() {}
func (*SynchronyParams) Descriptor() ([]byte, []int) {
return fileDescriptor_e12598271a686f57, []int{6}
}
func (m *SynchronyParams) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SynchronyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SynchronyParams.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *SynchronyParams) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronyParams.Merge(m, src)
}
func (m *SynchronyParams) XXX_Size() int {
return m.Size()
}
func (m *SynchronyParams) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronyParams.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo
func (m *SynchronyParams) GetMessageDelay() time.Duration {
if m != nil {
return m.MessageDelay
}
return 0
}
func (m *SynchronyParams) GetPrecision() time.Duration {
if m != nil {
return m.Precision
}
return 0
}
func init() {
proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams")
proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams")
@@ -380,44 +440,49 @@ func init() {
proto.RegisterType((*ValidatorParams)(nil), "tendermint.types.ValidatorParams")
proto.RegisterType((*VersionParams)(nil), "tendermint.types.VersionParams")
proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams")
proto.RegisterType((*SynchronyParams)(nil), "tendermint.types.SynchronyParams")
}
func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) }
var fileDescriptor_e12598271a686f57 = []byte{
// 498 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xc1, 0x6a, 0xd4, 0x40,
0x1c, 0xc6, 0x77, 0x9a, 0xda, 0xee, 0xfe, 0xe3, 0x76, 0xcb, 0x20, 0x18, 0x2b, 0xcd, 0xae, 0x39,
0x48, 0x41, 0x48, 0xc4, 0x22, 0x22, 0x08, 0xe2, 0x56, 0xa9, 0x20, 0x15, 0x09, 0xea, 0xa1, 0x97,
0x30, 0xd9, 0x8c, 0x69, 0xe8, 0x4e, 0x66, 0xc8, 0x24, 0xcb, 0xee, 0xcd, 0x47, 0xf0, 0xe8, 0x23,
0xe8, 0x9b, 0xf4, 0xd8, 0xa3, 0x27, 0x95, 0xdd, 0x17, 0x91, 0x4c, 0x32, 0xa6, 0x9b, 0xf6, 0x36,
0x33, 0xdf, 0xef, 0x9b, 0xe1, 0xfb, 0x86, 0x3f, 0xec, 0xe7, 0x34, 0x8d, 0x68, 0xc6, 0x92, 0x34,
0xf7, 0xf2, 0x85, 0xa0, 0xd2, 0x13, 0x24, 0x23, 0x4c, 0xba, 0x22, 0xe3, 0x39, 0xc7, 0xbb, 0x8d,
0xec, 0x2a, 0x79, 0xef, 0x4e, 0xcc, 0x63, 0xae, 0x44, 0xaf, 0x5c, 0x55, 0xdc, 0x9e, 0x1d, 0x73,
0x1e, 0x4f, 0xa9, 0xa7, 0x76, 0x61, 0xf1, 0xc5, 0x8b, 0x8a, 0x8c, 0xe4, 0x09, 0x4f, 0x2b, 0xdd,
0xf9, 0xba, 0x01, 0x83, 0x23, 0x9e, 0x4a, 0x9a, 0xca, 0x42, 0x7e, 0x50, 0x2f, 0xe0, 0x43, 0xb8,
0x15, 0x4e, 0xf9, 0xe4, 0xdc, 0x42, 0x23, 0x74, 0x60, 0x3e, 0xd9, 0x77, 0xdb, 0x6f, 0xb9, 0xe3,
0x52, 0xae, 0x68, 0xbf, 0x62, 0xf1, 0x0b, 0xe8, 0xd2, 0x59, 0x12, 0xd1, 0x74, 0x42, 0xad, 0x0d,
0xe5, 0x1b, 0x5d, 0xf7, 0xbd, 0xa9, 0x89, 0xda, 0xfa, 0xdf, 0x81, 0x5f, 0x42, 0x6f, 0x46, 0xa6,
0x49, 0x44, 0x72, 0x9e, 0x59, 0x86, 0xb2, 0x3f, 0xb8, 0x6e, 0xff, 0xac, 0x91, 0xda, 0xdf, 0x78,
0xf0, 0x73, 0xd8, 0x9e, 0xd1, 0x4c, 0x26, 0x3c, 0xb5, 0x36, 0x95, 0x7d, 0x78, 0x83, 0xbd, 0x02,
0x6a, 0xb3, 0xe6, 0x9d, 0x23, 0x30, 0xaf, 0xe4, 0xc1, 0xf7, 0xa1, 0xc7, 0xc8, 0x3c, 0x08, 0x17,
0x39, 0x95, 0xaa, 0x01, 0xc3, 0xef, 0x32, 0x32, 0x1f, 0x97, 0x7b, 0x7c, 0x17, 0xb6, 0x4b, 0x31,
0x26, 0x52, 0x85, 0x34, 0xfc, 0x2d, 0x46, 0xe6, 0xc7, 0x44, 0x3a, 0x3f, 0x11, 0xec, 0xac, 0xa7,
0xc3, 0x8f, 0x00, 0x97, 0x2c, 0x89, 0x69, 0x90, 0x16, 0x2c, 0x50, 0x35, 0xe9, 0x1b, 0x07, 0x8c,
0xcc, 0x5f, 0xc5, 0xf4, 0x7d, 0xc1, 0xd4, 0xd3, 0x12, 0x9f, 0xc0, 0xae, 0x86, 0xf5, 0x0f, 0xd5,
0x35, 0xde, 0x73, 0xab, 0x2f, 0x74, 0xf5, 0x17, 0xba, 0xaf, 0x6b, 0x60, 0xdc, 0xbd, 0xf8, 0x3d,
0xec, 0x7c, 0xff, 0x33, 0x44, 0xfe, 0x4e, 0x75, 0x9f, 0x56, 0xd6, 0x43, 0x18, 0xeb, 0x21, 0x9c,
0xa7, 0x30, 0x68, 0x35, 0x89, 0x1d, 0xe8, 0x8b, 0x22, 0x0c, 0xce, 0xe9, 0x22, 0x50, 0x5d, 0x59,
0x68, 0x64, 0x1c, 0xf4, 0x7c, 0x53, 0x14, 0xe1, 0x3b, 0xba, 0xf8, 0x58, 0x1e, 0x39, 0x8f, 0xa1,
0xbf, 0xd6, 0x20, 0x1e, 0x82, 0x49, 0x84, 0x08, 0x74, 0xef, 0x65, 0xb2, 0x4d, 0x1f, 0x88, 0x10,
0x35, 0xe6, 0x9c, 0xc2, 0xed, 0xb7, 0x44, 0x9e, 0xd1, 0xa8, 0x36, 0x3c, 0x84, 0x81, 0x6a, 0x21,
0x68, 0x17, 0xdc, 0x57, 0xc7, 0x27, 0xba, 0x65, 0x07, 0xfa, 0x0d, 0xd7, 0x74, 0x6d, 0x6a, 0xea,
0x98, 0xc8, 0xf1, 0xa7, 0x1f, 0x4b, 0x1b, 0x5d, 0x2c, 0x6d, 0x74, 0xb9, 0xb4, 0xd1, 0xdf, 0xa5,
0x8d, 0xbe, 0xad, 0xec, 0xce, 0xe5, 0xca, 0xee, 0xfc, 0x5a, 0xd9, 0x9d, 0xd3, 0x67, 0x71, 0x92,
0x9f, 0x15, 0xa1, 0x3b, 0xe1, 0xcc, 0xbb, 0x3a, 0x48, 0xcd, 0xb2, 0x9a, 0x94, 0xf6, 0x90, 0x85,
0x5b, 0xea, 0xfc, 0xf0, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x54, 0x4f, 0xe1, 0x7f, 0x03,
0x00, 0x00,
// 561 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xd4, 0x40,
0x18, 0xc7, 0x37, 0xdd, 0xbe, 0xec, 0x3e, 0xdb, 0xed, 0x96, 0x41, 0x30, 0x56, 0x9a, 0x5d, 0x73,
0x90, 0x82, 0x90, 0x88, 0x45, 0x44, 0x10, 0xa4, 0xdb, 0x8a, 0x05, 0xa9, 0x48, 0x7c, 0x39, 0xf4,
0x12, 0x26, 0xbb, 0x63, 0x36, 0x74, 0x93, 0x19, 0x32, 0xc9, 0xb2, 0xf9, 0x16, 0x1e, 0x3d, 0x79,
0xd6, 0x8f, 0xe1, 0xad, 0xc7, 0x1e, 0x3d, 0xa9, 0xec, 0x7e, 0x11, 0x99, 0xc9, 0x4c, 0xd3, 0xdd,
0x2a, 0xd8, 0x5b, 0x32, 0xcf, 0xef, 0x97, 0x87, 0xf9, 0x3f, 0x93, 0x81, 0xdd, 0x8c, 0x24, 0x43,
0x92, 0xc6, 0x51, 0x92, 0xb9, 0x59, 0xc1, 0x08, 0x77, 0x19, 0x4e, 0x71, 0xcc, 0x1d, 0x96, 0xd2,
0x8c, 0xa2, 0xed, 0xaa, 0xec, 0xc8, 0xf2, 0xce, 0xad, 0x90, 0x86, 0x54, 0x16, 0x5d, 0xf1, 0x54,
0x72, 0x3b, 0x56, 0x48, 0x69, 0x38, 0x26, 0xae, 0x7c, 0x0b, 0xf2, 0x8f, 0xee, 0x30, 0x4f, 0x71,
0x16, 0xd1, 0xa4, 0xac, 0xdb, 0xdf, 0x57, 0xa0, 0x73, 0x48, 0x13, 0x4e, 0x12, 0x9e, 0xf3, 0x37,
0xb2, 0x03, 0xda, 0x87, 0xb5, 0x60, 0x4c, 0x07, 0x67, 0xa6, 0xd1, 0x33, 0xf6, 0x5a, 0x8f, 0x76,
0x9d, 0xe5, 0x5e, 0x4e, 0x5f, 0x94, 0x4b, 0xda, 0x2b, 0x59, 0xf4, 0x0c, 0x1a, 0x64, 0x12, 0x0d,
0x49, 0x32, 0x20, 0xe6, 0x8a, 0xf4, 0x7a, 0xd7, 0xbd, 0x17, 0x8a, 0x50, 0xea, 0xa5, 0x81, 0x9e,
0x43, 0x73, 0x82, 0xc7, 0xd1, 0x10, 0x67, 0x34, 0x35, 0xeb, 0x52, 0xbf, 0x77, 0x5d, 0xff, 0xa0,
0x11, 0xe5, 0x57, 0x0e, 0x7a, 0x0a, 0x1b, 0x13, 0x92, 0xf2, 0x88, 0x26, 0xe6, 0xaa, 0xd4, 0xbb,
0x7f, 0xd1, 0x4b, 0x40, 0xc9, 0x9a, 0x17, 0xbd, 0x79, 0x91, 0x0c, 0x46, 0x29, 0x4d, 0x0a, 0x73,
0xed, 0x5f, 0xbd, 0xdf, 0x6a, 0x44, 0xf7, 0xbe, 0x74, 0xec, 0x43, 0x68, 0x5d, 0x09, 0x04, 0xdd,
0x85, 0x66, 0x8c, 0xa7, 0x7e, 0x50, 0x64, 0x84, 0xcb, 0x08, 0xeb, 0x5e, 0x23, 0xc6, 0xd3, 0xbe,
0x78, 0x47, 0xb7, 0x61, 0x43, 0x14, 0x43, 0xcc, 0x65, 0x4a, 0x75, 0x6f, 0x3d, 0xc6, 0xd3, 0x97,
0x98, 0xdb, 0xdf, 0x0c, 0xd8, 0x5a, 0x8c, 0x07, 0x3d, 0x00, 0x24, 0x58, 0x1c, 0x12, 0x3f, 0xc9,
0x63, 0x5f, 0xe6, 0xac, 0xbf, 0xd8, 0x89, 0xf1, 0xf4, 0x20, 0x24, 0xaf, 0xf3, 0x58, 0xb6, 0xe6,
0xe8, 0x04, 0xb6, 0x35, 0xac, 0x47, 0xac, 0xe6, 0x70, 0xc7, 0x29, 0xcf, 0x80, 0xa3, 0xcf, 0x80,
0x73, 0xa4, 0x80, 0x7e, 0xe3, 0xfc, 0x67, 0xb7, 0xf6, 0xf9, 0x57, 0xd7, 0xf0, 0xb6, 0xca, 0xef,
0xe9, 0xca, 0xe2, 0x26, 0xea, 0x8b, 0x9b, 0xb0, 0x1f, 0x43, 0x67, 0x69, 0x14, 0xc8, 0x86, 0x36,
0xcb, 0x03, 0xff, 0x8c, 0x14, 0xbe, 0xcc, 0xcb, 0x34, 0x7a, 0xf5, 0xbd, 0xa6, 0xd7, 0x62, 0x79,
0xf0, 0x8a, 0x14, 0xef, 0xc4, 0x92, 0xfd, 0x10, 0xda, 0x0b, 0x23, 0x40, 0x5d, 0x68, 0x61, 0xc6,
0x7c, 0x3d, 0x38, 0xb1, 0xb3, 0x55, 0x0f, 0x30, 0x63, 0x0a, 0xb3, 0x4f, 0x61, 0xf3, 0x18, 0xf3,
0x11, 0x19, 0x2a, 0xe1, 0x3e, 0x74, 0x64, 0x0a, 0xfe, 0x72, 0xc0, 0x6d, 0xb9, 0x7c, 0xa2, 0x53,
0xb6, 0xa1, 0x5d, 0x71, 0x55, 0xd6, 0x2d, 0x4d, 0x89, 0xc0, 0xbf, 0x18, 0xd0, 0x59, 0x1a, 0x2a,
0x3a, 0x86, 0x76, 0x4c, 0x38, 0x97, 0x21, 0x92, 0x31, 0x2e, 0xd4, 0x1f, 0xf0, 0x5f, 0x09, 0x6e,
0x2a, 0xf3, 0x48, 0x88, 0xe8, 0x00, 0x9a, 0x2c, 0x25, 0x83, 0x88, 0xdf, 0x70, 0x0e, 0x95, 0xd5,
0x7f, 0xff, 0x75, 0x66, 0x19, 0xe7, 0x33, 0xcb, 0xb8, 0x98, 0x59, 0xc6, 0xef, 0x99, 0x65, 0x7c,
0x9a, 0x5b, 0xb5, 0x8b, 0xb9, 0x55, 0xfb, 0x31, 0xb7, 0x6a, 0xa7, 0x4f, 0xc2, 0x28, 0x1b, 0xe5,
0x81, 0x33, 0xa0, 0xb1, 0x7b, 0xf5, 0xaa, 0xa8, 0x1e, 0xcb, 0xbb, 0x60, 0xf9, 0x1a, 0x09, 0xd6,
0xe5, 0xfa, 0xfe, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x26, 0x8a, 0x0b, 0x61, 0x04, 0x00,
0x00,
}
func (this *ConsensusParams) Equal(that interface{}) bool {
@@ -451,6 +516,9 @@ func (this *ConsensusParams) Equal(that interface{}) bool {
if !this.Version.Equal(that1.Version) {
return false
}
if !this.Synchrony.Equal(that1.Synchrony) {
return false
}
return true
}
func (this *BlockParams) Equal(that interface{}) bool {
@@ -590,6 +658,33 @@ func (this *HashedParams) Equal(that interface{}) bool {
}
return true
}
func (this *SynchronyParams) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*SynchronyParams)
if !ok {
that2, ok := that.(SynchronyParams)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.MessageDelay != that1.MessageDelay {
return false
}
if this.Precision != that1.Precision {
return false
}
return true
}
func (m *ConsensusParams) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -610,6 +705,18 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.Synchrony != nil {
{
size, err := m.Synchrony.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintParams(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
if m.Version != nil {
{
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
@@ -719,12 +826,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x18
}
n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):])
if err5 != nil {
return 0, err5
n6, err6 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):])
if err6 != nil {
return 0, err6
}
i -= n5
i = encodeVarintParams(dAtA, i, uint64(n5))
i -= n6
i = encodeVarintParams(dAtA, i, uint64(n6))
i--
dAtA[i] = 0x12
if m.MaxAgeNumBlocks != 0 {
@@ -828,6 +935,45 @@ func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *SynchronyParams) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SynchronyParams) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision):])
if err7 != nil {
return 0, err7
}
i -= n7
i = encodeVarintParams(dAtA, i, uint64(n7))
i--
dAtA[i] = 0x12
n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay):])
if err8 != nil {
return 0, err8
}
i -= n8
i = encodeVarintParams(dAtA, i, uint64(n8))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintParams(dAtA []byte, offset int, v uint64) int {
offset -= sovParams(v)
base := offset
@@ -861,6 +1007,10 @@ func (m *ConsensusParams) Size() (n int) {
l = m.Version.Size()
n += 1 + l + sovParams(uint64(l))
}
if m.Synchrony != nil {
l = m.Synchrony.Size()
n += 1 + l + sovParams(uint64(l))
}
return n
}
@@ -938,6 +1088,19 @@ func (m *HashedParams) Size() (n int) {
return n
}
func (m *SynchronyParams) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MessageDelay)
n += 1 + l + sovParams(uint64(l))
l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Precision)
n += 1 + l + sovParams(uint64(l))
return n
}
func sovParams(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -1117,6 +1280,42 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Synchrony", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthParams
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthParams
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Synchrony == nil {
m.Synchrony = &SynchronyParams{}
}
if err := m.Synchrony.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipParams(dAtA[iNdEx:])
@@ -1586,6 +1785,122 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *SynchronyParams) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SynchronyParams: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SynchronyParams: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MessageDelay", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthParams
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthParams
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Precision", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowParams
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthParams
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthParams
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Precision, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipParams(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthParams
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
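
The new message round-trips through the generated code like any other gogoproto type; a minimal sketch exercising the marshal/unmarshal paths added above (the durations are arbitrary and not taken from the diff):

sp := tmproto.SynchronyParams{MessageDelay: 550 * time.Millisecond, Precision: 90 * time.Millisecond}
bz, err := sp.Marshal()
if err != nil {
	panic(err)
}
var out tmproto.SynchronyParams
if err := out.Unmarshal(bz); err != nil {
	panic(err)
}
// out.Equal(sp) now holds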
func skipParams(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0


@@ -286,7 +286,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc
func mutateValidatorSet(privVals []types.MockPV, vals *types.ValidatorSet,
) ([]types.PrivValidator, *types.ValidatorSet, error) {
newVal, newPrivVal := factory.RandValidator(false, 10)
newVal, newPrivVal := factory.Validator(10)
var newVals *types.ValidatorSet
if vals.Size() > 2 {


@@ -144,4 +144,4 @@ extend google.protobuf.FieldOptions {
optional bool wktpointer = 65012;
optional string castrepeated = 65013;
}
}


@@ -1,4 +0,0 @@
ARG TENDERMINT_VERSION=latest
FROM tendermint/tendermint:${TENDERMINT_VERSION}
COPY tm-signer-harness /usr/bin/tm-signer-harness


@@ -1,21 +0,0 @@
.PHONY: build install docker-image
TENDERMINT_VERSION?=latest
BUILD_TAGS?='tendermint'
VERSION := $(shell git describe --always)
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)"
.DEFAULT_GOAL := build
build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o ../../build/tm-signer-harness main.go
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) .
docker-image:
GOOS=linux GOARCH=amd64 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o tm-signer-harness main.go
docker build \
--build-arg TENDERMINT_VERSION=$(TENDERMINT_VERSION) \
-t tendermint/tm-signer-harness:$(TENDERMINT_VERSION) .
rm -rf tm-signer-harness


@@ -1,5 +0,0 @@
# tm-signer-harness
See the [`tm-signer-harness`
documentation](https://tendermint.com/docs/tools/remote-signer-validation.html)
for more details.


@@ -1,427 +0,0 @@
package internal
import (
"bytes"
"context"
"fmt"
"net"
"os"
"os/signal"
"time"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
// Test harness error codes (which act as exit codes when the test harness fails).
const (
NoError int = iota // 0
ErrInvalidParameters // 1
ErrMaxAcceptRetriesReached // 2
ErrFailedToLoadGenesisFile // 3
ErrFailedToCreateListener // 4
ErrFailedToStartListener // 5
ErrInterrupted // 6
ErrOther // 7
ErrTestPublicKeyFailed // 8
ErrTestSignProposalFailed // 9
ErrTestSignVoteFailed // 10
)
var voteTypes = []tmproto.SignedMsgType{tmproto.PrevoteType, tmproto.PrecommitType}
// TestHarnessError allows us to keep track of which exit code should be used
// when exiting the main program.
type TestHarnessError struct {
Code int // The exit code to return
Err error // The original error
Info string // Any additional information
}
var _ error = (*TestHarnessError)(nil)
// TestHarness allows for testing of a remote signer to ensure compatibility
// with this version of Tendermint.
type TestHarness struct {
addr string
signerClient *privval.SignerClient
fpv *privval.FilePV
chainID string
acceptRetries int
logger log.Logger
exitWhenComplete bool
exitCode int
}
// TestHarnessConfig provides configuration to set up a remote signer test
// harness.
type TestHarnessConfig struct {
BindAddr string
KeyFile string
StateFile string
GenesisFile string
AcceptDeadline time.Duration
ConnDeadline time.Duration
AcceptRetries int
SecretConnKey ed25519.PrivKey
ExitWhenComplete bool // Whether or not to call os.Exit when the harness has completed.
}
// timeoutError can be used to check if an error returned from the net package
// was due to a timeout.
type timeoutError interface {
Timeout() bool
}
// NewTestHarness will load Tendermint data from the given files (including
// validator public/private keypairs and chain details) and create a new
// harness.
func NewTestHarness(ctx context.Context, logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) {
keyFile := ExpandPath(cfg.KeyFile)
stateFile := ExpandPath(cfg.StateFile)
logger.Info("Loading private validator configuration", "keyFile", keyFile, "stateFile", stateFile)
// NOTE: LoadFilePV ultimately calls os.Exit on failure. No error will be
// returned if this call fails.
fpv, err := privval.LoadFilePV(keyFile, stateFile)
if err != nil {
return nil, err
}
genesisFile := ExpandPath(cfg.GenesisFile)
logger.Info("Loading chain ID from genesis file", "genesisFile", genesisFile)
st, err := state.MakeGenesisDocFromFile(genesisFile)
if err != nil {
return nil, newTestHarnessError(ErrFailedToLoadGenesisFile, err, genesisFile)
}
logger.Info("Loaded genesis file", "chainID", st.ChainID)
spv, err := newTestHarnessListener(logger, cfg)
if err != nil {
return nil, newTestHarnessError(ErrFailedToCreateListener, err, "")
}
signerClient, err := privval.NewSignerClient(ctx, spv, st.ChainID)
if err != nil {
return nil, newTestHarnessError(ErrFailedToCreateListener, err, "")
}
return &TestHarness{
addr: cfg.BindAddr,
signerClient: signerClient,
fpv: fpv,
chainID: st.ChainID,
acceptRetries: cfg.AcceptRetries,
logger: logger,
exitWhenComplete: cfg.ExitWhenComplete,
exitCode: 0,
}, nil
}
// Run will execute the tests associated with this test harness. The intention
// here is to call this from one's `main` function, as the way it succeeds or
// fails at present is to call os.Exit() with an exit code related to the error
// that caused the tests to fail, or exit code 0 on success.
func (th *TestHarness) Run() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for sig := range c {
th.logger.Info("Caught interrupt, terminating...", "sig", sig)
th.Shutdown(newTestHarnessError(ErrInterrupted, nil, ""))
}
}()
th.logger.Info("Starting test harness")
accepted := false
var startErr error
for acceptRetries := th.acceptRetries; acceptRetries > 0; acceptRetries-- {
th.logger.Info("Attempting to accept incoming connection", "acceptRetries", acceptRetries)
if err := th.signerClient.WaitForConnection(10 * time.Millisecond); err != nil {
// if it wasn't a timeout error
if _, ok := err.(timeoutError); !ok {
th.logger.Error("Failed to start listener", "err", err)
th.Shutdown(newTestHarnessError(ErrFailedToStartListener, err, ""))
// we need the return statements in case this is being run
// from a unit test - otherwise this function will just die
// when os.Exit is called
return
}
startErr = err
} else {
th.logger.Info("Accepted external connection")
accepted = true
break
}
}
if !accepted {
th.logger.Error("Maximum accept retries reached", "acceptRetries", th.acceptRetries)
th.Shutdown(newTestHarnessError(ErrMaxAcceptRetriesReached, startErr, ""))
return
}
// Run the tests
if err := th.TestPublicKey(); err != nil {
th.Shutdown(err)
return
}
if err := th.TestSignProposal(); err != nil {
th.Shutdown(err)
return
}
if err := th.TestSignVote(); err != nil {
th.Shutdown(err)
return
}
th.logger.Info("SUCCESS! All tests passed.")
th.Shutdown(nil)
}
// TestPublicKey just validates that we can (1) fetch the public key from the
// remote signer, and (2) it matches the public key we've configured for our
// local Tendermint version.
func (th *TestHarness) TestPublicKey() error {
th.logger.Info("TEST: Public key of remote signer")
fpvk, err := th.fpv.GetPubKey(context.Background())
if err != nil {
return err
}
th.logger.Info("Local", "pubKey", fpvk)
sck, err := th.signerClient.GetPubKey(context.Background())
if err != nil {
return err
}
th.logger.Info("Remote", "pubKey", sck)
if !bytes.Equal(fpvk.Bytes(), sck.Bytes()) {
th.logger.Error("FAILED: Local and remote public keys do not match")
return newTestHarnessError(ErrTestPublicKeyFailed, nil, "")
}
return nil
}
// TestSignProposal makes sure the remote signer can successfully sign
// proposals.
func (th *TestHarness) TestSignProposal() error {
th.logger.Info("TEST: Signing of proposals")
// sha256 hash of "hash"
hash := tmhash.Sum([]byte("hash"))
prop := &types.Proposal{
Type: tmproto.ProposalType,
Height: 100,
Round: 0,
POLRound: -1,
BlockID: types.BlockID{
Hash: hash,
PartSetHeader: types.PartSetHeader{
Hash: hash,
Total: 1000000,
},
},
Timestamp: time.Now(),
}
p := prop.ToProto()
propBytes := types.ProposalSignBytes(th.chainID, p)
if err := th.signerClient.SignProposal(context.Background(), th.chainID, p); err != nil {
th.logger.Error("FAILED: Signing of proposal", "err", err)
return newTestHarnessError(ErrTestSignProposalFailed, err, "")
}
prop.Signature = p.Signature
th.logger.Debug("Signed proposal", "prop", prop)
// first check that it's a basically valid proposal
if err := prop.ValidateBasic(); err != nil {
th.logger.Error("FAILED: Signed proposal is invalid", "err", err)
return newTestHarnessError(ErrTestSignProposalFailed, err, "")
}
sck, err := th.signerClient.GetPubKey(context.Background())
if err != nil {
return err
}
// now validate the signature on the proposal
if sck.VerifySignature(propBytes, prop.Signature) {
th.logger.Info("Successfully validated proposal signature")
} else {
th.logger.Error("FAILED: Proposal signature validation failed")
return newTestHarnessError(ErrTestSignProposalFailed, nil, "signature validation failed")
}
return nil
}
// TestSignVote makes sure the remote signer can successfully sign all kinds of
// votes.
func (th *TestHarness) TestSignVote() error {
th.logger.Info("TEST: Signing of votes")
for _, voteType := range voteTypes {
th.logger.Info("Testing vote type", "type", voteType)
hash := tmhash.Sum([]byte("hash"))
vote := &types.Vote{
Type: voteType,
Height: 101,
Round: 0,
BlockID: types.BlockID{
Hash: hash,
PartSetHeader: types.PartSetHeader{
Hash: hash,
Total: 1000000,
},
},
ValidatorIndex: 0,
ValidatorAddress: tmhash.SumTruncated([]byte("addr")),
Timestamp: time.Now(),
}
v := vote.ToProto()
voteBytes := types.VoteSignBytes(th.chainID, v)
// sign the vote
if err := th.signerClient.SignVote(context.Background(), th.chainID, v); err != nil {
th.logger.Error("FAILED: Signing of vote", "err", err)
return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType))
}
vote.Signature = v.Signature
th.logger.Debug("Signed vote", "vote", vote)
// validate the contents of the vote
if err := vote.ValidateBasic(); err != nil {
th.logger.Error("FAILED: Signed vote is invalid", "err", err)
return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType))
}
sck, err := th.signerClient.GetPubKey(context.Background())
if err != nil {
return err
}
// now validate the signature on the vote
if sck.VerifySignature(voteBytes, vote.Signature) {
th.logger.Info("Successfully validated vote signature", "type", voteType)
} else {
th.logger.Error("FAILED: Vote signature validation failed", "type", voteType)
return newTestHarnessError(ErrTestSignVoteFailed, nil, "signature validation failed")
}
}
return nil
}
// Shutdown will kill the test harness and attempt to close all open sockets
// gracefully. If the supplied error is nil, it is assumed that the exit code
// should be 0. If err is not nil, it will exit with an exit code related to the
// error.
func (th *TestHarness) Shutdown(err error) {
var exitCode int
if err == nil {
exitCode = NoError
} else if therr, ok := err.(*TestHarnessError); ok {
exitCode = therr.Code
} else {
exitCode = ErrOther
}
th.exitCode = exitCode
// in case sc.Stop() takes too long
if th.exitWhenComplete {
go func() {
time.Sleep(time.Duration(5) * time.Second)
th.logger.Error("Forcibly exiting program after timeout")
os.Exit(exitCode)
}()
}
err = th.signerClient.Close()
if err != nil {
th.logger.Error("Failed to cleanly stop listener: %s", err.Error())
}
if th.exitWhenComplete {
os.Exit(exitCode)
}
}
// newTestHarnessListener creates our client instance which we will use for testing.
func newTestHarnessListener(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerListenerEndpoint, error) {
proto, addr := tmnet.ProtocolAndAddress(cfg.BindAddr)
if proto == "unix" {
// make sure the socket doesn't exist - if so, try to delete it
if tmos.FileExists(addr) {
if err := os.Remove(addr); err != nil {
logger.Error("Failed to remove existing Unix domain socket", "addr", addr)
return nil, err
}
}
}
ln, err := net.Listen(proto, addr)
if err != nil {
return nil, err
}
logger.Info("Listening", "proto", proto, "addr", addr)
var svln net.Listener
switch proto {
case "unix":
unixLn := privval.NewUnixListener(ln)
privval.UnixListenerTimeoutAccept(cfg.AcceptDeadline)(unixLn)
privval.UnixListenerTimeoutReadWrite(cfg.ConnDeadline)(unixLn)
svln = unixLn
case "tcp":
tcpLn := privval.NewTCPListener(ln, cfg.SecretConnKey)
privval.TCPListenerTimeoutAccept(cfg.AcceptDeadline)(tcpLn)
privval.TCPListenerTimeoutReadWrite(cfg.ConnDeadline)(tcpLn)
logger.Info("Resolved TCP address for listener", "addr", tcpLn.Addr())
svln = tcpLn
default:
_ = ln.Close()
logger.Error("Unsupported protocol (must be unix:// or tcp://)", "proto", proto)
return nil, newTestHarnessError(ErrInvalidParameters, nil, fmt.Sprintf("Unsupported protocol: %s", proto))
}
return privval.NewSignerListenerEndpoint(logger, svln), nil
}
func newTestHarnessError(code int, err error, info string) *TestHarnessError {
return &TestHarnessError{
Code: code,
Err: err,
Info: info,
}
}
func (e *TestHarnessError) Error() string {
var msg string
switch e.Code {
case ErrInvalidParameters:
msg = "Invalid parameters supplied to application"
case ErrMaxAcceptRetriesReached:
msg = "Maximum accept retries reached"
case ErrFailedToLoadGenesisFile:
msg = "Failed to load genesis file"
case ErrFailedToCreateListener:
msg = "Failed to create listener"
case ErrFailedToStartListener:
msg = "Failed to start listener"
case ErrInterrupted:
msg = "Interrupted"
case ErrTestPublicKeyFailed:
msg = "Public key validation test failed"
case ErrTestSignProposalFailed:
msg = "Proposal signing validation test failed"
case ErrTestSignVoteFailed:
msg = "Vote signing validation test failed"
default:
msg = "Unknown error"
}
if len(e.Info) > 0 {
msg = fmt.Sprintf("%s: %s", msg, e.Info)
}
if e.Err != nil {
msg = fmt.Sprintf("%s (original error: %s)", msg, e.Err.Error())
}
return msg
}


@@ -1,226 +0,0 @@
package internal
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
const (
keyFileContents = `{
"address": "D08FCA3BA74CF17CBFC15E64F9505302BB0E2748",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "ZCsuTjaczEyon70nmKxwvwu+jqrbq5OH3yQjcK0SFxc="
},
"priv_key": {
"type": "tendermint/PrivKeyEd25519",
"value": "8O39AkQsoe1sBQwud/Kdul8lg8K9SFsql9aZvwXQSt1kKy5ONpzMTKifvSeYrHC/C76Oqturk4ffJCNwrRIXFw=="
}
}`
stateFileContents = `{
"height": "0",
"round": 0,
"step": 0
}`
genesisFileContents = `{
"genesis_time": "2019-01-15T11:56:34.8963Z",
"chain_id": "test-chain-0XwP5E",
"consensus_params": {
"block": {
"max_bytes": "22020096",
"max_gas": "-1",
"time_iota_ms": "1000"
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",
"max_num": 50
},
"validator": {
"pub_key_types": [
"ed25519"
]
}
},
"validators": [
{
"address": "D08FCA3BA74CF17CBFC15E64F9505302BB0E2748",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "ZCsuTjaczEyon70nmKxwvwu+jqrbq5OH3yQjcK0SFxc="
},
"power": "10",
"name": ""
}
],
"app_hash": ""
}`
defaultConnDeadline = 100
)
func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg := makeConfig(t, 1, 2)
defer cleanup(cfg)
th, err := NewTestHarness(ctx, log.TestingLogger(), cfg)
require.NoError(t, err)
th.Run()
assert.Equal(t, ErrMaxAcceptRetriesReached, th.exitCode)
}
func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
harnessTest(
ctx,
t,
func(th *TestHarness) *privval.SignerServer {
return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false)
},
NoError,
)
}
func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
harnessTest(
ctx,
t,
func(th *TestHarness) *privval.SignerServer {
return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false)
},
ErrTestPublicKeyFailed,
)
}
func TestRemoteSignerProposalSigningFailed(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
harnessTest(
ctx,
t,
func(th *TestHarness) *privval.SignerServer {
return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false)
},
ErrTestSignProposalFailed,
)
}
func TestRemoteSignerVoteSigningFailed(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
harnessTest(
ctx,
t,
func(th *TestHarness) *privval.SignerServer {
return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true)
},
ErrTestSignVoteFailed,
)
}
func newMockSignerServer(
t *testing.T,
th *TestHarness,
privKey crypto.PrivKey,
breakProposalSigning bool,
breakVoteSigning bool,
) *privval.SignerServer {
mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning)
dialerEndpoint := privval.NewSignerDialerEndpoint(
th.logger,
privval.DialTCPFn(
th.addr,
time.Duration(defaultConnDeadline)*time.Millisecond,
ed25519.GenPrivKey(),
),
)
return privval.NewSignerServer(dialerEndpoint, th.chainID, mockPV)
}
// For running relatively standard tests.
func harnessTest(
ctx context.Context,
t *testing.T,
signerServerMaker func(th *TestHarness) *privval.SignerServer,
expectedExitCode int,
) {
cfg := makeConfig(t, 100, 3)
defer cleanup(cfg)
th, err := NewTestHarness(ctx, log.TestingLogger(), cfg)
require.NoError(t, err)
donec := make(chan struct{})
go func() {
defer close(donec)
th.Run()
}()
ss := signerServerMaker(th)
require.NoError(t, ss.Start(ctx))
assert.True(t, ss.IsRunning())
defer ss.Stop() //nolint:errcheck // ignore for tests
<-donec
assert.Equal(t, expectedExitCode, th.exitCode)
}
func makeConfig(t *testing.T, acceptDeadline, acceptRetries int) TestHarnessConfig {
return TestHarnessConfig{
BindAddr: privval.GetFreeLocalhostAddrPort(),
KeyFile: makeTempFile("tm-testharness-keyfile", keyFileContents),
StateFile: makeTempFile("tm-testharness-statefile", stateFileContents),
GenesisFile: makeTempFile("tm-testharness-genesisfile", genesisFileContents),
AcceptDeadline: time.Duration(acceptDeadline) * time.Millisecond,
ConnDeadline: time.Duration(defaultConnDeadline) * time.Millisecond,
AcceptRetries: acceptRetries,
SecretConnKey: ed25519.GenPrivKey(),
ExitWhenComplete: false,
}
}
func cleanup(cfg TestHarnessConfig) {
os.Remove(cfg.KeyFile)
os.Remove(cfg.StateFile)
os.Remove(cfg.GenesisFile)
}
func makeTempFile(name, content string) string {
tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-*", name))
if err != nil {
panic(err)
}
if _, err := tempFile.Write([]byte(content)); err != nil {
tempFile.Close()
panic(err)
}
if err := tempFile.Close(); err != nil {
panic(err)
}
return tempFile.Name()
}


@@ -1,25 +0,0 @@
package internal
import (
"os/user"
"path/filepath"
"strings"
)
// ExpandPath will check if the given path begins with a "~" symbol, and if so,
// will expand it to become the user's home directory. If it fails to expand the
// path it will automatically return the original path itself.
func ExpandPath(path string) string {
usr, err := user.Current()
if err != nil {
return path
}
if path == "~" {
return usr.HomeDir
} else if strings.HasPrefix(path, "~/") {
return filepath.Join(usr.HomeDir, path[2:])
}
return path
}


@@ -1,203 +0,0 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"time"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/tools/tm-signer-harness/internal"
"github.com/tendermint/tendermint/version"
)
const (
defaultAcceptRetries = 100
defaultBindAddr = "tcp://127.0.0.1:0"
defaultAcceptDeadline = 1
defaultConnDeadline = 3
defaultExtractKeyOutput = "./signing.key"
)
var defaultTMHome string
var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
// Command line flags
var (
flagAcceptRetries int
flagBindAddr string
flagTMHome string
flagKeyOutputPath string
)
// Command line commands
var (
rootCmd *flag.FlagSet
runCmd *flag.FlagSet
extractKeyCmd *flag.FlagSet
versionCmd *flag.FlagSet
)
func init() {
rootCmd = flag.NewFlagSet("root", flag.ExitOnError)
rootCmd.Usage = func() {
fmt.Println(`Remote signer test harness for Tendermint.
Usage:
tm-signer-harness <command> [flags]
Available Commands:
extract_key Extracts a signing key from a local Tendermint instance
help Help on the available commands
run Runs the test harness
version Display version information and exit
Use "tm-signer-harness help <command>" for more information about that command.`)
fmt.Println("")
}
hd, err := os.UserHomeDir()
if err != nil {
fmt.Println("The UserHomeDir is not defined, setting the default TM Home PATH to \"~/.tendermint\"")
defaultTMHome = "~/.tendermint"
} else {
defaultTMHome = fmt.Sprintf("%s/.tendermint", hd)
}
runCmd = flag.NewFlagSet("run", flag.ExitOnError)
runCmd.IntVar(&flagAcceptRetries,
"accept-retries",
defaultAcceptRetries,
"The number of attempts to listen for incoming connections")
runCmd.StringVar(&flagBindAddr, "addr", defaultBindAddr, "Bind to this address for the testing")
runCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory")
runCmd.Usage = func() {
fmt.Println(`Runs the remote signer test harness for Tendermint.
Usage:
tm-signer-harness run [flags]
Flags:`)
runCmd.PrintDefaults()
fmt.Println("")
}
extractKeyCmd = flag.NewFlagSet("extract_key", flag.ExitOnError)
extractKeyCmd.StringVar(&flagKeyOutputPath,
"output",
defaultExtractKeyOutput,
"Path to which signing key should be written")
extractKeyCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory")
extractKeyCmd.Usage = func() {
fmt.Println(`Extracts a signing key from a local Tendermint instance for use in the remote
signer under test.
Usage:
tm-signer-harness extract_key [flags]
Flags:`)
extractKeyCmd.PrintDefaults()
fmt.Println("")
}
versionCmd = flag.NewFlagSet("version", flag.ExitOnError)
versionCmd.Usage = func() {
fmt.Println(`
Prints the Tendermint version for which this remote signer harness was built.
Usage:
tm-signer-harness version`)
fmt.Println("")
}
}
func runTestHarness(ctx context.Context, acceptRetries int, bindAddr, tmhome string) {
tmhome = internal.ExpandPath(tmhome)
cfg := internal.TestHarnessConfig{
BindAddr: bindAddr,
KeyFile: filepath.Join(tmhome, "config", "priv_validator_key.json"),
StateFile: filepath.Join(tmhome, "data", "priv_validator_state.json"),
GenesisFile: filepath.Join(tmhome, "config", "genesis.json"),
AcceptDeadline: time.Duration(defaultAcceptDeadline) * time.Second,
AcceptRetries: acceptRetries,
ConnDeadline: time.Duration(defaultConnDeadline) * time.Second,
SecretConnKey: ed25519.GenPrivKey(),
ExitWhenComplete: true,
}
harness, err := internal.NewTestHarness(ctx, logger, cfg)
if err != nil {
logger.Error(err.Error())
if therr, ok := err.(*internal.TestHarnessError); ok {
os.Exit(therr.Code)
}
os.Exit(internal.ErrOther)
}
harness.Run()
}
func extractKey(tmhome, outputPath string) {
keyFile := filepath.Join(internal.ExpandPath(tmhome), "config", "priv_validator_key.json")
stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json")
fpv, err := privval.LoadFilePV(keyFile, stateFile)
if err != nil {
logger.Error("Can't load file pv", "err", err)
os.Exit(1)
}
pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey))
if err := os.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil {
logger.Info("Failed to write private key", "output", outputPath, "err", err)
os.Exit(1)
}
logger.Info("Successfully wrote private key", "output", outputPath)
}
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if err := rootCmd.Parse(os.Args[1:]); err != nil {
fmt.Printf("Error parsing flags: %v\n", err)
os.Exit(1)
}
if rootCmd.NArg() == 0 || (rootCmd.NArg() == 1 && rootCmd.Arg(0) == "help") {
rootCmd.Usage()
os.Exit(0)
}
switch rootCmd.Arg(0) {
case "help":
switch rootCmd.Arg(1) {
case "run":
runCmd.Usage()
case "extract_key":
extractKeyCmd.Usage()
case "version":
versionCmd.Usage()
default:
fmt.Printf("Unrecognized command: %s\n", rootCmd.Arg(1))
os.Exit(1)
}
case "run":
if err := runCmd.Parse(os.Args[2:]); err != nil {
fmt.Printf("Error parsing flags: %v\n", err)
os.Exit(1)
}
runTestHarness(ctx, flagAcceptRetries, flagBindAddr, flagTMHome)
case "extract_key":
if err := extractKeyCmd.Parse(os.Args[2:]); err != nil {
fmt.Printf("Error parsing flags: %v\n", err)
os.Exit(1)
}
extractKey(flagTMHome, flagKeyOutputPath)
case "version":
fmt.Println(version.TMVersion)
default:
fmt.Printf("Unrecognized command: %s\n", flag.Arg(0))
os.Exit(1)
}
}


@@ -883,7 +883,7 @@ func (commit *Commit) ValidateBasic() error {
}
if commit.Height >= 1 {
if commit.BlockID.IsZero() {
if commit.BlockID.IsNil() {
return errors.New("commit cannot be for nil block")
}
@@ -1204,8 +1204,8 @@ func (blockID BlockID) ValidateBasic() error {
return nil
}
// IsZero returns true if this is the BlockID of a nil block.
func (blockID BlockID) IsZero() bool {
// IsNil returns true if this is the BlockID of a nil block.
func (blockID BlockID) IsNil() bool {
return len(blockID.Hash) == 0 &&
blockID.PartSetHeader.IsZero()
}


@@ -21,7 +21,7 @@ func CanonicalizeBlockID(bid tmproto.BlockID) *tmproto.CanonicalBlockID {
panic(err)
}
var cbid *tmproto.CanonicalBlockID
if rbid == nil || rbid.IsZero() {
if rbid == nil || rbid.IsNil() {
cbid = nil
} else {
cbid = &tmproto.CanonicalBlockID{


@@ -38,7 +38,6 @@ const (
EventStateSyncStatusValue = "StateSyncStatus"
EventTimeoutProposeValue = "TimeoutPropose"
EventTimeoutWaitValue = "TimeoutWait"
EventUnlockValue = "Unlock"
EventValidBlockValue = "ValidBlock"
EventVoteValue = "Vote"
)
@@ -223,7 +222,6 @@ var (
EventQueryTimeoutPropose = QueryForEvent(EventTimeoutProposeValue)
EventQueryTimeoutWait = QueryForEvent(EventTimeoutWaitValue)
EventQueryTx = QueryForEvent(EventTxValue)
EventQueryUnlock = QueryForEvent(EventUnlockValue)
EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdatesValue)
EventQueryValidBlock = QueryForEvent(EventValidBlockValue)
EventQueryVote = QueryForEvent(EventVoteValue)


@@ -56,21 +56,26 @@ func TestGenesisBad(t *testing.T) {
}
}
func TestGenesisGood(t *testing.T) {
func TestBasicGenesisDoc(t *testing.T) {
// test a good one by raw json
genDocBytes := []byte(
`{
"genesis_time": "0001-01-01T00:00:00Z",
"chain_id": "test-chain-QDKdJr",
"initial_height": "1000",
"consensus_params": null,
"validators": [{
"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},
"power":"10",
"name":""
}],
"app_hash":"",
"app_state":{"account_owner": "Bob"}
"app_state":{"account_owner": "Bob"},
"consensus_params": {
"synchrony": {"precision": "1", "message_delay": "10"},
"validator": {"pub_key_types":["ed25519"]},
"block": {"max_bytes": "100"},
"evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"}
}
}`,
)
_, err := GenesisDocFromJSON(genDocBytes)
@@ -97,12 +102,12 @@ func TestGenesisGood(t *testing.T) {
genDocBytes, err = tmjson.Marshal(genDoc)
assert.NoError(t, err, "error marshaling genDoc")
genDoc, err = GenesisDocFromJSON(genDocBytes)
assert.NoError(t, err, "expected no error for valid genDoc json")
require.NoError(t, err, "expected no error for valid genDoc json")
// test with invalid consensus params
genDoc.ConsensusParams.Block.MaxBytes = 0
genDocBytes, err = tmjson.Marshal(genDoc)
assert.NoError(t, err, "error marshaling genDoc")
require.NoError(t, err, "error marshaling genDoc")
_, err = GenesisDocFromJSON(genDocBytes)
assert.Error(t, err, "expected error for genDoc json with block size of 0")


@@ -41,6 +41,7 @@ type ConsensusParams struct {
Evidence EvidenceParams `json:"evidence"`
Validator ValidatorParams `json:"validator"`
Version VersionParams `json:"version"`
Synchrony SynchronyParams `json:"synchrony"`
}
// HashedParams is a subset of ConsensusParams.
@@ -75,6 +76,13 @@ type VersionParams struct {
AppVersion uint64 `json:"app_version"`
}
// SynchronyParams influence the validity of block timestamps.
// TODO (@wbanfield): add link to proposer-based timestamp spec when completed.
type SynchronyParams struct {
Precision time.Duration `json:"precision"`
MessageDelay time.Duration `json:"message_delay"`
}
// DefaultConsensusParams returns a default ConsensusParams.
func DefaultConsensusParams() *ConsensusParams {
return &ConsensusParams{
@@ -82,6 +90,7 @@ func DefaultConsensusParams() *ConsensusParams {
Evidence: DefaultEvidenceParams(),
Validator: DefaultValidatorParams(),
Version: DefaultVersionParams(),
Synchrony: DefaultSynchronyParams(),
}
}
@@ -116,6 +125,15 @@ func DefaultVersionParams() VersionParams {
}
}
func DefaultSynchronyParams() SynchronyParams {
// TODO(@wbanfield): Determine experimental values for these defaults
// https://github.com/tendermint/tendermint/issues/7202
return SynchronyParams{
Precision: 10 * time.Millisecond,
MessageDelay: 500 * time.Millisecond,
}
}
func (val *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool {
for i := 0; i < len(val.PubKeyTypes); i++ {
if val.PubKeyTypes[i] == pubkeyType {
@@ -148,7 +166,7 @@ func (params ConsensusParams) ValidateConsensusParams() error {
}
if params.Evidence.MaxAgeDuration <= 0 {
return fmt.Errorf("evidence.MaxAgeDuration must be grater than 0 if provided, Got %v",
return fmt.Errorf("evidence.MaxAgeDuration must be greater than 0 if provided, Got %v",
params.Evidence.MaxAgeDuration)
}
@@ -162,6 +180,16 @@ func (params ConsensusParams) ValidateConsensusParams() error {
params.Evidence.MaxBytes)
}
if params.Synchrony.MessageDelay <= 0 {
return fmt.Errorf("synchrony.MessageDelay must be greater than 0. Got: %d",
params.Synchrony.MessageDelay)
}
if params.Synchrony.Precision <= 0 {
return fmt.Errorf("synchrony.Precision must be greater than 0. Got: %d",
params.Synchrony.Precision)
}
if len(params.Validator.PubKeyTypes) == 0 {
return errors.New("len(Validator.PubKeyTypes) must be greater than 0")
}
@@ -205,6 +233,8 @@ func (params ConsensusParams) HashConsensusParams() []byte {
func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool {
return params.Block == params2.Block &&
params.Evidence == params2.Evidence &&
params.Version == params2.Version &&
params.Synchrony == params2.Synchrony &&
tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes)
}
@@ -235,6 +265,10 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa
if params2.Version != nil {
res.Version.AppVersion = params2.Version.AppVersion
}
if params2.Synchrony != nil {
res.Synchrony.Precision = params2.Synchrony.Precision
res.Synchrony.MessageDelay = params2.Synchrony.MessageDelay
}
return res
}
@@ -255,6 +289,10 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams {
Version: &tmproto.VersionParams{
AppVersion: params.Version.AppVersion,
},
Synchrony: &tmproto.SynchronyParams{
MessageDelay: params.Synchrony.MessageDelay,
Precision: params.Synchrony.Precision,
},
}
}
@@ -275,5 +313,9 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams
Version: VersionParams{
AppVersion: pbParams.Version.AppVersion,
},
Synchrony: SynchronyParams{
MessageDelay: pbParams.Synchrony.MessageDelay,
Precision: pbParams.Synchrony.Precision,
},
}
}
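
Combined with the validation rules above, a caller can override the defaults and confirm the result is still acceptable; a minimal sketch, not taken from the diff (the chosen durations are arbitrary):

params := types.DefaultConsensusParams()
params.Synchrony.Precision = 100 * time.Millisecond
params.Synchrony.MessageDelay = 2 * time.Second
if err := params.ValidateConsensusParams(); err != nil {
	panic(err) // both fields must be strictly positive, so this passes
}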


@@ -23,23 +23,140 @@ func TestConsensusParamsValidation(t *testing.T) {
valid bool
}{
// test block params
0: {makeParams(1, 0, 2, 0, valEd25519), true},
1: {makeParams(0, 0, 2, 0, valEd25519), false},
2: {makeParams(47*1024*1024, 0, 2, 0, valEd25519), true},
3: {makeParams(10, 0, 2, 0, valEd25519), true},
4: {makeParams(100*1024*1024, 0, 2, 0, valEd25519), true},
5: {makeParams(101*1024*1024, 0, 2, 0, valEd25519), false},
6: {makeParams(1024*1024*1024, 0, 2, 0, valEd25519), false},
7: {makeParams(1024*1024*1024, 0, -1, 0, valEd25519), false},
{
params: makeParams(makeParamsArgs{
blockBytes: 1,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: true,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 0,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 47 * 1024 * 1024,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: true,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 10,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: true,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 100 * 1024 * 1024,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: true,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 101 * 1024 * 1024,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 1024 * 1024 * 1024,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 1024 * 1024 * 1024,
evidenceAge: 2,
precision: 1,
messageDelay: 1}),
valid: false,
},
// test evidence params
8: {makeParams(1, 0, 0, 0, valEd25519), false},
9: {makeParams(1, 0, 2, 2, valEd25519), false},
10: {makeParams(1000, 0, 2, 1, valEd25519), true},
11: {makeParams(1, 0, -1, 0, valEd25519), false},
{
params: makeParams(makeParamsArgs{
blockBytes: 1,
evidenceAge: 0,
maxEvidenceBytes: 0,
precision: 1,
messageDelay: 1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 1,
evidenceAge: 2,
maxEvidenceBytes: 2,
precision: 1,
messageDelay: 1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 1000,
evidenceAge: 2,
maxEvidenceBytes: 1,
precision: 1,
messageDelay: 1}),
valid: true,
},
{
params: makeParams(makeParamsArgs{
blockBytes: 1,
evidenceAge: -1,
maxEvidenceBytes: 0,
precision: 1,
messageDelay: 1}),
valid: false,
},
// test no pubkey type provided
12: {makeParams(1, 0, 2, 0, []string{}), false},
{
params: makeParams(makeParamsArgs{
evidenceAge: 2,
pubkeyTypes: []string{},
precision: 1,
messageDelay: 1}),
valid: false,
},
// test invalid pubkey type provided
13: {makeParams(1, 0, 2, 0, []string{"potatoes make good pubkeys"}), false},
{
params: makeParams(makeParamsArgs{
evidenceAge: 2,
pubkeyTypes: []string{"potatoes make good pubkeys"},
precision: 1,
messageDelay: 1}),
valid: false,
},
// test invalid synchrony params provided
{
params: makeParams(makeParamsArgs{
evidenceAge: 2,
precision: 1,
messageDelay: -1}),
valid: false,
},
{
params: makeParams(makeParamsArgs{
evidenceAge: 2,
precision: -1,
messageDelay: 1}),
valid: false,
},
}
for i, tc := range testCases {
if tc.valid {
@@ -50,38 +167,51 @@ func TestConsensusParamsValidation(t *testing.T) {
}
}
func makeParams(
blockBytes, blockGas int64,
evidenceAge int64,
maxEvidenceBytes int64,
pubkeyTypes []string,
) ConsensusParams {
type makeParamsArgs struct {
blockBytes int64
blockGas int64
evidenceAge int64
maxEvidenceBytes int64
pubkeyTypes []string
precision time.Duration
messageDelay time.Duration
}
func makeParams(args makeParamsArgs) ConsensusParams {
if args.pubkeyTypes == nil {
args.pubkeyTypes = valEd25519
}
return ConsensusParams{
Block: BlockParams{
MaxBytes: blockBytes,
MaxGas: blockGas,
MaxBytes: args.blockBytes,
MaxGas: args.blockGas,
},
Evidence: EvidenceParams{
MaxAgeNumBlocks: evidenceAge,
MaxAgeDuration: time.Duration(evidenceAge),
MaxBytes: maxEvidenceBytes,
MaxAgeNumBlocks: args.evidenceAge,
MaxAgeDuration: time.Duration(args.evidenceAge),
MaxBytes: args.maxEvidenceBytes,
},
Validator: ValidatorParams{
PubKeyTypes: pubkeyTypes,
PubKeyTypes: args.pubkeyTypes,
},
Synchrony: SynchronyParams{
Precision: args.precision,
MessageDelay: args.messageDelay,
},
}
}
func TestConsensusParamsHash(t *testing.T) {
params := []ConsensusParams{
makeParams(4, 2, 3, 1, valEd25519),
makeParams(1, 4, 3, 1, valEd25519),
makeParams(1, 2, 4, 1, valEd25519),
makeParams(2, 5, 7, 1, valEd25519),
makeParams(1, 7, 6, 1, valEd25519),
makeParams(9, 5, 4, 1, valEd25519),
makeParams(7, 8, 9, 1, valEd25519),
makeParams(4, 6, 5, 1, valEd25519),
makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}),
}
hashes := make([][]byte, len(params))
@@ -101,20 +231,31 @@ func TestConsensusParamsHash(t *testing.T) {
func TestConsensusParamsUpdate(t *testing.T) {
testCases := []struct {
params ConsensusParams
initialParams ConsensusParams
updates *tmproto.ConsensusParams
updatedParams ConsensusParams
}{
// empty updates
{
makeParams(1, 2, 3, 0, valEd25519),
&tmproto.ConsensusParams{},
makeParams(1, 2, 3, 0, valEd25519),
initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}),
updates: &tmproto.ConsensusParams{},
updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}),
},
{
// update synchrony params
initialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}),
updates: &tmproto.ConsensusParams{
Synchrony: &tmproto.SynchronyParams{
Precision: time.Second * 2,
MessageDelay: time.Second * 4,
},
},
updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}),
},
// fine-grained updates
{
makeParams(1, 2, 3, 0, valEd25519),
&tmproto.ConsensusParams{
initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}),
updates: &tmproto.ConsensusParams{
Block: &tmproto.BlockParams{
MaxBytes: 100,
MaxGas: 200,
@@ -128,11 +269,15 @@ func TestConsensusParamsUpdate(t *testing.T) {
PubKeyTypes: valSecp256k1,
},
},
makeParams(100, 200, 300, 50, valSecp256k1),
updatedParams: makeParams(makeParamsArgs{
blockBytes: 100, blockGas: 200,
evidenceAge: 300,
maxEvidenceBytes: 50,
pubkeyTypes: valSecp256k1}),
},
{
makeParams(1, 2, 3, 0, valEd25519),
&tmproto.ConsensusParams{
initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}),
updates: &tmproto.ConsensusParams{
Block: &tmproto.BlockParams{
MaxBytes: 100,
MaxGas: 200,
@@ -145,17 +290,23 @@ func TestConsensusParamsUpdate(t *testing.T) {
Validator: &tmproto.ValidatorParams{
PubKeyTypes: valSr25519,
},
}, makeParams(100, 200, 300, 50, valSr25519),
},
updatedParams: makeParams(makeParamsArgs{
blockBytes: 100,
blockGas: 200,
evidenceAge: 300,
maxEvidenceBytes: 50,
pubkeyTypes: valSr25519}),
},
}
for _, tc := range testCases {
assert.Equal(t, tc.updatedParams, tc.params.UpdateConsensusParams(tc.updates))
assert.Equal(t, tc.updatedParams, tc.initialParams.UpdateConsensusParams(tc.updates))
}
}
func TestConsensusParamsUpdate_AppVersion(t *testing.T) {
params := makeParams(1, 2, 3, 0, valEd25519)
params := makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3})
assert.EqualValues(t, 0, params.Version.AppVersion)
@@ -167,14 +318,16 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) {
func TestProto(t *testing.T) {
params := []ConsensusParams{
makeParams(4, 2, 3, 1, valEd25519),
makeParams(1, 4, 3, 1, valEd25519),
makeParams(1, 2, 4, 1, valEd25519),
makeParams(2, 5, 7, 1, valEd25519),
makeParams(1, 7, 6, 1, valEd25519),
makeParams(9, 5, 4, 1, valEd25519),
makeParams(7, 8, 9, 1, valEd25519),
makeParams(4, 6, 5, 1, valEd25519),
makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}),
makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}),
makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}),
}
for i := range params {


@@ -34,14 +34,14 @@ type Proposal struct {
// NewProposal returns a new Proposal.
// If there is no POLRound, polRound should be -1.
func NewProposal(height int64, round int32, polRound int32, blockID BlockID) *Proposal {
func NewProposal(height int64, round int32, polRound int32, blockID BlockID, ts time.Time) *Proposal {
return &Proposal{
Type: tmproto.ProposalType,
Height: height,
Round: round,
BlockID: blockID,
POLRound: polRound,
Timestamp: tmtime.Now(),
Timestamp: tmtime.Canonical(ts),
}
}
@@ -79,6 +79,31 @@ func (p *Proposal) ValidateBasic() error {
return nil
}
// IsTimely validates that the block timestamp is 'timely' according to the proposer-based timestamp algorithm.
// To evaluate whether a block is timely, its timestamp is compared to the local time of the validator along with
// the configured Precision and MessageDelay parameters.
// Specifically, a proposed block timestamp is considered timely if it satisfies the following inequalities:
//
// localtime >= proposedBlockTime - Precision
// localtime <= proposedBlockTime + MessageDelay + Precision
//
// Note: if the proposal is for the `initialHeight`, the second inequality is not checked, because the
// timestamp in that case is set to the preconfigured genesis time.
// For more information on the meaning of 'timely', see the proposer-based timestamp specification:
// https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp
func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams, initialHeight int64) bool {
// lhs is `proposedBlockTime - Precision` in the first inequality
lhs := p.Timestamp.Add(-sp.Precision)
// rhs is `proposedBlockTime + MessageDelay + Precision` in the second inequality
rhs := p.Timestamp.Add(sp.MessageDelay).Add(sp.Precision)
if recvTime.Before(lhs) || (p.Height != initialHeight && recvTime.After(rhs)) {
return false
}
return true
}
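// Illustrative sketch, not part of this change: how IsTimely is meant to be consulted when a
// proposal is received. The function name checkProposalTimely and the synchrony values below
// are assumptions for the example only; the actual caller is the consensus state machine,
// which takes its SynchronyParams from the current ConsensusParams.
func checkProposalTimely(p *Proposal, recvTime time.Time, initialHeight int64) bool {
	sp := SynchronyParams{
		Precision:    500 * time.Millisecond, // assumed bound on validator clock drift
		MessageDelay: 2 * time.Second,        // assumed bound on proposal message delay
	}
	// If the proposal is not timely, the validator prevotes nil for this round.
	return p.IsTimely(recvTime, sp, initialHeight)
}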
// String returns a string representation of the Proposal.
//
// 1. height


@@ -13,6 +13,7 @@ import (
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/libs/protoio"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
@@ -62,7 +63,7 @@ func TestProposalVerifySignature(t *testing.T) {
prop := NewProposal(
4, 2, 2,
BlockID{tmrand.Bytes(tmhash.Size), PartSetHeader{777, tmrand.Bytes(tmhash.Size)}})
BlockID{tmrand.Bytes(tmhash.Size), PartSetHeader{777, tmrand.Bytes(tmhash.Size)}}, tmtime.Now())
p := prop.ToProto()
signBytes := ProposalSignBytes("test_chain_id", p)
@@ -153,7 +154,7 @@ func TestProposalValidateBasic(t *testing.T) {
t.Run(tc.testName, func(t *testing.T) {
prop := NewProposal(
4, 2, 2,
blockID)
blockID, tmtime.Now())
p := prop.ToProto()
err := privVal.SignProposal(context.Background(), "test_chain_id", p)
prop.Signature = p.Signature
@@ -165,9 +166,9 @@ func TestProposalValidateBasic(t *testing.T) {
}
func TestProposalProtoBuf(t *testing.T) {
proposal := NewProposal(1, 2, 3, makeBlockID([]byte("hash"), 2, []byte("part_set_hash")))
proposal := NewProposal(1, 2, 3, makeBlockID([]byte("hash"), 2, []byte("part_set_hash")), tmtime.Now())
proposal.Signature = []byte("sig")
proposal2 := NewProposal(1, 2, 3, BlockID{})
proposal2 := NewProposal(1, 2, 3, BlockID{}, tmtime.Now())
testCases := []struct {
msg string
@@ -191,3 +192,99 @@ func TestProposalProtoBuf(t *testing.T) {
}
}
}
func TestIsTimely(t *testing.T) {
genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z")
require.NoError(t, err)
testCases := []struct {
name string
genesisHeight int64
proposalHeight int64
proposalTime time.Time
recvTime time.Time
precision time.Duration
msgDelay time.Duration
expectTimely bool
}{
// proposalTime - precision <= localTime <= proposalTime + msgDelay + precision
{
// Checking that the following inequality evaluates to true:
// 0 - 2 <= 1 <= 0 + 1 + 2
name: "basic timely",
genesisHeight: 1,
proposalHeight: 2,
proposalTime: genesisTime,
recvTime: genesisTime.Add(1 * time.Nanosecond),
precision: time.Nanosecond * 2,
msgDelay: time.Nanosecond,
expectTimely: true,
},
{
// Checking that the following inequality evaluates to false:
// 0 - 2 <= 4 <= 0 + 1 + 2
name: "local time too large",
genesisHeight: 1,
proposalHeight: 2,
proposalTime: genesisTime,
recvTime: genesisTime.Add(4 * time.Nanosecond),
precision: time.Nanosecond * 2,
msgDelay: time.Nanosecond,
expectTimely: false,
},
{
// Checking that the following inequality evaluates to false:
// 4 - 2 <= 0 <= 4 + 1 + 2
name: "proposal time too large",
genesisHeight: 1,
proposalHeight: 2,
proposalTime: genesisTime.Add(4 * time.Nanosecond),
recvTime: genesisTime,
precision: time.Nanosecond * 2,
msgDelay: time.Nanosecond,
expectTimely: false,
},
{
// Checking that the following inequality evaluates to true:
// 0 - 2 <= 4
// and the following check is skipped
// 4 <= 0 + 1 + 2
name: "local time too large but proposal is for genesis",
genesisHeight: 1,
proposalHeight: 1,
proposalTime: genesisTime,
recvTime: genesisTime.Add(4 * time.Nanosecond),
precision: time.Nanosecond * 2,
msgDelay: time.Nanosecond,
expectTimely: true,
},
{
// Checking that the following inequality evaluates to false:
// 4 - 2 <= 0
name: "proposal time too large for genesis block proposal",
genesisHeight: 1,
proposalHeight: 1,
proposalTime: genesisTime.Add(4 * time.Nanosecond),
recvTime: genesisTime,
precision: time.Nanosecond * 2,
msgDelay: time.Nanosecond,
expectTimely: false,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
p := Proposal{
Height: testCase.proposalHeight,
Timestamp: testCase.proposalTime,
}
sp := SynchronyParams{
Precision: testCase.precision,
MessageDelay: testCase.msgDelay,
}
ti := p.IsTimely(testCase.recvTime, sp, testCase.genesisHeight)
assert.Equal(t, testCase.expectTimely, ti)
})
}
}


@@ -68,7 +68,7 @@ func (vote *Vote) CommitSig() CommitSig {
switch {
case vote.BlockID.IsComplete():
blockIDFlag = BlockIDFlagCommit
case vote.BlockID.IsZero():
case vote.BlockID.IsNil():
blockIDFlag = BlockIDFlagNil
default:
panic(fmt.Sprintf("Invalid vote %v - expected BlockID to be either empty or complete", vote))
@@ -177,7 +177,7 @@ func (vote *Vote) ValidateBasic() error {
// BlockID.ValidateBasic would not err if, for instance, we had an empty hash but a
// non-empty PartSetHeader:
if !vote.BlockID.IsZero() && !vote.BlockID.IsComplete() {
if !vote.BlockID.IsNil() && !vote.BlockID.IsComplete() {
return fmt.Errorf("blockID must be either empty or complete, got: %v", vote.BlockID)
}
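// Illustration only, not part of this change: a BlockID with an empty hash but a non-empty
// PartSetHeader is neither nil nor complete, so the check above rejects it. The value below
// is assumed for the example:
//
//	bad := BlockID{PartSetHeader: PartSetHeader{Total: 1, Hash: []byte("ps")}}
//	bad.IsNil()      // false: the PartSetHeader is set
//	bad.IsComplete() // false: the block hash is empty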


@@ -27,7 +27,7 @@ func TestVoteSet_AddVote_Good(t *testing.T) {
assert.Nil(t, voteSet.GetByAddress(val0Addr))
assert.False(t, voteSet.BitArray().GetIndex(0))
blockID, ok := voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority")
assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority")
vote := &Vote{
ValidatorAddress: val0Addr,
@@ -44,7 +44,7 @@ func TestVoteSet_AddVote_Good(t *testing.T) {
assert.NotNil(t, voteSet.GetByAddress(val0Addr))
assert.True(t, voteSet.BitArray().GetIndex(0))
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority")
assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority")
}
func TestVoteSet_AddVote_Bad(t *testing.T) {
@@ -145,7 +145,7 @@ func TestVoteSet_2_3Majority(t *testing.T) {
require.NoError(t, err)
}
blockID, ok := voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority")
assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority")
// 7th validator voted for some blockhash
{
@@ -156,7 +156,7 @@ func TestVoteSet_2_3Majority(t *testing.T) {
_, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority")
assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority")
}
// 8th validator voted for nil.
@@ -168,7 +168,7 @@ func TestVoteSet_2_3Majority(t *testing.T) {
_, err = signAddVote(privValidators[7], vote, voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.True(t, ok || blockID.IsZero(), "there should be 2/3 majority for nil")
assert.True(t, ok || blockID.IsNil(), "there should be 2/3 majority for nil")
}
}
@@ -200,7 +200,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) {
require.NoError(t, err)
}
blockID, ok := voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(),
assert.False(t, ok || !blockID.IsNil(),
"there should be no 2/3 majority")
// 67th validator voted for nil
@@ -212,7 +212,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) {
_, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(),
assert.False(t, ok || !blockID.IsNil(),
"there should be no 2/3 majority: last vote added was nil")
}
@@ -226,7 +226,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) {
_, err = signAddVote(privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(),
assert.False(t, ok || !blockID.IsNil(),
"there should be no 2/3 majority: last vote added had different PartSetHeader Hash")
}
@@ -240,7 +240,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) {
_, err = signAddVote(privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(),
assert.False(t, ok || !blockID.IsNil(),
"there should be no 2/3 majority: last vote added had different PartSetHeader Total")
}
@@ -253,7 +253,7 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) {
_, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet)
require.NoError(t, err)
blockID, ok = voteSet.TwoThirdsMajority()
assert.False(t, ok || !blockID.IsZero(),
assert.False(t, ok || !blockID.IsNil(),
"there should be no 2/3 majority: last vote added had different BlockHash")
}