Mirror of https://github.com/tendermint/tendermint.git
Merge latest changes from master and resolve conflicts
Signed-off-by: Thane Thomson <connect@thanethomson.com>
.github/workflows/lint.yml (vendored, 16 lines changed)
@@ -1,7 +1,11 @@
 name: Golang Linter
-# Lint runs golangci-lint over the entire Tendermint repository
-# This workflow is run on every pull request and push to master
-# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
+# Lint runs golangci-lint over the entire Tendermint repository.
+#
+# This workflow is run on every pull request and push to master.
+#
+# The `golangci` job will pass without running if no *.{go, mod, sum}
+# files have been modified.
+
 on:
   pull_request:
   push:
@@ -25,8 +29,10 @@ jobs:
             go.sum
       - uses: golangci/golangci-lint-action@v3.1.0
         with:
-          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.44
+          # Required: the version of golangci-lint is required and
+          # must be specified without patch version: we always use the
+          # latest patch version.
+          version: v1.45
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
go.mod (2 lines changed)
@@ -41,7 +41,7 @@ require (
 	github.com/creachadair/atomicfile v0.2.4
 	github.com/golangci/golangci-lint v1.45.2
 	github.com/google/go-cmp v0.5.7
-	github.com/vektra/mockery/v2 v2.10.0
+	github.com/vektra/mockery/v2 v2.10.1
 	gotest.tools v2.2.0+incompatible
 )
go.sum (4 lines changed)
@@ -1035,8 +1035,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
 github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
-github.com/vektra/mockery/v2 v2.10.0 h1:MiiQWxwdq7/ET6dCXLaJzSGEN17k758H7JHS9kOdiks=
-github.com/vektra/mockery/v2 v2.10.0/go.mod h1:m/WO2UzWzqgVX3nvqpRQq70I4Z7jbSCRhdmkgtp+Ab4=
+github.com/vektra/mockery/v2 v2.10.1 h1:EOsWLFVlkUJlNurdO/w1NBFbFE1vbemJJtaG3Bo6H/M=
+github.com/vektra/mockery/v2 v2.10.1/go.mod h1:m/WO2UzWzqgVX3nvqpRQq70I4Z7jbSCRhdmkgtp+Ab4=
 github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
@@ -145,6 +145,7 @@ func (rts *reactorTestSuite) addNode(
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventbus,
+		sm.NopMetrics(),
 	)

 	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -95,7 +95,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)

 		// Make State
-		blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
+		blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus, sm.NopMetrics())
 		cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
 		require.NoError(t, err)
 		// set private validator
@@ -488,7 +488,7 @@ func newStateWithConfigAndBlockStore(
 	eventBus := eventbus.NewDefault(logger.With("module", "events"))
 	require.NoError(t, eventBus.Start(ctx))

-	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus, sm.NopMetrics())
 	cs, err := NewState(ctx,
 		logger.With("module", "consensus"),
 		thisConfig.Consensus,
@@ -504,7 +504,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		eventBus := eventbus.NewDefault(log.NewNopLogger().With("module", "events"))
 		require.NoError(t, eventBus.Start(ctx))

-		blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
+		blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus, sm.NopMetrics())

 		cs, err := NewState(ctx, logger.With("validator", i, "module", "consensus"),
 			thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus)
@@ -484,7 +484,7 @@ func (h *Handshaker) replayBlocks(
 		if i == finalBlock && !mutateState {
 			// We emit events for the index services at the final block due to the sync issue when
 			// the node shutdown during the block committing status.
-			blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
+			blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus, sm.NopMetrics())
 			appHash, err = sm.ExecCommitBlock(ctx,
 				blockExec, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
 			if err != nil {
@@ -526,7 +526,7 @@ func (h *Handshaker) replayBlock(

 	// Use stubs for both mempool and evidence pool since no transactions nor
 	// evidence are needed here - block already exists.
-	blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus)
+	blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus, sm.NopMetrics())

 	var err error
 	state, err = blockExec.ApplyBlock(ctx, state, meta.BlockID, block)
@@ -348,7 +348,7 @@ func newConsensusStateForReplay(
 	}

 	mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
-	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mempool, evpool, blockStore, eventBus, sm.NopMetrics())

 	consensusState, err := NewState(ctx, logger, csConfig, stateStore, blockExec,
 		blockStore, mempool, evpool, eventBus)
@@ -826,7 +826,7 @@ func applyBlock(
 	eventBus *eventbus.EventBus,
 ) sm.State {
 	testPartSize := types.BlockPartSizeBytes
-	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mempool, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mempool, evpool, blockStore, eventBus, sm.NopMetrics())

 	bps, err := blk.MakePartSet(testPartSize)
 	require.NoError(t, err)
@@ -80,7 +80,7 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr

 	mempool := emptyMempool{}
 	evpool := sm.EmptyEvidencePool{}
-	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp, mempool, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp, mempool, evpool, blockStore, eventBus, sm.NopMetrics())
 	consensusState, err := NewState(ctx, logger, cfg.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
 	if err != nil {
 		t.Fatal(err)
@@ -49,14 +49,6 @@ type BlockExecutor struct {
 	cache map[string]struct{}
 }

-type BlockExecutorOption func(executor *BlockExecutor)
-
-func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
-	return func(blockExec *BlockExecutor) {
-		blockExec.metrics = metrics
-	}
-}
-
 // NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
 // Call SetEventBus to provide one.
 func NewBlockExecutor(
@@ -67,25 +59,19 @@ func NewBlockExecutor(
 	evpool EvidencePool,
 	blockStore BlockStore,
 	eventBus *eventbus.EventBus,
-	options ...BlockExecutorOption,
+	metrics *Metrics,
 ) *BlockExecutor {
-	res := &BlockExecutor{
+	return &BlockExecutor{
 		eventBus:   eventBus,
 		store:      stateStore,
 		appClient:  appClient,
 		mempool:    pool,
 		evpool:     evpool,
 		logger:     logger,
-		metrics:    NopMetrics(),
+		metrics:    metrics,
 		cache:      make(map[string]struct{}),
 		blockStore: blockStore,
 	}
-
-	for _, option := range options {
-		option(res)
-	}
-
-	return res
 }

 func (blockExec *BlockExecutor) Store() Store {
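The two hunks above are the heart of the change: the BlockExecutorOption/BlockExecutorWithMetrics pair is removed and NewBlockExecutor takes a *Metrics value as an explicit, required argument. As a reading aid, here is a minimal self-contained sketch of the same refactor pattern; the Executor and Metrics types below are illustrative stand-ins, not Tendermint's real API.

// Sketch only: "Executor" and "Metrics" are stand-ins for sm.BlockExecutor and
// sm.Metrics; the point is the shape of the refactor, not the real signatures.
package sketch

type Metrics struct{ /* counters, histograms, ... */ }

func NopMetrics() *Metrics { return &Metrics{} }

type Executor struct{ metrics *Metrics }

// Before: metrics arrived via a variadic functional option and silently
// defaulted to NopMetrics() when callers passed nothing.
type Option func(*Executor)

func WithMetrics(m *Metrics) Option { return func(e *Executor) { e.metrics = m } }

func NewExecutorWithOptions(opts ...Option) *Executor {
	e := &Executor{metrics: NopMetrics()}
	for _, o := range opts {
		o(e)
	}
	return e
}

// After: the dependency is explicit. Callers that do not record metrics must
// pass NopMetrics() themselves.
func NewExecutor(metrics *Metrics) *Executor {
	return &Executor{metrics: metrics}
}

With the explicit parameter, the NopMetrics() default no longer lives inside the constructor, which is why the test call sites throughout this commit now pass sm.NopMetrics() and why makeNode (further down) passes nodeMetrics.state directly instead of wrapping it in sm.BlockExecutorWithMetrics.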
@@ -64,7 +64,7 @@ func TestApplyBlock(t *testing.T) {
 		mock.Anything,
 		mock.Anything,
 		mock.Anything).Return(nil)
-	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, eventBus, sm.NopMetrics())

 	block := sf.MakeBlock(state, 1, new(types.Commit))
 	bps, err := block.MakePartSet(testPartSize)
@@ -128,7 +128,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) {
 	eventBus := eventbus.NewDefault(logger)
 	require.NoError(t, eventBus.Start(ctx))

-	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mp, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mp, evpool, blockStore, eventBus, sm.NopMetrics())
 	state, _, lastCommit := makeAndCommitGoodBlock(ctx, t, state, 1, new(types.Commit), state.NextValidators.Validators[0].Address, blockExec, privVals, nil)

 	for idx, isAbsent := range tc.absentCommitSigs {
@@ -252,8 +252,7 @@ func TestFinalizeBlockByzantineValidators(t *testing.T) {

 	blockStore := store.NewBlockStore(dbm.NewMemDB())

-	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp,
-		mp, evpool, blockStore, eventBus)
+	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp, mp, evpool, blockStore, eventBus, sm.NopMetrics())

 	block := sf.MakeBlock(state, 1, new(types.Commit))
 	block.Evidence = ev
@@ -298,6 +297,7 @@ func TestProcessProposal(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	block0 := sf.MakeBlock(state, height-1, new(types.Commit))
@@ -515,6 +515,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	updatesSub, err := eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
@@ -585,6 +586,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	block := sf.MakeBlock(state, 1, new(types.Commit))
@@ -646,6 +648,7 @@ func TestEmptyPrepareProposal(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -700,6 +703,7 @@ func TestPrepareProposalPanicOnInvalid(t *testing.T) {
 		evpool,
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -757,6 +761,7 @@ func TestPrepareProposalRemoveTxs(t *testing.T) {
 		evpool,
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -816,6 +821,7 @@ func TestPrepareProposalAddedTxsIncluded(t *testing.T) {
 		evpool,
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -872,6 +878,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) {
 		evpool,
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -935,6 +942,7 @@ func TestPrepareProposalModifiedTxStatusFalse(t *testing.T) {
 		evpool,
 		nil,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	pa, _ := state.Validators.GetByIndex(0)
 	commit, votes := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals)
@@ -63,6 +63,7 @@ func TestValidateBlockHeader(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
@@ -166,6 +167,7 @@ func TestValidateBlockCommit(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
 	wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil)
@@ -315,6 +317,7 @@ func TestValidateBlockEvidence(t *testing.T) {
 		evpool,
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)
 	lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
@@ -51,7 +51,6 @@ type Client struct {
 	keyPathFn KeyPathFunc

 	closers []func()
-	quitCh  chan struct{}
 }

 var _ rpcclient.Client = (*Client)(nil)
@@ -92,10 +91,9 @@ func DefaultMerkleKeyPathFn() KeyPathFunc {
 // NewClient returns a new client.
 func NewClient(logger log.Logger, next rpcclient.Client, lc LightClient, opts ...Option) *Client {
 	c := &Client{
-		next:   next,
-		lc:     lc,
-		prt:    merkle.DefaultProofRuntime(),
-		quitCh: make(chan struct{}),
+		next: next,
+		lc:   lc,
+		prt:  merkle.DefaultProofRuntime(),
 	}
 	c.BaseService = *service.NewBaseService(logger, "Client", c)
 	for _, o := range opts {
@@ -111,10 +109,6 @@ func (c *Client) OnStart(ctx context.Context) error {
 		return err
 	}
 	c.closers = append(c.closers, ncancel)
-	go func() {
-		defer close(c.quitCh)
-		c.Wait()
-	}()
-
 	return nil
 }
@@ -289,7 +289,7 @@ func makeNode(
 		evPool,
 		blockStore,
 		eventBus,
-		sm.BlockExecutorWithMetrics(nodeMetrics.state),
+		nodeMetrics.state,
 	)

 	// Determine whether we should do block sync. This must happen after the handshake, since the
@@ -333,6 +333,7 @@ func TestCreateProposalBlock(t *testing.T) {
 		evidencePool,
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
@@ -412,6 +413,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	commit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
@@ -487,6 +489,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
 		sm.EmptyEvidencePool{},
 		blockStore,
 		eventBus,
+		sm.NopMetrics(),
 	)

 	blockID := types.BlockID{
@@ -92,7 +92,7 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) {
 	snapshot := abci.Snapshot{
 		Height: state.Height,
 		Format: 1,
-		Hash:   hashItems(state.Values),
+		Hash:   hashItems(state.Values, state.Height),
 		Chunks: byteChunks(bz),
 	}
 	err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644)
@@ -3,6 +3,7 @@ package app

 import (
 	"crypto/sha256"
+	"encoding/binary"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -38,7 +39,7 @@ func NewState(dir string, persistInterval uint64) (*State, error) {
 		previousFile:    filepath.Join(dir, prevStateFileName),
 		persistInterval: persistInterval,
 	}
-	state.Hash = hashItems(state.Values)
+	state.Hash = hashItems(state.Values, state.Height)
 	err := state.load()
 	switch {
 	case errors.Is(err, os.ErrNotExist):
@@ -114,7 +115,7 @@ func (s *State) Import(height uint64, jsonBytes []byte) error {
 	}
 	s.Height = height
 	s.Values = values
-	s.Hash = hashItems(values)
+	s.Hash = hashItems(values, height)
 	return s.save()
 }
@@ -140,7 +141,6 @@ func (s *State) Set(key, value string) {
 func (s *State) Commit() (uint64, []byte, error) {
 	s.Lock()
 	defer s.Unlock()
-	s.Hash = hashItems(s.Values)
 	switch {
 	case s.Height > 0:
 		s.Height++
@@ -149,6 +149,7 @@ func (s *State) Commit() (uint64, []byte, error) {
 	default:
 		s.Height = 1
 	}
+	s.Hash = hashItems(s.Values, s.Height)
 	if s.persistInterval > 0 && s.Height%s.persistInterval == 0 {
 		err := s.save()
 		if err != nil {
@@ -171,7 +172,7 @@ func (s *State) Rollback() error {
 }

 // hashItems hashes a set of key/value items.
-func hashItems(items map[string]string) []byte {
+func hashItems(items map[string]string, height uint64) []byte {
 	keys := make([]string, 0, len(items))
 	for key := range items {
 		keys = append(keys, key)
@@ -179,6 +180,9 @@ func hashItems(items map[string]string) []byte {
 	sort.Strings(keys)

 	hasher := sha256.New()
+	var b [8]byte
+	binary.BigEndian.PutUint64(b[:], height)
+	_, _ = hasher.Write(b[:])
 	for _, key := range keys {
 		_, _ = hasher.Write([]byte(key))
 		_, _ = hasher.Write([]byte{0})
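The hunks above change the end-to-end test app so that the block height is mixed into the app hash, and the hash is now recomputed after the height is incremented in Commit; presumably this makes the reported app hash advance every height even when the key/value store is unchanged. A self-contained sketch of the updated helper follows; the loop body past the key separator is not visible in the hunk above, so the value-writing lines and the final Sum call are an assumption about the rest of the function.

package sketch

import (
	"crypto/sha256"
	"encoding/binary"
	"sort"
)

// hashItems mirrors the updated e2e app helper: an 8-byte big-endian height
// prefix is hashed before the sorted key/value pairs.
func hashItems(items map[string]string, height uint64) []byte {
	keys := make([]string, 0, len(items))
	for key := range items {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	hasher := sha256.New()
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], height)
	_, _ = hasher.Write(b[:])
	for _, key := range keys {
		_, _ = hasher.Write([]byte(key))
		_, _ = hasher.Write([]byte{0})
		// Assumed continuation (not shown in the hunk above): write the value
		// and a trailing separator as well.
		_, _ = hasher.Write([]byte(items[key]))
		_, _ = hasher.Write([]byte{0})
	}
	return hasher.Sum(nil)
}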
@@ -51,20 +51,23 @@ func TestApp_Hash(t *testing.T) {
 		require.NoError(t, err)
 		require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash")

-		status, err := client.Status(ctx)
-		require.NoError(t, err)
-		require.NotZero(t, status.SyncInfo.LatestBlockHeight)
+		// In next-block execution, the app hash is stored in the next block
+		blockHeight := info.Response.LastBlockHeight + 1

-		block, err := client.Block(ctx, &info.Response.LastBlockHeight)
+		require.Eventually(t, func() bool {
+			status, err := client.Status(ctx)
+			require.NoError(t, err)
+			require.NotZero(t, status.SyncInfo.LatestBlockHeight)
+			return status.SyncInfo.LatestBlockHeight >= blockHeight
+		}, 60*time.Second, 500*time.Millisecond)
+
+		block, err := client.Block(ctx, &blockHeight)
 		require.NoError(t, err)
-		require.Equal(t, info.Response.LastBlockHeight, block.Block.Height)
+		require.Equal(t, blockHeight, block.Block.Height)
 		require.Equal(t,
 			fmt.Sprintf("%x", info.Response.LastBlockAppHash),
 			fmt.Sprintf("%x", block.Block.AppHash.Bytes()),
-			fmt.Sprintf("app hash does not match last block's app hash at height %d", block.Block.Height))
-
-		require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight,
-			"status out of sync with application")
+			"app hash does not match last block's app hash")
 	})
 }
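The TestApp_Hash hunk replaces a one-shot status check with a require.Eventually poll: since the comment notes that in next-block execution the app hash is stored in the next block, the test now waits until the chain reaches LastBlockHeight+1 before fetching that block. A minimal sketch of the same waiting pattern, with the status lookup abstracted behind a callback (the helper name and callback are assumptions for illustration, not part of the commit):

package sketch

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// waitForHeight polls until the node reports at least the target height,
// mirroring the require.Eventually usage in the hunk above. latestHeight is a
// stand-in for calling client.Status(ctx) and reading SyncInfo.LatestBlockHeight.
func waitForHeight(ctx context.Context, t *testing.T, latestHeight func(context.Context) (int64, error), target int64) {
	t.Helper()
	require.Eventually(t, func() bool {
		h, err := latestHeight(ctx)
		require.NoError(t, err)
		return h >= target
	}, 60*time.Second, 500*time.Millisecond)
}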