Merge branch 'cal/finalize-block' into cal/vote-extensions-1

This commit is contained in:
Callum Waters
2022-11-25 10:37:44 +01:00
56 changed files with 978 additions and 382 deletions

View File

@@ -125,6 +125,7 @@ func (cli *grpcClient) OnStop() {
if cli.conn != nil {
cli.conn.Close()
}
close(cli.chReqRes)
}
func (cli *grpcClient) StopForError(err error) {
@@ -147,7 +148,6 @@ func (cli *grpcClient) StopForError(err error) {
// Error returns the last error recorded on the client, if any.
// Access is guarded by cli.mtx because the error field is written
// from other goroutines (see StopForError).
func (cli *grpcClient) Error() error {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}
@@ -181,7 +181,10 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response)
//----------------------------------------
func (cli *grpcClient) Flush(ctx context.Context) error { return nil }
// Flush sends a Flush request over the gRPC connection and returns any
// transport error. WaitForReady makes the call block until the connection
// is available instead of failing fast.
func (cli *grpcClient) Flush(ctx context.Context) error {
	flushReq := types.ToRequestFlush().GetFlush()
	if _, err := cli.client.Flush(ctx, flushReq, grpc.WaitForReady(true)); err != nil {
		return err
	}
	return nil
}
func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))

View File

@@ -0,0 +1,80 @@
package abcicli_test
import (
"fmt"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"golang.org/x/net/context"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
)
// TestGRPC starts an ABCI gRPC server over a unix socket, connects a client,
// and sends numCheckTxs CheckTx requests, verifying every response succeeds.
//
// Fixes: the previous loop incremented `counter` both in the for-statement
// and in the body, so only half the intended requests were sent, the
// `counter > numCheckTxs` guard was unreachable, and a goroutine was leaked
// sleeping for a meaningless "counter overflow" wait.
func TestGRPC(t *testing.T) {
	app := types.NewBaseApplication()
	numCheckTxs := 2000

	// Use a randomly-named unix socket so parallel runs don't collide.
	socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
	socket := fmt.Sprintf("unix://%v", socketFile)

	// Start the listener.
	server := abciserver.NewGRPCServer(socket, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	err := server.Start()
	require.NoError(t, err)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	// Connect to the socket.
	//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
	conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
	require.NoError(t, err)
	t.Cleanup(func() {
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	})
	client := types.NewABCIClient(conn)

	// Send exactly numCheckTxs requests, checking each response code.
	for counter := 0; counter < numCheckTxs; counter++ {
		response, err := client.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("test")})
		require.NoError(t, err)
		if response.Code != 0 {
			t.Error("CheckTx failed with ret_code", response.Code)
		}
	}
}
// dialerFunc adapts tmnet.Connect to the signature expected by
// grpc.WithContextDialer. The context is not consulted; tmnet.Connect
// performs the dial directly.
func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	conn, err := tmnet.Connect(addr)
	return conn, err
}

View File

@@ -14,10 +14,12 @@ import (
"github.com/tendermint/tendermint/abci/types"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/libs/timer"
)
const (
reqQueueSize = 256 // TODO make configurable
reqQueueSize = 256 // TODO make configurable
flushThrottleMS = 20 // Don't wait longer than...
)
// socketClient is the client side implementation of the Tendermint
@@ -26,8 +28,6 @@ const (
//
// This is goroutine-safe. All calls are serialized to the server through an unbuffered queue. The socketClient
// tracks responses and expects them to respect the order of the requests sent.
//
// The buffer is flushed after every message sent.
type socketClient struct {
service.BaseService
@@ -35,7 +35,8 @@ type socketClient struct {
mustConnect bool
conn net.Conn
reqQueue chan *ReqRes
reqQueue chan *ReqRes
flushTimer *timer.ThrottleTimer
mtx sync.Mutex
err error
@@ -51,10 +52,12 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(addr string, mustConnect bool) Client {
cli := &socketClient{
reqQueue: make(chan *ReqRes, reqQueueSize),
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
addr: addr,
reqSent: list.New(),
resCb: nil,
addr: addr,
reqSent: list.New(),
resCb: nil,
}
cli.BaseService = *service.NewBaseService(nil, "socketClient", cli)
return cli
@@ -95,6 +98,7 @@ func (cli *socketClient) OnStop() {
}
cli.flushQueue()
cli.flushTimer.Stop()
}
// Error returns an error if the client was stopped abruptly.
@@ -123,26 +127,37 @@ func (cli *socketClient) CheckTxAsync(ctx context.Context, req *types.RequestChe
//----------------------------------------
func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
bw := bufio.NewWriter(conn)
w := bufio.NewWriter(conn)
for {
select {
case <-cli.Quit():
return
case reqres := <-cli.reqQueue:
// N.B. We must enqueue before sending out the request, otherwise the
// server may reply before we do it, and the receiver will fail for an
// unsolicited reply.
cli.trackRequest(reqres)
if err := types.WriteMessage(reqres.Request, bw); err != nil {
err := types.WriteMessage(reqres.Request, w)
if err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
if err := bw.Flush(); err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
// If it's a flush request, flush the current buffer.
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
}
case <-cli.flushTimer.Ch: // flush queue
select {
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
default:
// Probably will fill the buffer, or retry later.
}
case <-cli.Quit():
return
}
}
}
@@ -155,8 +170,8 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
}
var res = &types.Response{}
if err := types.ReadMessage(r, res); err != nil {
err := types.ReadMessage(r, res)
if err != nil {
cli.stopForError(fmt.Errorf("read message: %w", err))
return
}
@@ -221,6 +236,182 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
return nil
}
//----------------------------------------
// Flush enqueues a flush request — which forces the send routine to flush its
// write buffer — and blocks until the matching flush response arrives.
// Transport-level failures are surfaced via cli.Error(), which callers
// consult after flushing.
func (cli *socketClient) Flush(ctx context.Context) error {
	rr, qErr := cli.queueRequest(ctx, types.ToRequestFlush())
	if qErr != nil {
		return qErr
	}
	rr.Wait()
	return nil
}
// Echo performs a synchronous Echo round-trip: queue the request, flush so it
// reaches the server, then return the matched response alongside any
// transport error recorded on the client.
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	rr, err := cli.queueRequest(ctx, types.ToRequestEcho(msg))
	if err != nil {
		return nil, err
	}
	if flushErr := cli.Flush(ctx); flushErr != nil {
		return nil, flushErr
	}
	return rr.Response.GetEcho(), cli.Error()
}
// Info performs a synchronous Info request: it queues the request, flushes
// the connection so the request is actually written, and waits (inside
// Flush) for the response. Transport errors are reported via cli.Error().
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestInfo(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetInfo(), cli.Error()
}
// CheckTx performs a synchronous CheckTx request, flushing the connection so
// the request is written before waiting on the matched response.
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	rr, err := cli.queueRequest(ctx, types.ToRequestCheckTx(req))
	if err != nil {
		return nil, err
	}
	if flushErr := cli.Flush(ctx); flushErr != nil {
		return nil, flushErr
	}
	return rr.Response.GetCheckTx(), cli.Error()
}
// Query performs a synchronous Query request; see Info for the
// queue-then-flush pattern shared by all synchronous methods.
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestQuery(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetQuery(), cli.Error()
}
// Commit performs a synchronous Commit request.
// NOTE(review): the req parameter is accepted but not forwarded —
// types.ToRequestCommit() takes no payload; confirm this is intentional
// (the parameter presumably exists only to satisfy the Client interface).
func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit())
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetCommit(), cli.Error()
}
// InitChain performs a synchronous InitChain request, following the shared
// queue-then-flush pattern of the socket client.
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
	rr, err := cli.queueRequest(ctx, types.ToRequestInitChain(req))
	if err != nil {
		return nil, err
	}
	if flushErr := cli.Flush(ctx); flushErr != nil {
		return nil, flushErr
	}
	return rr.Response.GetInitChain(), cli.Error()
}
// ListSnapshots performs a synchronous ListSnapshots request (state sync);
// see Info for the shared queue-then-flush pattern.
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestListSnapshots(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetListSnapshots(), cli.Error()
}
// OfferSnapshot performs a synchronous OfferSnapshot request (state sync);
// see Info for the shared queue-then-flush pattern.
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestOfferSnapshot(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetOfferSnapshot(), cli.Error()
}
// LoadSnapshotChunk performs a synchronous LoadSnapshotChunk request
// (state sync); see Info for the shared queue-then-flush pattern.
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetLoadSnapshotChunk(), cli.Error()
}
// ApplySnapshotChunk performs a synchronous ApplySnapshotChunk request
// (state sync); see Info for the shared queue-then-flush pattern.
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestApplySnapshotChunk(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetApplySnapshotChunk(), cli.Error()
}
// PrepareProposal performs a synchronous PrepareProposal request, following
// the shared queue-then-flush pattern of the socket client.
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	rr, err := cli.queueRequest(ctx, types.ToRequestPrepareProposal(req))
	if err != nil {
		return nil, err
	}
	if flushErr := cli.Flush(ctx); flushErr != nil {
		return nil, flushErr
	}
	return rr.Response.GetPrepareProposal(), cli.Error()
}
// ProcessProposal performs a synchronous ProcessProposal request; see Info
// for the shared queue-then-flush pattern.
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestProcessProposal(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	return reqRes.Response.GetProcessProposal(), cli.Error()
}
// ExtendVote performs a synchronous ExtendVote request; see Info for the
// shared queue-then-flush pattern.
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestExtendVote(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	// Report transport-level errors via cli.Error() like every sibling
	// method; the previous `return ..., nil` silently dropped connection
	// failures, handing callers a zero-value response with no error.
	return reqRes.Response.GetExtendVote(), cli.Error()
}
// VerifyVoteExtension performs a synchronous VerifyVoteExtension request;
// see Info for the shared queue-then-flush pattern.
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	reqRes, err := cli.queueRequest(ctx, types.ToRequestVerifyVoteExtension(req))
	if err != nil {
		return nil, err
	}
	if err := cli.Flush(ctx); err != nil {
		return nil, err
	}
	// Report transport-level errors via cli.Error() like every sibling
	// method; the previous `return ..., nil` silently dropped connection
	// failures, handing callers a zero-value response with no error.
	return reqRes.Response.GetVerifyVoteExtension(), cli.Error()
}
// FinalizeBlock performs a synchronous FinalizeBlock request, following the
// shared queue-then-flush pattern of the socket client.
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	rr, err := cli.queueRequest(ctx, types.ToRequestFinalizeBlock(req))
	if err != nil {
		return nil, err
	}
	if flushErr := cli.Flush(ctx); flushErr != nil {
		return nil, flushErr
	}
	return rr.Response.GetFinalizeBlock(), cli.Error()
}
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (*ReqRes, error) {
reqres := NewReqRes(req)
@@ -231,6 +422,14 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (
return nil, ctx.Err()
}
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}
return reqres, nil
}
@@ -260,151 +459,6 @@ LOOP:
//----------------------------------------
func (cli *socketClient) Flush(ctx context.Context) error {
_, err := cli.queueRequest(ctx, types.ToRequestFlush())
if err != nil {
return err
}
return nil
}
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestEcho(msg))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetEcho(), cli.Error()
}
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestInfo(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetInfo(), cli.Error()
}
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestCheckTx(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetCheckTx(), cli.Error()
}
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestQuery(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetQuery(), cli.Error()
}
func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit())
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetCommit(), cli.Error()
}
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestInitChain(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetInitChain(), cli.Error()
}
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestListSnapshots(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetListSnapshots(), cli.Error()
}
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetOfferSnapshot(), cli.Error()
}
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetLoadSnapshotChunk(), cli.Error()
}
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetApplySnapshotChunk(), cli.Error()
}
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestPrepareProposal(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetPrepareProposal(), cli.Error()
}
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestProcessProposal(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetProcessProposal(), cli.Error()
}
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestExtendVote(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetExtendVote(), nil
}
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestVerifyVoteExtension(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetVerifyVoteExtension(), nil
}
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqRes, err := cli.queueRequest(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
reqRes.Wait()
return reqRes.Response.GetFinalizeBlock(), cli.Error()
}
//----------------------------------------
func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
switch req.Value.(type) {
case *types.Request_Echo:

View File

@@ -3,6 +3,8 @@ package abcicli_test
import (
"context"
"fmt"
"math/rand"
"os"
"sync"
"testing"
"time"
@@ -33,7 +35,7 @@ func TestCalls(t *testing.T) {
}()
select {
case <-time.After(1 * time.Second):
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
@@ -41,6 +43,84 @@ func TestCalls(t *testing.T) {
}
}
// TestHangingAsyncCalls verifies that an in-flight async CheckTx (against an
// app that responds slowly) is released with an error once the server is
// stopped, rather than hanging forever.
// NOTE(review): the 50ms sleep assumes the request reaches the server within
// that window — timing-sensitive on loaded CI machines; confirm acceptable.
func TestHangingAsyncCalls(t *testing.T) {
	app := slowApp{}

	s, c := setupClientServer(t, app)

	resp := make(chan error, 1)
	go func() {
		// Call CheckTx
		reqres, err := c.CheckTxAsync(context.Background(), &types.RequestCheckTx{})
		require.NoError(t, err)
		// wait 50 ms for all events to travel socket, but
		// no response yet from server
		time.Sleep(50 * time.Millisecond)
		// kill the server, so the connections break
		err = s.Stop()
		require.NoError(t, err)

		// wait for the response from CheckTx
		reqres.Wait()
		resp <- c.Error()
	}()

	select {
	case <-time.After(time.Second):
		require.Fail(t, "No response arrived")
	case err, ok := <-resp:
		require.True(t, ok, "Must not close channel")
		assert.Error(t, err, "We should get EOF error")
	}
}
// TestBulk sends a single FinalizeBlock request carrying numTxs transactions
// through the socket client and verifies every tx result succeeds.
func TestBulk(t *testing.T) {
	const numTxs = 700000

	// use a socket instead of a port
	socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
	socket := fmt.Sprintf("unix://%v", socketFile)

	app := types.NewBaseApplication()

	// Start the listener
	// NOTE(review): this local `server` shadows the imported `server` package
	// on subsequent lines of this function — consider renaming.
	server := server.NewSocketServer(socket, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Log(err)
		}
	})
	err := server.Start()
	require.NoError(t, err)

	// Connect to the socket
	client := abcicli.NewSocketClient(socket, false)
	t.Cleanup(func() {
		if err := client.Stop(); err != nil {
			t.Log(err)
		}
	})
	err = client.Start()
	require.NoError(t, err)

	// Construct request with numTxs identical transactions
	rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numTxs)}
	for counter := 0; counter < numTxs; counter++ {
		rfb.Txs[counter] = []byte("test")
	}

	// Send bulk request; expect one result per tx, all with code 0 (OK)
	res, err := client.FinalizeBlock(context.Background(), rfb)
	require.NoError(t, err)
	require.Equal(t, numTxs, len(res.TxResults), "Number of txs doesn't match")
	for _, tx := range res.TxResults {
		require.Equal(t, uint32(0), tx.Code, "Tx failed")
	}

	// Send final flush message to drain the connection
	err = client.Flush(context.Background())
	require.NoError(t, err)
}
func setupClientServer(t *testing.T, app types.Application) (
service.Service, abcicli.Client) {
t.Helper()
@@ -55,7 +135,7 @@ func setupClientServer(t *testing.T, app types.Application) (
t.Cleanup(func() {
if err := s.Stop(); err != nil {
t.Error(err)
t.Log(err)
}
})
@@ -65,13 +145,22 @@ func setupClientServer(t *testing.T, app types.Application) (
t.Cleanup(func() {
if err := c.Stop(); err != nil {
t.Error(err)
t.Log(err)
}
})
return s, c
}
type slowApp struct {
types.BaseApplication
}
// CheckTx simulates a slow application: it sleeps for one second before
// returning an empty (success) response, ignoring the request contents.
func (slowApp) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	time.Sleep(1 * time.Second)
	resp := &types.ResponseCheckTx{}
	return resp, nil
}
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple

View File

@@ -167,10 +167,9 @@ where example.file looks something like:
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
finalize_block 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
finalize_block 0x01 0x04 0xff
info
`,
Args: cobra.ExactArgs(0),
@@ -186,7 +185,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
RunE: cmdConsole,
}

View File

@@ -61,7 +61,7 @@ func NewPersistentApplication(dbDir string) *Application {
name := "kvstore"
db, err := dbm.NewGoLevelDB(name, dbDir)
if err != nil {
panic(err)
panic(fmt.Errorf("failed to create persistent app at %s: %w", dbDir, err))
}
return NewApplication(db)
}
@@ -77,9 +77,6 @@ func NewInMemoryApplication() *Application {
// Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has. If the
// Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data
func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
// Tendermint expects the application to persist validators, on start-up we need to reload them to memory if they exist
if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 {
validators := app.getValidators()
@@ -97,7 +94,7 @@ func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.
Version: version.ABCIVersion,
AppVersion: AppVersion,
LastBlockHeight: app.state.Height,
LastBlockAppHash: appHash,
LastBlockAppHash: app.state.Hash(),
}, nil
}

View File

@@ -183,6 +183,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
}()
for {
var req = &types.Request{}
err := types.ReadMessage(bufReader, req)
if err != nil {
@@ -315,11 +316,12 @@ func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, res
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
}
// If the application has responded with an exception, the server returns the error

View File

@@ -74,7 +74,7 @@ func newReactor(
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(blockDB)
@@ -101,7 +101,7 @@ func newReactor(
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mp, sm.EmptyEvidencePool{}, blockStore)

View File

@@ -41,7 +41,7 @@ reindex from the base block height(inclusive); and the default end-height is 0,
the tooling will reindex until the latest block height(inclusive). User can omit
either or both arguments.
Note: This operation requires ABCI Responses. Do not set DiscardFinalizeBlockResponses to true if you
Note: This operation requires ABCI Responses. Do not set DiscardABCIResponses to true if you
want to use this command.
`,
Example: `

View File

@@ -90,7 +90,7 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
return nil, nil, err
}
stateStore := state.NewStore(stateDB, state.StoreOptions{
DiscardFinalizeBlockResponses: config.Storage.DiscardFinalizeBlockResponses,
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
return blockStore, stateStore, nil

View File

@@ -1120,14 +1120,14 @@ type StorageConfig struct {
// Set to false to ensure ABCI responses are persisted. ABCI responses are
// required for `/block_results` RPC queries, and to reindex events in the
// command-line tool.
DiscardFinalizeBlockResponses bool `mapstructure:"discard_abci_responses"`
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
}
// DefaultStorageConfig returns the default configuration options relating to
// Tendermint storage optimization.
func DefaultStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
}
}
@@ -1135,7 +1135,7 @@ func DefaultStorageConfig() *StorageConfig {
// testing.
func TestStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
}
}

View File

@@ -514,7 +514,7 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# considerable amount of disk space. Set to false to ensure ABCI responses are
# persisted. ABCI responses are required for /block_results RPC queries, and to
# reindex events in the command-line tool.
discard_abci_responses = {{ .Storage.DiscardFinalizeBlockResponses}}
discard_abci_responses = {{ .Storage.DiscardABCIResponses}}
#######################################################
### Transaction Indexer Configuration Options ###

View File

@@ -53,7 +53,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
logger := consensusLogger().With("test", "byzantine", "validator", i)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))

View File

@@ -434,7 +434,7 @@ func newStateWithConfigAndBlockStore(
// Make State
stateDB := blockDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
@@ -759,7 +759,7 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
@@ -801,7 +801,7 @@ func randConsensusNetWithPeers(
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
t.Cleanup(func() { _ = stateStore.Close() })
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)

View File

@@ -2,7 +2,6 @@ package consensus
import (
"context"
"encoding/binary"
"fmt"
"os"
"testing"
@@ -119,7 +118,7 @@ func deliverTxsRange(t *testing.T, cs *State, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardFinalizeBlockResponses: false})
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], kvstore.NewInMemoryApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -144,29 +143,31 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := kvstore.NewInMemoryApplication()
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardFinalizeBlockResponses: false})
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
// increment the counter by 1
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
res, err := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{Txs: [][]byte{kvstore.NewTx("key", "value")}})
txBytes := kvstore.NewTx("key", "value")
res, err := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
require.NoError(t, err)
assert.False(t, res.TxResults[0].IsErr())
assert.True(t, len(res.AgreedAppData) > 0)
_, err = app.Commit(context.Background(), &abci.RequestCommit{})
require.NoError(t, err)
emptyMempoolCh := make(chan struct{})
checkTxRespCh := make(chan struct{})
go func() {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.ResponseCheckTx) {
invalidTx := []byte("invalidTx")
err := assertMempool(cs.txNotifier).CheckTx(invalidTx, func(r *abci.ResponseCheckTx) {
if r.Code != kvstore.CodeTypeInvalidTxFormat {
t.Errorf("expected checktx to return bad nonce, got %v", r)
t.Errorf("expected checktx to return invalid format, got %v", r)
return
}
checkTxRespCh <- struct{}{}
@@ -178,7 +179,7 @@ func TestMempoolRmBadTx(t *testing.T) {
// check for the tx
for {
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(invalidTx)), -1)
if len(txs) == 0 {
emptyMempoolCh <- struct{}{}
return

View File

@@ -139,7 +139,7 @@ func TestReactorWithEvidence(t *testing.T) {
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))

View File

@@ -266,7 +266,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
}
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
appHash, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
if err != nil {
return fmt.Errorf("error on replay: %v", err)
}
@@ -423,6 +423,13 @@ func (h *Handshaker) ReplayBlocks(
if err != nil {
return nil, err
}
// NOTE: There is a rare edge case where a node has upgraded from
// v0.37 with endblock to v0.38 with finalize block and thus
// does not have the app hash saved from the previous height
// here we take the appHash provided from the Info handshake
if len(finalizeBlockResponse.AgreedAppData) == 0 {
finalizeBlockResponse.AgreedAppData = appHash
}
mockApp := newMockProxyApp(finalizeBlockResponse)
h.logger.Info("Replay last block using mock app")
state, err = h.replayBlock(state, storeBlockHeight, mockApp)

View File

@@ -298,7 +298,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
tmos.Exit(err.Error())
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
if err != nil {

View File

@@ -8,6 +8,7 @@ import (
"os"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
@@ -22,6 +23,7 @@ import (
"github.com/tendermint/tendermint/abci/types/mocks"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/test"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
@@ -30,6 +32,7 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
smmocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
@@ -163,7 +166,7 @@ LOOP:
blockDB := dbm.NewMemDB()
stateDB := blockDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
@@ -303,36 +306,291 @@ const numBlocks = 6
// Test handshake/replay
// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
// 3 - save block and committed with truncated block store and state behind
// 1 - saved block but app and state are behind by one height
// 2 - save block and committed (i.e. app got `Commit`) but state is behind
// 3 - same as 2 but with a truncated block store
var modes = []uint{0, 1, 2, 3}
// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
func setupChainWithChangingValidators(t *testing.T, name string, nBlocks int) (*cfg.Config, []*types.Block, []*types.Commit, sm.State) {
nPeers := 7
nVals := 4
css, genDoc, config, cleanup := randConsensusNetWithPeers(
t,
nVals,
nPeers,
name,
newMockTickerFunc(true),
func(_ string) abci.Application {
return newKVStore()
})
genesisState, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
t.Cleanup(cleanup)
partSize := types.BlockPartSizeBytes
newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)
vss := make([]*validatorStub, nPeers)
for i := 0; i < nPeers; i++ {
vss[i] = newValidatorStub(css[i].privValidator, int32(i))
}
height, round := css[0].Height, css[0].Round
// start the machine
startTestRound(css[0], height, round)
incrementHeight(vss...)
ensureNewRound(newRoundCh, height, 0)
ensureNewProposal(proposalCh, height, round)
rs := css[0].GetRoundState()
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 2
height++
incrementHeight(vss...)
newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
require.NoError(t, err)
valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
require.NoError(t, err)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, err := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err := propBlock.MakePartSet(partSize)
require.NoError(t, err)
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
p := proposal.ToProto()
if err := vss[1].SignProposal(test.DefaultTestChainID, p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 3
height++
incrementHeight(vss...)
updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
require.NoError(t, err)
updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
require.NoError(t, err)
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err = propBlock.MakePartSet(partSize)
require.NoError(t, err)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[2].SignProposal(test.DefaultTestChainID, p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 4
height++
incrementHeight(vss...)
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
require.NoError(t, err)
newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
require.NoError(t, err)
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)
newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
require.NoError(t, err)
newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
require.NoError(t, err)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err = propBlock.MakePartSet(partSize)
require.NoError(t, err)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss := make([]*validatorStub, nVals+1)
copy(newVss, vss[:nVals+1])
sort.Sort(ValidatorStubsByPower(newVss))
valIndexFn := func(cssIdx int) int {
for i, vs := range newVss {
vsPubKey, err := vs.GetPubKey()
require.NoError(t, err)
cssPubKey, err := css[cssIdx].privValidator.GetPubKey()
require.NoError(t, err)
if vsPubKey.Equals(cssPubKey) {
return i
}
}
panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx))
}
selfIndex := valIndexFn(0)
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[3].SignProposal(test.DefaultTestChainID, p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 5
height++
incrementHeight(vss...)
// Reflect the changes to vss[nVals] at height 3 and resort newVss.
newVssIdx := valIndexFn(nVals)
newVss[newVssIdx].VotingPower = 25
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 6
height++
incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err = propBlock.MakePartSet(partSize)
require.NoError(t, err)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss = make([]*validatorStub, nVals+3)
copy(newVss, vss[:nVals+3])
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[1].SignProposal(test.DefaultTestChainID, p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+3; i++ {
if i == selfIndex {
continue
}
signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
chain := make([]*types.Block, 0)
commits := make([]*types.Commit, 0)
for i := 1; i <= nBlocks; i++ {
chain = append(chain, css[0].blockStore.LoadBlock(int64(i)))
commits = append(commits, css[0].blockStore.LoadBlockCommit(int64(i)))
}
return config, chain, commits, genesisState
}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, 0, m)
t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) {
testHandshakeReplay(t, config, 0, m, false)
})
		t.Run(fmt.Sprintf("mode_%d_multi", m), func(t *testing.T) {
			testHandshakeReplay(t, config, 0, m, true)
		})
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, 2, m)
t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) {
testHandshakeReplay(t, config, 2, m, false)
})
t.Run(fmt.Sprintf("mode_%d_multi", m), func(t *testing.T) {
testHandshakeReplay(t, config, 2, m, true)
})
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks-1, m)
t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) {
testHandshakeReplay(t, config, numBlocks-1, m, false)
})
t.Run(fmt.Sprintf("mode_%d_multi", m), func(t *testing.T) {
testHandshakeReplay(t, config, numBlocks-1, m, true)
})
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks, m)
t.Run(fmt.Sprintf("mode_%d_single", m), func(t *testing.T) {
testHandshakeReplay(t, config, numBlocks, m, false)
})
t.Run(fmt.Sprintf("mode_%d_multi", m), func(t *testing.T) {
testHandshakeReplay(t, config, numBlocks, m, true)
})
}
}
@@ -353,8 +611,9 @@ func tempWALWithData(data []byte) string {
// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
var (
testConfig *cfg.Config
chain []*types.Block
commits []*types.Commit
store *mockBlockStore
@@ -364,35 +623,41 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
evpool = sm.EmptyEvidencePool{}
)
testConfig := ResetConfig(fmt.Sprintf("%d_%d_s", nBlocks, mode))
t.Cleanup(func() {
_ = os.RemoveAll(testConfig.RootDir)
})
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
if testValidatorsChange {
testConfig, chain, commits, genesisState = setupChainWithChangingValidators(t, fmt.Sprintf("%d_%d_m", nBlocks, mode), numBlocks)
stateDB = dbm.NewMemDB()
store = newMockBlockStore(t, config, genesisState.ConsensusParams)
} else {
testConfig = ResetConfig(fmt.Sprintf("%d_%d_s", nBlocks, mode))
t.Cleanup(func() {
_ = os.RemoveAll(testConfig.RootDir)
})
walBody, err := WALWithNBlocks(t, numBlocks, testConfig)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
testConfig.Consensus.SetWalFile(walFile)
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
privVal := privval.LoadFilePV(testConfig.PrivValidatorKeyFile(), testConfig.PrivValidatorStateFile())
wal, err := NewWAL(walFile)
require.NoError(t, err)
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := wal.Stop(); err != nil {
t.Error(err)
}
})
chain, commits, err = makeBlockchainFromWAL(wal)
require.NoError(t, err)
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, genesisState, store = stateAndStore(t, config, pubKey, kvstore.AppVersion)
wal, err := NewWAL(walFile)
require.NoError(t, err)
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := wal.Stop(); err != nil {
t.Error(err)
}
})
chain, commits, err = makeBlockchainFromWAL(wal)
require.NoError(t, err)
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, genesisState, store = stateAndStore(t, testConfig, pubKey, kvstore.AppVersion)
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
t.Cleanup(func() {
_ = stateStore.Close()
@@ -402,12 +667,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
state := genesisState.Copy()
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(t, config, stateStore, mempool, evpool, state, chain, nBlocks, mode, store)
latestAppHash := state.AppHash
state, latestAppHash := buildTMStateFromChain(t, testConfig, stateStore, mempool, evpool, state, chain, nBlocks, mode, store)
// make a new client creator
kvstoreApp := kvstore.NewPersistentApplication(
filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
filepath.Join(testConfig.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
t.Cleanup(func() {
_ = kvstoreApp.Close()
})
@@ -418,12 +682,12 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
dummyStateStore := sm.NewStore(stateDB1, sm.StoreOptions{
DiscardABCIResponses: false,
})
err := stateStore.Save(genesisState)
err := dummyStateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(t, proxyApp, stateStore, mempool, evpool, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(t, proxyApp, dummyStateStore, mempool, evpool, genesisState, chain, nBlocks, mode, store)
}
// Prune block store if requested
@@ -436,7 +700,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
}
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
genDoc, err := sm.MakeGenesisDocFromFile(testConfig.GenesisFile())
require.NoError(t, err)
handshaker := NewHandshaker(stateStore, state, store, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
if err := proxyApp.Start(); err != nil {
@@ -449,19 +714,27 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
}
})
// perform the replay protocol to sync Tendermint and the application
err = handshaker.Handshake(proxyApp)
if expectError {
require.Error(t, err)
// finish the test early
return
} else if err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
require.NoError(t, err)
// get the latest app hash from the app
res, err := proxyApp.Query().Info(context.Background(), &abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
res, err := proxyApp.Query().Info(context.Background(), proxy.RequestInfo)
require.NoError(t, err)
// block store and app height should be in sync
require.Equal(t, store.Height(), res.LastBlockHeight)
// tendermint state height and app height should be in sync
state, err = stateStore.Load()
require.NoError(t, err)
require.Equal(t, state.LastBlockHeight, res.LastBlockHeight)
require.Equal(t, int64(numBlocks), res.LastBlockHeight)
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
@@ -525,9 +798,12 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm
state = applyBlock(t, stateStore, mempool, evpool, state, block, proxyApp, bs)
}
// mode 1 only the block at the last height is saved
// mode 2 and 3, the block is saved, commit is called, but the state is not saved
if mode == 2 || mode == 3 {
// update the kvstore height and apphash
// as if we ran commit but not
// here we expect a dummy state store to be used
state = applyBlock(t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, bs)
}
default:
@@ -546,7 +822,7 @@ func buildTMStateFromChain(
chain []*types.Block,
nBlocks int,
mode uint,
bs sm.BlockStore) sm.State {
bs sm.BlockStore) (sm.State, []byte) {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(
kvstore.NewPersistentApplication(
@@ -573,6 +849,7 @@ func buildTMStateFromChain(
for _, block := range chain {
state = applyBlock(t, stateStore, mempool, evpool, state, block, proxyApp, bs)
}
return state, state.AppHash
case 1, 2, 3:
// sync up to the penultimate as if we stored the block.
@@ -581,14 +858,24 @@ func buildTMStateFromChain(
state = applyBlock(t, stateStore, mempool, evpool, state, block, proxyApp, bs)
}
dummyStateStore := &smmocks.Store{}
lastHeight := int64(len(chain))
penultimateHeight := int64(len(chain) - 1)
vals, _ := stateStore.LoadValidators(penultimateHeight)
dummyStateStore.On("LoadValidators", penultimateHeight).Return(vals, nil)
dummyStateStore.On("Save", mock.Anything).Return(nil)
dummyStateStore.On("SaveFinalizeBlockResponse", lastHeight, mock.MatchedBy(func(response *abci.ResponseFinalizeBlock) bool {
require.NoError(t, stateStore.SaveFinalizeBlockResponse(lastHeight, response))
return true
})).Return(nil)
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, bs)
s := applyBlock(t, dummyStateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, bs)
return state, s.AppHash
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}
return state
}
func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
@@ -604,7 +891,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
require.NoError(t, err)
stateDB, state, store := stateAndStore(t, config, pubKey, appVersion)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
@@ -821,7 +1108,7 @@ func stateAndStore(
) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
require.NoError(t, err)
@@ -917,7 +1204,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
require.NoError(t, err)
stateDB, state, store := stateAndStore(t, config, pubKey, 0x0)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
oldValAddr := state.Validators.Validators[0].Address

View File

@@ -1421,6 +1421,78 @@ func TestProcessProposalAccept(t *testing.T) {
}
}
func TestFinalizeBlockCalled(t *testing.T) {
for _, testCase := range []struct {
name string
voteNil bool
expectCalled bool
}{
{
name: "finalize block called when block committed",
voteNil: false,
expectCalled: true,
},
{
name: "not called when block not committed",
voteNil: true,
expectCalled: false,
},
} {
t.Run(testCase.name, func(t *testing.T) {
m := abcimocks.NewApplication(t)
m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{
Status: abci.ResponseProcessProposal_ACCEPT,
}, nil)
m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil)
r := &abci.ResponseFinalizeBlock{AgreedAppData: []byte("the_hash")}
m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe()
m.On("Commit", mock.Anything, mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe()
cs1, vss := randStateWithApp(4, m)
height, round := cs1.Height, cs1.Round
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
pv1, err := cs1.privValidator.GetPubKey()
require.NoError(t, err)
addr := pv1.Address()
voteCh := subscribeToVoter(cs1, addr)
startTestRound(cs1, cs1.Height, round)
ensureNewRound(newRoundCh, height, round)
ensureNewProposal(proposalCh, height, round)
rs := cs1.GetRoundState()
blockID := types.BlockID{}
nextRound := round + 1
nextHeight := height
if !testCase.voteNil {
nextRound = 0
nextHeight = height + 1
blockID = types.BlockID{
Hash: rs.ProposalBlock.Hash(),
PartSetHeader: rs.ProposalBlockParts.Header(),
}
}
signAddVotes(cs1, tmproto.PrevoteType, blockID.Hash, blockID.PartSetHeader, vss[1:]...)
ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash())
signAddVotes(cs1, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader, vss[1:]...)
ensurePrecommit(voteCh, height, round)
ensureNewRound(newRoundCh, nextHeight, nextRound)
m.AssertExpectations(t)
if !testCase.expectCalled {
m.AssertNotCalled(t, "FinalizeBlock", context.TODO(), mock.Anything)
} else {
m.AssertCalled(t, "FinalizeBlock", context.TODO(), mock.Anything)
}
})
}
}
// 4 vals, 3 Nil Precommits at P0
// What we want:
// P0 waits for timeoutPrecommit before starting next round

View File

@@ -28,9 +28,7 @@ import (
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int, config *cfg.Config) (err error) {
app := kvstore.NewPersistentApplication(filepath.Join(config.DBDir(), "wal_generator"))
logger := log.TestingLogger().With("wal_generator", "wal_generator")
@@ -49,7 +47,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
@@ -123,11 +121,11 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
// WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
func WALWithNBlocks(t *testing.T, numBlocks int, config *cfg.Config) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
if err := WALGenerateNBlocks(t, wr, numBlocks, config); err != nil {
return []byte{}, err
}

View File

@@ -56,7 +56,7 @@ func TestWALTruncate(t *testing.T) {
// 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10),
// when headBuf is full, truncate content will Flush to the file. at this
// time, RotateFile is called, truncate content exist in each file.
err = WALGenerateNBlocks(t, wal.Group(), 60)
err = WALGenerateNBlocks(t, wal.Group(), 60, getConfig(t))
require.NoError(t, err)
time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run
@@ -150,7 +150,7 @@ func TestWALWrite(t *testing.T) {
}
func TestWALSearchForEndHeight(t *testing.T) {
walBody, err := WALWithNBlocks(t, 6)
walBody, err := WALWithNBlocks(t, 6, getConfig(t))
if err != nil {
t.Fatal(err)
}
@@ -188,7 +188,7 @@ func TestWALPeriodicSync(t *testing.T) {
wal.SetLogger(log.TestingLogger())
// Generate some data
err = WALGenerateNBlocks(t, wal.Group(), 5)
err = WALGenerateNBlocks(t, wal.Group(), 5, getConfig(t))
require.NoError(t, err)
// We should have data in the buffer now

View File

@@ -359,7 +359,7 @@ func TestRecoverPendingEvidence(t *testing.T) {
func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state := sm.State{
ChainID: evidenceChainID,

View File

@@ -34,7 +34,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader
witnessesToRemove = make([]int, 0)
)
c.logger.Debug("Running detector against trace", "endBlockHeight", lastVerifiedHeader.Height,
c.logger.Debug("Running detector against trace", "finalizeBlockHeight", lastVerifiedHeader.Height,
"endBlockHash", lastVerifiedHeader.Hash, "length", len(primaryTrace))
c.providerMutex.Lock()

View File

@@ -664,9 +664,9 @@ func (mem *CListMempool) recheckTxs() {
}
}
if err := mem.proxyAppConn.Flush(context.TODO()); err != nil {
mem.logger.Error("recheckTx flush", err, "err")
}
// In <v0.37 we would call FlushAsync at the end of recheckTx forcing the buffer to flush
// all pending messages to the app. There doesn't seem to be any need here as the buffer
// will get flushed regularly or when filled.
}
//--------------------------------------------------------------------------------

View File

@@ -249,7 +249,6 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
mockClient.On("SetLogger", mock.Anything)
mockClient.On("Error").Return(nil).Times(4)
mockClient.On("Flush", mock.Anything).Return(nil)
mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))
app := kvstore.NewInMemoryApplication()
@@ -290,8 +289,6 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
req = &abci.RequestCheckTx{Tx: txs[3]}
callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
mockClient.AssertExpectations(t)
mockClient.AssertExpectations(t)
}
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {

View File

@@ -98,11 +98,11 @@ func TestReactorConcurrency(t *testing.T) {
reactors[0].mempool.Lock()
defer reactors[0].mempool.Unlock()
deliverTxResponses := make([]*abci.ExecTxResult, len(txs))
txResponses := make([]*abci.ExecTxResult, len(txs))
for i := range txs {
deliverTxResponses[i] = &abci.ExecTxResult{Code: 0}
txResponses[i] = &abci.ExecTxResult{Code: 0}
}
err := reactors[0].mempool.Update(1, txs, deliverTxResponses, nil, nil)
err := reactors[0].mempool.Update(1, txs, txResponses, nil, nil)
assert.NoError(t, err)
}()

View File

@@ -387,7 +387,7 @@ func (txmp *TxMempool) Update(
) error {
// Safety check: Transactions and responses must match in number.
if len(blockTxs) != len(txResults) {
panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses",
panic(fmt.Sprintf("mempool: got %d transactions but %d TxResult responses",
len(blockTxs), len(txResults)))
}
@@ -693,7 +693,9 @@ func (txmp *TxMempool) recheckTransactions() {
return nil
})
}
_ = txmp.proxyAppConn.Flush(context.TODO())
// In <v0.37 we would call FlushAsync at the end of recheckTx forcing the buffer to flush
// all pending messages to the app. There doesn't seem to be any need here as the buffer
// will get flushed regularly or when filled.
// When recheck is complete, trigger a notification for more transactions.
_ = g.Wait()

View File

@@ -152,7 +152,7 @@ func NewNode(config *cfg.Config,
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: config.Storage.DiscardFinalizeBlockResponses,
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
@@ -171,7 +171,7 @@ func NewNode(config *cfg.Config,
// EventBus and IndexerService must be started before the handshake because
// we might need to index the txs of the replayed block as this might not have happened
// when the node stopped last time (i.e. the node stopped after it saved the block
// but before it indexed the txs, or, endblocker panicked)
// but before it indexed the txs)
eventBus, err := createAndStartEventBus(logger)
if err != nil {
return nil, err

View File

@@ -261,7 +261,7 @@ func TestCreateProposalBlock(t *testing.T) {
var height int64 = 1
state, stateDB, privVals := state(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
maxBytes := 16384
var partSize uint32 = 256
@@ -373,7 +373,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
var height int64 = 1
state, stateDB, _ := state(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
var maxBytes int64 = 16384
var partSize uint32 = 256
@@ -505,7 +505,7 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
// save validators to db for 2 heights
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
if err := stateStore.Save(s); err != nil {
panic(err)

View File

@@ -620,7 +620,7 @@ func LoadStateFromDBOrGenesisDocProvider(
}
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {

View File

@@ -74,7 +74,7 @@ type SignClient interface {
Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)
// TxSearch defines a method to search for a paginated set of transactions by
// DeliverTx event search criteria.
// transaction event search criteria.
TxSearch(
ctx context.Context,
query string,
@@ -83,8 +83,8 @@ type SignClient interface {
orderBy string,
) (*ctypes.ResultTxSearch, error)
// BlockSearch defines a method to search for a paginated set of blocks by
// BeginBlock and EndBlock event search criteria.
// BlockSearch defines a method to search for a paginated set of blocks based
// from FinalizeBlock event search criteria.
BlockSearch(
ctx context.Context,
query string,

View File

@@ -191,8 +191,8 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR
}, nil
}
// BlockSearch searches for a paginated set of blocks matching BeginBlock and
// EndBlock event search criteria.
// BlockSearch searches for a paginated set of blocks matching
// FinalizeBlock event search criteria.
func BlockSearch(
ctx *rpctypes.Context,
query string,

View File

@@ -79,7 +79,7 @@ func TestBlockResults(t *testing.T) {
env = &Environment{}
env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
err := env.StateStore.SaveFinalizeBlockResponse(100, results)
require.NoError(t, err)

View File

@@ -17,7 +17,7 @@ import (
// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!)
// BroadcastTxAsync returns right away, with no response. Does not wait for
// CheckTx nor DeliverTx results.
// CheckTx nor transaction results.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_async
func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{})
@@ -29,7 +29,7 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca
}
// BroadcastTxSync returns with the response from CheckTx. Does not wait for
// DeliverTx result.
// the transaction result.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_sync
func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
resCh := make(chan *abci.ResponseCheckTx, 1)
@@ -58,7 +58,7 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas
}
}
// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx.
// BroadcastTxCommit returns with the responses from CheckTx and ExecTxResult.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_commit
func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
subscriber := ctx.RemoteAddr()
@@ -73,7 +73,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout)
defer cancel()
q := types.EventQueryTxFor(tx)
deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q)
txSub, err := env.EventBus.Subscribe(subCtx, subscriber, q)
if err != nil {
err = fmt.Errorf("failed to subscribe to tx: %w", err)
env.Logger.Error("Error on broadcast_tx_commit", "err", err)
@@ -111,7 +111,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
// Wait for the tx to be included in a block or timeout.
select {
case msg := <-deliverTxSub.Out(): // The tx was included in a block.
case msg := <-txSub.Out(): // The tx was included in a block.
txResultEvent := msg.Data().(types.EventDataTx)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes,
@@ -119,14 +119,14 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
Hash: tx.Hash(),
Height: txResultEvent.Height,
}, nil
case <-deliverTxSub.Cancelled():
case <-txSub.Cancelled():
var reason string
if deliverTxSub.Err() == nil {
if txSub.Err() == nil {
reason = "Tendermint exited"
} else {
reason = deliverTxSub.Err().Error()
reason = txSub.Err().Error()
}
err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason)
err = fmt.Errorf("txSub was canceled (reason: %s)", reason)
env.Logger.Error("Error on broadcastTxCommit", "err", err)
return &ctypes.ResultBroadcastTxCommit{
CheckTx: *checkTxRes,

View File

@@ -181,7 +181,7 @@ type ResultBroadcastTx struct {
Hash bytes.HexBytes `json:"hash"`
}
// CheckTx and DeliverTx results
// CheckTx and ExecTx results
type ResultBroadcastTxCommit struct {
CheckTx abci.ResponseCheckTx `json:"check_tx"`
TxResult abci.ExecTxResult `json:"tx_result"`

View File

@@ -47,7 +47,7 @@ func TestApplyBlock(t *testing.T) {
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(dbm.NewMemDB())
@@ -88,7 +88,7 @@ func TestFinalizeBlockValidators(t *testing.T) {
state, stateDB, _ := makeState(2, 2)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
prevHash := state.LastBlockID.Hash
@@ -153,7 +153,7 @@ func TestFinalizeBlockMisbehavior(t *testing.T) {
state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
@@ -270,7 +270,7 @@ func TestProcessProposal(t *testing.T) {
state, stateDB, privVals := makeState(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(dbm.NewMemDB())
eventBus := types.NewEventBus()
@@ -478,7 +478,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) {
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
mp := &mpmocks.Mempool{}
mp.On("Lock").Return()
@@ -567,7 +567,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
state, stateDB, _ := makeState(1, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockStore := store.NewBlockStore(dbm.NewMemDB())
blockExec := sm.NewBlockExecutor(
@@ -608,7 +608,7 @@ func TestEmptyPrepareProposal(t *testing.T) {
state, stateDB, privVals := makeState(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
mp := &mpmocks.Mempool{}
mp.On("Lock").Return()
@@ -646,7 +646,7 @@ func TestPrepareProposalTxsAllIncluded(t *testing.T) {
state, stateDB, privVals := makeState(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
evpool := &mocks.EvidencePool{}
@@ -695,7 +695,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) {
state, stateDB, privVals := makeState(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
evpool := &mocks.EvidencePool{}
@@ -750,7 +750,7 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) {
// limit max block size
state.ConsensusParams.Block.MaxBytes = 60 * 1024
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
evpool := &mocks.EvidencePool{}
@@ -801,7 +801,7 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) {
state, stateDB, privVals := makeState(1, height)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
evpool := &mocks.EvidencePool{}

View File

@@ -41,6 +41,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.V
// SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in
// store.go, exported exclusively and explicitly for testing.
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error {
stateStore := dbStore{db, StoreOptions{DiscardFinalizeBlockResponses: false}}
stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}}
return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet)
}

View File

@@ -124,7 +124,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
if err := stateStore.Save(s); err != nil {
panic(err)

View File

@@ -15,10 +15,10 @@ type BlockIndexer interface {
// upon database query failure.
Has(height int64) (bool, error)
// Index indexes BeginBlock and EndBlock events for a given block by its height.
// Index indexes FinalizeBlock events for a given block by its height.
Index(types.EventDataNewBlockEvents) error
// Search performs a query for block heights that match a given BeginBlock
// and Endblock event search criteria.
// Search performs a query for block heights that match a given FinalizeBlock
// event search criteria.
Search(ctx context.Context, q *query.Query) ([]int64, error)
}

View File

@@ -20,7 +20,7 @@ import (
var _ indexer.BlockIndexer = (*BlockerIndexer)(nil)
// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock
// BlockerIndexer implements a block indexer, indexing FinalizeBlock
// events with an underlying KV store. Block events are indexed by their height,
// such that matching search criteria returns the respective block height(s).
type BlockerIndexer struct {
@@ -44,12 +44,11 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) {
return idx.store.Has(key)
}
// Index indexes BeginBlock and EndBlock events for a given block by its height.
// Index indexes FinalizeBlock events for a given block by its height.
// The following is indexed:
//
// primary key: encode(block.height | height) => encode(height)
// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height)
// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height)
// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block) => encode(height)
func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockEvents) error {
batch := idx.store.NewBatch()
defer batch.Close()
@@ -66,15 +65,15 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockEvents) error {
}
// 2. index block events
if err := idx.indexEvents(batch, bh.Events, "begin_block", height); err != nil {
return fmt.Errorf("failed to index BeginBlock events: %w", err)
if err := idx.indexEvents(batch, bh.Events, "finalize_block", height); err != nil {
return fmt.Errorf("failed to index FinalizeBlock events: %w", err)
}
return batch.WriteSync()
}
// Search performs a query for block heights that match a given BeginBlock
// and Endblock event search criteria. The given query can match against zero,
// Search performs a query for block heights that match a given FinalizeBlock
// event search criteria. The given query can match against zero,
// one or more block heights. In the case of height queries, i.e. block.height=H,
// if the height is indexed, that height alone will be returned. An error and
// nil slice is returned. Otherwise, a non-nil slice and nil error is returned.

View File

@@ -24,8 +24,7 @@ import (
)
const (
eventTypeBeginBlock = "begin_block"
eventTypeEndBlock = "end_block"
	eventTypeFinalizeBlock = "finalize_block"
)
// TxIndexer returns a bridge from es to the Tendermint v0.34 transaction indexer.

View File

@@ -165,7 +165,7 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at)
}
// Insert all the block events. Order is important here,
if err := insertEvents(dbtx, blockID, 0, h.Events); err != nil {
return fmt.Errorf("begin-block events: %w", err)
return fmt.Errorf("finalize block events: %w", err)
}
return nil
})

View File

@@ -351,17 +351,8 @@ SELECT height FROM `+tableBlocks+` WHERE height = $1;
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, eventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", eventTypeBeginBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, eventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", eventTypeEndBlock, height)
`, height, eventTypeFinalizeBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", eventTypeFinalizeBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}

View File

@@ -18,7 +18,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_processing_time",
Help: "Time between BeginBlock and EndBlock in ms.",
Help: "Time spent processing finalize block.",
Buckets: stdprometheus.LinearBuckets(1, 10, 10),
}, labels).With(labelsAndValues...),

View File

@@ -14,7 +14,7 @@ const (
// Metrics contains metrics exposed by this package.
type Metrics struct {
// Time between BeginBlock and EndBlock in ms.
// Time spent processing FinalizeBlock
BlockProcessingTime metrics.Histogram `metrics_buckettype:"lin" metrics_bucketsizes:"1, 10, 10"`
// ConsensusParamUpdates is the total number of times the application has

View File

@@ -88,7 +88,7 @@ func TestRollback(t *testing.T) {
func TestRollbackHard(t *testing.T) {
const height int64 = 100
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardFinalizeBlockResponses: false})
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false})
valSet, _ := types.RandValidatorSet(5, 10)
@@ -204,7 +204,7 @@ func TestRollbackHard(t *testing.T) {
func TestRollbackNoState(t *testing.T) {
stateStore := state.NewStore(dbm.NewMemDB(),
state.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
blockStore := &mocks.BlockStore{}
@@ -238,7 +238,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) {
}
func setupStateStore(t *testing.T, height int64) state.Store {
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardFinalizeBlockResponses: false})
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false})
valSet, _ := types.RandValidatorSet(5, 10)
params := types.DefaultConsensusParams()

View File

@@ -68,7 +68,7 @@ type State struct {
LastHeightValidatorsChanged int64
// Consensus parameters used for validating blocks.
// Changes returned by EndBlock and updated after Commit.
// Changes returned by FinalizeBlock and updated after Commit.
ConsensusParams types.ConsensusParams
LastHeightConsensusParamsChanged int64

View File

@@ -28,7 +28,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
dbType := dbm.BackendType(config.DBBackend)
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
@@ -77,7 +77,7 @@ func TestStateSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
assert := assert.New(t)
@@ -98,7 +98,7 @@ func TestFinalizeBlockResponsesSaveLoad1(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
assert := assert.New(t)
@@ -131,7 +131,7 @@ func TestFinalizeBlockResponsesSaveLoad2(t *testing.T) {
assert := assert.New(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
cases := [...]struct {
@@ -219,7 +219,7 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
assert := assert.New(t)
statestore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
// Can't load anything for height 0.
@@ -254,7 +254,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
// Change vals at these heights.
@@ -912,7 +912,7 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
t.Cleanup(func() { tearDown(t) })
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
@@ -939,7 +939,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
tearDown, stateDB, state := setupTestCase(t)
defer tearDown(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
@@ -1005,7 +1005,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
defer tearDown(t)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
// Change vals at these heights.

View File

@@ -84,11 +84,11 @@ type dbStore struct {
}
type StoreOptions struct {
// DiscardFinalizeBlockResponses determines whether or not the store
// retains all ABCIResponses. If DiscardFinalizeBlockResponses is enabled,
// DiscardABCIResponses determines whether or not the store
// retains all ABCIResponses. If DiscardABCIResponses is enabled,
// the store will maintain only the response object from the latest
// height.
DiscardFinalizeBlockResponses bool
DiscardABCIResponses bool
}
var _ Store = (*dbStore)(nil)
@@ -375,11 +375,11 @@ func TxResultsHash(txResults []*abci.ExecTxResult) []byte {
return types.NewResults(txResults).Hash()
}
// LoadFinalizeBlockResponse loads the DiscardFinalizeBlockResponses for the given height from the
// LoadFinalizeBlockResponse loads the FinalizeBlock responses for the given height from the
// database. If the node has DiscardABCIResponses set to true, ErrFinalizeBlockResponsesNotPersisted
// is returned. If not found, ErrNoABCIResponsesForHeight is returned.
func (store dbStore) LoadFinalizeBlockResponse(height int64) (*abci.ResponseFinalizeBlock, error) {
if store.DiscardFinalizeBlockResponses {
if store.DiscardABCIResponses {
return nil, ErrFinalizeBlockResponsesNotPersisted
}
@@ -445,7 +445,26 @@ func (store dbStore) LoadLastFinalizeBlockResponse(height int64) (*abci.Response
// Here we validate the result by comparing its height to the expected height.
if height != info.GetHeight() {
return nil, errors.New("expected height %d but last stored abci responses was at height %d")
return nil, fmt.Errorf("expected height %d but last stored abci responses was at height %d", height, info.GetHeight())
}
// It is possible if this is called directly after an upgrade that
// ResponseFinalizeBlock is nil. In which case we use the legacy
// ABCI responses
if info.ResponseFinalizeBlock == nil {
// sanity check
if info.LegacyAbciResponses == nil {
panic("state store contains last abci response but it is empty")
}
legacyResp := info.LegacyAbciResponses
return &abci.ResponseFinalizeBlock{
TxResults: legacyResp.DeliverTxs,
ValidatorUpdates: legacyResp.EndBlock.ValidatorUpdates,
ConsensusParamUpdates: legacyResp.EndBlock.ConsensusParamUpdates,
Events: append(legacyResp.BeginBlock.Events, legacyResp.EndBlock.Events...),
// NOTE: AgreedAppData is missing in the response but will
// be caught and filled in consensus/replay.go
}, nil
}
return info.ResponseFinalizeBlock, nil
@@ -469,7 +488,7 @@ func (store dbStore) SaveFinalizeBlockResponse(height int64, resp *abci.Response
// If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults
// query or to reindex an event using the command line.
if !store.DiscardFinalizeBlockResponses {
if !store.DiscardABCIResponses {
bz, err := resp.Marshal()
if err != nil {
return err

View File

@@ -22,7 +22,7 @@ import (
func TestStoreLoadValidators(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
@@ -55,7 +55,7 @@ func BenchmarkLoadValidators(b *testing.B) {
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
require.NoError(b, err)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
@@ -112,7 +112,7 @@ func TestPruneStates(t *testing.T) {
t.Run(name, func(t *testing.T) {
db := dbm.NewMemDB()
stateStore := sm.NewStore(db, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
pk := ed25519.GenPrivKey().PubKey()
@@ -214,11 +214,11 @@ func TestTxResultsHash(t *testing.T) {
root := sm.TxResultsHash(txResults)
// root should be Merkle tree root of DeliverTxs responses
// root should be Merkle tree root of ExecTxResult responses
results := types.NewResults(txResults)
assert.Equal(t, root, results.Hash())
// test we can prove first DeliverTx
// test we can prove first ExecTxResult
proof := results.ProveResult(0)
bz, err := results[0].Marshal()
require.NoError(t, err)
@@ -238,7 +238,7 @@ func TestLastFinalizeBlockResponses(t *testing.T) {
t.Run("Not persisting responses", func(t *testing.T) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
responses, err := stateStore.LoadFinalizeBlockResponse(1)
require.Error(t, err)
@@ -251,7 +251,7 @@ func TestLastFinalizeBlockResponses(t *testing.T) {
}
// create new db and state store and set discard abciresponses to false.
stateDB = dbm.NewMemDB()
stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardFinalizeBlockResponses: false})
stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false})
height := int64(10)
// save the last abci response.
err = stateStore.SaveFinalizeBlockResponse(height, response1)
@@ -281,7 +281,7 @@ func TestLastFinalizeBlockResponses(t *testing.T) {
}
// create a new statestore with the responses on.
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: true,
DiscardABCIResponses: true,
})
// save an additional response.
err := stateStore.SaveFinalizeBlockResponse(height+1, response2)

View File

@@ -34,7 +34,7 @@ func TestTxFilter(t *testing.T) {
stateDB, err := dbm.NewDB("state", "memdb", os.TempDir())
require.NoError(t, err)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)

View File

@@ -32,7 +32,7 @@ func TestValidateBlockHeader(t *testing.T) {
state, stateDB, privVals := makeState(3, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
mp := &mpmocks.Mempool{}
mp.On("Lock").Return()
@@ -120,7 +120,7 @@ func TestValidateBlockCommit(t *testing.T) {
state, stateDB, privVals := makeState(1, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
mp := &mpmocks.Mempool{}
mp.On("Lock").Return()
@@ -254,7 +254,7 @@ func TestValidateBlockEvidence(t *testing.T) {
state, stateDB, privVals := makeState(4, 1)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

View File

@@ -50,7 +50,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
if err != nil {
@@ -367,7 +367,7 @@ func TestLoadBaseMeta(t *testing.T) {
config := test.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
@@ -429,7 +429,7 @@ func TestPruneBlocks(t *testing.T) {
config := test.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)
@@ -569,7 +569,7 @@ func TestLoadBlockMetaByHash(t *testing.T) {
config := test.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardFinalizeBlockResponses: false,
DiscardABCIResponses: false,
})
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
require.NoError(t, err)

View File

@@ -342,7 +342,7 @@ type Header struct {
ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` // consensus params for current block
AppHash tmbytes.HexBytes `json:"app_hash"` // state after txs from the previous block
// root hash of all results from the txs from the previous block
// see `deterministicResponseDeliverTx` to understand which parts of a tx is hashed into here
// see `deterministicExecTxResult` to understand which parts of a tx is hashed into here
LastResultsHash tmbytes.HexBytes `json:"last_results_hash"`
// consensus info

View File

@@ -19,7 +19,7 @@ func shouldBatchVerify(vals *ValidatorSet, commit *Commit) bool {
//
// It checks all the signatures! While it's safe to exit as soon as we have
// 2/3+ signatures, doing so would impact incentivization logic in the ABCI
// application that depends on the LastCommitInfo sent in BeginBlock, which
// application that depends on the LastCommitInfo sent in FinalizeBlock, which
// includes which validators signed. For instance, Gaia incentivizes proposers
// with a bonus for including more than +2/3 of the signatures.
func VerifyCommit(chainID string, vals *ValidatorSet, blockID BlockID,