Compare commits


10 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| marbar3778 | ed0c89eb76 | fix tests amd errors | 2021-07-15 15:00:15 +02:00 |
| marbar3778 | 87c2ee69fd | fix errors in abci | 2021-07-05 16:58:31 +02:00 |
| marbar3778 | dc3327083e | fix errors | 2021-07-05 14:34:43 +02:00 |
| marbar3778 | 6a29aa2b7a | Merge branch 'abci++' into finalizeBlock | 2021-07-05 13:51:13 +02:00 |
| marbar3778 | 8a39848d06 | fix some tests | 2021-06-17 11:56:16 +02:00 |
| marbar3778 | f9a22483c0 | vacation commit | 2021-06-17 09:38:02 +02:00 |
| marbar3778 | 5a4a56a781 | abciresponse, blok events, indexer, some tests | 2021-06-08 18:26:35 +02:00 |
| marbar3778 | 3e006e6bc1 | work on abci, proxy and mempool | 2021-06-08 17:53:31 +02:00 |
| marbar3778 | ffb1fc32ea | migrate abci to finalizeBlock | 2021-06-08 10:43:57 +02:00 |
| marbar3778 | a8f91f696a | finalize block | 2021-06-07 18:19:56 +02:00 |
356 changed files with 6766 additions and 16509 deletions

.github/CODEOWNERS vendored (3 changes)
View File

@@ -7,4 +7,5 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish

.github/codecov.yml vendored (14 changes)
View File

@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off
github_checks:
annotations: false
comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes
ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"

View File

@@ -2,9 +2,6 @@ name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
- "!test/"
branches:
- master
- release/**
@@ -49,11 +46,10 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
@@ -72,11 +68,10 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -101,11 +96,10 @@ jobs:
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -127,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v1.5.2
with:
file: ./coverage.txt
if: env.GIT_DIFF

View File

@@ -40,7 +40,7 @@ jobs:
platforms: all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1.5.0
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
@@ -50,7 +50,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2.6.1
with:
context: .
file: ./DOCKER/Dockerfile

View File

@@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
@@ -30,12 +30,12 @@ jobs:
- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests
run: make -j2 docker generator runner
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e

View File

@@ -18,7 +18,7 @@ jobs:
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +28,7 @@ jobs:
- name: Build
working-directory: test/e2e
# Run two make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker runner tests
run: make -j2 docker runner
if: "env.GIT_DIFF != ''"
- name: Run CI testnet

View File

@@ -23,14 +23,9 @@ jobs:
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true
- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true
- name: Fuzz p2p-addrbook

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.9.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}

View File

@@ -7,6 +7,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"

View File

@@ -14,7 +14,7 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go

View File

@@ -34,7 +34,7 @@ jobs:
echo ::set-output name=tags::${TAGS}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1.5.0
- name: Login to DockerHub
uses: docker/login-action@v1.10.0
@@ -43,7 +43,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2.6.1
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile

View File

@@ -2,7 +2,7 @@ name: "Release"
on:
push:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
@@ -20,6 +20,9 @@ jobs:
with:
go-version: '1.16'
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')
- name: Build
uses: goreleaser/goreleaser-action@v2
if: ${{ github.event_name == 'pull_request' }}
@@ -32,6 +35,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v3.0.19
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
days-before-stale: 10
days-before-close: 4
exempt-pr-labels: "S:wip"

View File

@@ -19,7 +19,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -42,6 +42,38 @@ jobs:
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF
test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -51,7 +83,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -82,7 +114,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go

.gitignore vendored (2 changes)
View File

@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*

View File

@@ -1,17 +1,14 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocyclo
@@ -25,11 +22,11 @@ linters:
- ineffassign
# - interfacer
- lll
# - maligned
- misspell
# - maligned
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
@@ -40,6 +37,9 @@ linters:
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint
- asciicheck
issues:
exclude-rules:

View File

@@ -1,194 +1,6 @@
# Changelog
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
## v0.35
Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
### BREAKING CHANGES
- CLI/RPC/Config
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) (See the Go sketch at the end of this section.)
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] \#6854 Remove deprecated snake case commands. (@tychoish)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
- Go API
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
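
The Go sketch below ties together two of the event-shape changes above: \#6634's ordered `[]abci.Event` slice and \#6408's string-typed `EventAttribute` fields. It is an illustrative consumer, not code from the Tendermint repository.

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// printEvents walks events in emission order, something the old
// map[string][]string representation could not guarantee.
func printEvents(events []abci.Event) {
	for _, ev := range events {
		for _, attr := range ev.Attributes {
			// Post-#6408, Key and Value are strings rather than []byte.
			fmt.Printf("%s.%s = %s\n", ev.Type, attr.Key, attr.Value)
		}
	}
}

func main() {
	printEvents([]abci.Event{{
		Type:       "transfer",
		Attributes: []abci.EventAttribute{{Key: "amount", Value: "100"}},
	}})
}
```
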
### FEATURES
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] \#6305 v2 pex reactor with backward compatibility. Introduces two new pex messages to
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] \#6376 Enable sr25519 as a validator key type
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez) (See the Go sketch at the end of this section.)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
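
As a rough illustration of the prioritized mempool above (\#6466), an ABCI application populates the new `Priority` and `Sender` fields from `CheckTx`. The fee policy here is a hypothetical stand-in; a real application would decode its own transaction format.

```go
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// App embeds the no-op BaseApplication and overrides CheckTx to set the
// fields introduced by #6466.
type App struct {
	abci.BaseApplication
}

func (App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	// Hypothetical policy: treat the first tx byte as a fee tier, so
	// higher-fee transactions are reaped first by the v1 mempool.
	var priority int64
	if len(req.Tx) > 0 {
		priority = int64(req.Tx[0])
	}
	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: priority,
		Sender:   "", // optional index; left empty in this sketch
	}
}

func main() {}
```
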
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes)
- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes)
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] \#6120 use batch verification for verifying commits signatures.
- If the key type supports the batch verification API it will try to batch verify. If batch verification fails, each signature is verified individually. (See the Go sketch at the end of this section.)
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] \#6240 Add `context.Context` to privval interface.
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
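
A minimal sketch of the fallback path described for \#6120 above. Only `ed25519.PubKey.VerifySignature` is a real Tendermint API here; the batch step itself is elided and the surrounding types are illustrative.

```go
package main

import (
	"github.com/tendermint/tendermint/crypto/ed25519"
)

// commitSig pairs a validator key with the message and signature to
// check. Illustrative only; not a Tendermint type.
type commitSig struct {
	pub ed25519.PubKey
	msg []byte
	sig []byte
}

// verifyIndividually is the fallback used when a batch verification
// attempt fails: each signature is checked on its own so the invalid
// ones can be identified.
func verifyIndividually(sigs []commitSig) bool {
	ok := true
	for _, s := range sigs {
		if !s.pub.VerifySignature(s.msg, s.sig) {
			ok = false
		}
	}
	return ok
}

func main() {}
```
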
### BUG FIXES
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106) (See the sketch below.)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] \#6590 Update the metrics during blocksync (@JayT106)
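
To illustrate the \#6507 fix above: with the single-argument `New` (per the \#6176 entry earlier in this changelog), a client can now be built from an address that omits the port. The endpoint is a placeholder.

```go
package main

import (
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Hypothetical endpoint; before #6507 an address without an
	// explicit port failed to parse.
	c, err := rpchttp.New("https://rpc.example.com")
	if err != nil {
		panic(err)
	}
	_ = c // ready for use, e.g. c.Status(ctx)
}
```
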
## v0.34.13
*September 6, 2021*
This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.
### IMPROVEMENTS
- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
## v0.34.12
*August 17, 2021*
Special thanks to external contributors on this release: @JayT106.
### FEATURES
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
### IMPROVEMENTS
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
### BUG FIXES
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
## v0.34.11
@@ -200,25 +12,25 @@ adding two new parameters to the state sync config.
### BREAKING CHANGES
- Apps
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
- [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.
### IMPROVEMENTS
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)
### BUG FIXES
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10
*April 14, 2021*
This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
@@ -227,7 +39,7 @@ this issue!
*April 8, 2021*
This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.
@@ -300,7 +112,7 @@ shout-out to @marbar3778 for diagnosing it quickly.
## v0.34.6
*February 18, 2021*
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
@@ -308,9 +120,9 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p
*February 11, 2021*
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
@@ -320,17 +132,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
## v0.34.3
*January 19, 2021*
This release includes a fix for a high-severity security vulnerability,
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
### BUG FIXES
@@ -422,14 +234,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
- Blockchain Protocol
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -488,7 +300,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
@@ -526,7 +338,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)

View File

@@ -9,18 +9,141 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES
- CLI/RPC/Config
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- P2P Protocol
- Go API
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez) (See the Go sketch at the end of this section.)
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
- Blockchain Protocol
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
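
A hedged sketch of submitting a transaction after \#6529 (see the Go API list above): the caller now passes a `context.Context` directly to `CheckTx` instead of stashing one in `TxInfo`. The exact `Mempool` interface shape and package location are assumed from the entries above, not quoted from the code.

```go
package main

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// submit forwards one transaction; cancelling ctx cancels an in-flight
// CheckTx, the behavior #5673 wires through to the ABCI client.
func submit(ctx context.Context, mp mempool.Mempool, tx types.Tx) error {
	return mp.CheckTx(ctx, tx, func(resp *abci.Response) {
		// Inspect resp.GetCheckTx() here if the result is needed.
	}, mempool.TxInfo{})
}

func main() {}
```
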
### FEATURES
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] \#6305 v2 pex reactor with backward compatibility. Introduces two new pex messages to
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] \#6376 Enable sr25519 as a validator key
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778)
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] \#6120 use batch verification for verifying commits signatures.
- If the key type supports the batch verification API it will try to batch verify. If batch verification fails, each signature is verified individually.
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] \#6240 Add `context.Context` to privval interface.
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
### BUG FIXES
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [rpc] \#6507 Fix RPC client handling of URLs without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)

View File

@@ -227,96 +227,16 @@ Fixes #nnnn
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on GitHub. Be sure to edit your commit message, though!
### Release procedure
### Release Procedure
#### A note about backport branches
Tendermint's `master` branch is under active development.
Releases are specified using tags and are built from long-lived "backport" branches.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked)
to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with
the label `S:backport-to-<backport_branch>`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.
#### Creating a backport branch
If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!
Note that, after creating the backport branch, you'll also need to update the tags on `master`
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
that is "greater than" the backport branches' tags. See #6072 for more context.
In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.
1. Start on `master`
2. Create the backport branch:
`git checkout -b v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
`git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
4. Create a new workflow to run the e2e nightlies for this backport branch.
(See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
for an example.)
#### Release candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.
Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:
1. Start from the backport branch (e.g. `v0.35.x`).
1. Run the integration tests and the e2e nightlies
(which can be triggered from the Github UI;
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
1. Prepare the changelog:
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
1. Open a PR with these changes against the backport branch.
1. Once these changes have landed on the backport branch, be sure to pull them back down locally.
2. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.35.0-rc0`
Now the tag should be available on the repo's releases page.
4. Future RCs will continue to be built off of this branch.
Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.
#### Major release
#### Major Release
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests and the e2e nightlies.
3. Prepare the release:
1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
- "Squash" changes from the changelog entries for the RCs into a single entry,
and add all changes included in `CHANGELOG_PENDING.md`.
(Squashing includes both combining all entries, as well as removing or simplifying
@@ -328,24 +248,58 @@ If there were no release candidates, begin by creating a backport branch, as des
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
This will trigger the actual release `v0.35.0`.
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
- `git push origin v0.35.0`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
- `git checkout RCx/vX.X.0`
- `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
- `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
new major release series.
#### Minor release (point releases)
##### Major Release (from `master`)
1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
had release candidates, squash all the RC updates into one
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Make sure all significant breaking changes are covered in `UPGRADING.md`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the release's changelog.
6. Delete any RC branches and tags for this release (if applicable)
#### Minor Release (Point Releases)
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
To create a minor release:
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
@@ -354,14 +308,35 @@ To create a minor release:
- Bump the ABCI version number, if necessary.
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
- `git push origin v0.35.1`
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.
#### Release Candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of RC branches. RC branches typically have names formatted
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
2. Create the new tag, specifying a name and a tag "message":
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.34.0-rc4`
Now the tag should be available on the repo's releases page.
4. Create a new release candidate branch for any possible updates to the RC:
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
## Testing
### Unit tests

View File

@@ -202,7 +202,7 @@ format:
lint:
@echo "--> Running linter"
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
.PHONY: lint
DESTINATION = ./index.html.md
@@ -231,15 +231,6 @@ build-docker: build-linux
rm -rf DOCKER/tendermint
.PHONY: build-docker
###############################################################################
### Mocks ###
###############################################################################
mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
###############################################################################
### Local testnet using docker ###
###############################################################################

View File

@@ -82,12 +82,32 @@ and familiarize yourself with our
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0.
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used
to signal breaking changes across Tendermint's API. This API includes all
publicly exposed types, functions, and methods in non-internal Go packages as well as
the types and methods accessible via the Tendermint RPC interface.
Breaking changes to these public APIs will be documented in the CHANGELOG.
### Upgrades

View File

@@ -2,7 +2,7 @@
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## v0.35
### ABCI Changes
@@ -17,60 +17,25 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### Config Changes
* The configuration file field `[fastsync]` has been renamed to `[blocksync]`.
* The top level configuration file field `fast-sync` has moved under the new `[blocksync]`
field as `blocksync.enable`.
* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`)
are no longer supported. Please use `v0` instead. During the v0.35 release cycle, `v0` was
determined to suit the existing needs and the cost of maintaining the `v1` and `v2` modules
was determined to be greater than necessary.
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
  you have updated all the variables in your `config.toml` file.
* Added a `--mode` flag and a `mode` config variable in `config.toml` for setting the node's mode: `full` | `validator` | `seed` (default: `full`)
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
  `Seeds`. Bootstrap peers are connected to on startup if needed for peer discovery. Unlike
  persistent peers, there is no guarantee that the node will remain connected with these peers.
* Configuration values starting with `priv-validator-` have moved to the new
  `priv-validator` section, without the `priv-validator-` prefix.
* The fast sync process, as well as the blockchain package and service, have all
  been renamed to block sync.
### Database Key Format Changes
The format of all Tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script, located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`,
provides the function `Migrate(context.Context, db.DB)`, which you can
invoke however makes sense for your deployment.
For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke as follows:
tendermint key-migrate
This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
The migration operation is idempotent and can be run more than once,
if needed.
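If you prefer to drive the migration from Go instead of the CLI, the following is a minimal sketch of one way to do so. The database backend (`GoLevelDB`), the database name (`blockstore`), and the data directory used here are assumptions; substitute the values that match your deployment, and repeat for each database the node maintains.

```go
package main

import (
	"context"

	db "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Assumption: a LevelDB-backed "blockstore" database in the node's data
	// directory. The same call applies to the node's other databases.
	blockstore, err := db.NewGoLevelDB("blockstore", "/path/to/.tendermint/data")
	if err != nil {
		panic(err)
	}
	defer blockstore.Close()

	// Migrate is idempotent, so it is safe to re-run after a partial failure.
	if err := keymigrate.Migrate(context.Background(), blockstore); err != nil {
		panic(err)
	}
}
```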
### CLI Changes
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
* The `--fast-sync` command line option has been renamed to `--blocksync.enable`
* If you had previously used `tendermint gen_node_key` to generate a new node
key, keep in mind that it no longer saves the output to a file. You can use
`tendermint init validator` or pipe the output of `tendermint gen_node_key` to
@@ -85,8 +50,8 @@ if needed.
### API Changes
The p2p layer was reimplemented as part of the 0.35 release cycle, and
all reactors were refactored to accommodate the change. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of tendermint. These packages
are:
@@ -98,7 +63,7 @@ are:
- `blockchain`
- `evidence`
Accordingly, the `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:
@@ -109,84 +74,6 @@ will need to change to accommodate these changes. Most notably:
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.
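As a rough sketch of the new construction path, a program that previously built a `*node.Node` directly might now look something like this. This is a best-effort sketch against v0.35: the exact `node.NewDefault` signature and the config handling are assumptions to verify against the released API.

```go
package main

import (
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/node"
)

func main() {
	// Assumption: an already-initialized Tendermint home directory.
	conf := config.DefaultConfig().SetRoot("/path/to/.tendermint")
	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

	// node.NewDefault replaces the old exported constructors and returns a
	// service.Service rather than a concrete node type.
	n, err := node.NewDefault(conf, logger)
	if err != nil {
		panic(err)
	}
	if err := n.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = n.Stop() }()

	select {} // run until killed
}
```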
### gRPC Support
gRPC in the RPC layer is deprecated and will be removed in 0.36.
### Peer Management Interface
When running with the new P2P layer, the `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.
Additionally, the format of the peer list returned by the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.
### Using the updated p2p library
The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is
still included as a backstop to work around unforeseen production issues. The
new and legacy versions are interoperable. If necessary, you can enable the
legacy implementation in the server configuration file.

To make use of the legacy P2P implementation, add or update the following field
under the `[p2p]` section of your server's configuration file:
```toml
[p2p]
...
use-legacy = true
...
```
If you need to do this, please consider filing an issue in the Tendermint repository
to let us know why. We plan to remove the legacy P2P code in the next (v0.36) release.
#### New p2p queue types
The new p2p implementation enables selection of the queue type to be used for
passing messages between peers.
The following values may be used when selecting which queue type to use:
* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through
in the order in which they were received.
* `priority`: A priority queue of messages.
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
  weighted deficit round robin queue is created per peer. Each queue contains a
  separate 'flow' for each of the channels of communication that exist between any two
  peers. Tendermint maintains a channel per message type between peers. Each WDRR
  queue maintains a shared buffer with a fixed capacity through which messages on different
  flows are passed; the round robin scheduling is illustrated in the sketch after this list.
  For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin
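To make the scheduling concrete, here is a small, self-contained Go illustration of the deficit round robin idea. It is illustrative only and is not Tendermint's implementation; the flow names, quanta, and message sizes are invented for the example.

```go
package main

import "fmt"

// flow models one channel's queue between a pair of peers.
type flow struct {
	name    string
	quantum int   // credit added per round; scaling this is the "weighted" part
	deficit int   // accumulated unspent credit
	queue   []int // pending message sizes, in bytes
}

// drrRound gives each flow its quantum, then lets it send while the message
// at the head of its queue fits within the accumulated deficit.
func drrRound(flows []*flow, send func(name string, size int)) {
	for _, f := range flows {
		if len(f.queue) == 0 {
			f.deficit = 0 // idle flows do not bank credit
			continue
		}
		f.deficit += f.quantum
		for len(f.queue) > 0 && f.queue[0] <= f.deficit {
			size := f.queue[0]
			f.queue = f.queue[1:]
			f.deficit -= size
			send(f.name, size)
		}
	}
}

func main() {
	flows := []*flow{
		{name: "consensus", quantum: 300, queue: []int{100, 100, 100}},
		{name: "mempool", quantum: 100, queue: []int{250, 50}},
	}
	for round := 1; round <= 3; round++ {
		fmt.Println("round", round)
		drrRound(flows, func(name string, size int) {
			fmt.Printf("  sent %d bytes on %s\n", size, name)
		})
	}
}
```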
To select a queue type, add or update the following field under the `[p2p]`
section of your server's configuration file.
```toml
[p2p]
...
queue-type = "wdrr"
...
```
### Support for Custom Reactor and Mempool Implementations
The changes to the p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project, please feel free to reach out to the Tendermint Core
development team directly.
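As one example of the migration, an application that used a custom reactor to prefer certain transactions can often express that preference through `CheckTx` instead. The sketch below assumes the v0.35 `ResponseCheckTx.Priority` field and uses a made-up `parseFee` helper; it shows the idea rather than a drop-in implementation.

```go
package app

import (
	"encoding/binary"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

type App struct {
	types.BaseApplication
}

// parseFee is a hypothetical helper: it reads the first eight bytes of the
// tx as a big-endian fee, standing in for whatever decoding your app does.
func parseFee(tx []byte) uint64 {
	if len(tx) < 8 {
		return 0
	}
	return binary.BigEndian.Uint64(tx[:8])
}

func (app *App) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	return types.ResponseCheckTx{
		Code: code.CodeTypeOK,
		// Assumption: the prioritized mempool reaps higher-priority
		// transactions ahead of lower-priority ones.
		Priority: int64(parseFee(req.Tx)),
	}
}
```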
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
@@ -340,8 +227,8 @@ Other user-relevant changes include:
* The old `lite` package was removed; the new light client uses the `light` package.
* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -473,12 +360,12 @@ Evidence Params has been changed to include duration.
### Go API
* `libs/common` has been removed in favor of specific pkgs.
  * `async`
  * `service`
  * `rand`
  * `net`
  * `strings`
  * `cmap`
* removal of `errors` pkg
### RPC Changes
@@ -547,9 +434,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
```go
abci.ResponseDeliverTx{
Tags: []kv.Pair{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
}
}
```
@@ -568,14 +455,14 @@ the following `Events`:
```go
abci.ResponseDeliverTx{
Events: []abci.Event{
		{
			Type: "transfer",
			Attributes: kv.Pairs{
				{Key: []byte("sender"), Value: []byte("foo")},
				{Key: []byte("recipient"), Value: []byte("bar")},
				{Key: []byte("amount"), Value: []byte("35")},
			},
		},
	},
}
```
@@ -623,9 +510,9 @@ In this case, the WS client will receive an error with description:
"jsonrpc": "2.0",
"id": "{ID}#event",
"error": {
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
}
}
@@ -831,9 +718,9 @@ just the `Data` field set:
```go
[]ProofOp{
	ProofOp{
		Data: <proof bytes>,
	},
}
```

View File

@@ -15,7 +15,7 @@ const (
echoRetryIntervalSeconds = 1
)
//go:generate ../../scripts/mockery_generate.sh Client
// Client defines an interface for an ABCI client.
//
@@ -35,33 +35,29 @@ type Client interface {
FlushAsync(context.Context) (*ReqRes, error)
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
CommitAsync(context.Context) (*ReqRes, error)
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
// Synchronous requests
FlushSync(context.Context) error
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
CommitSync(context.Context) (*types.ResponseCommit, error)
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
}
//----------------------------------------

View File

@@ -194,16 +194,6 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
req := types.ToRequestDeliverTx(params)
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
req := types.ToRequestCheckTx(params)
@@ -244,26 +234,6 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
req := types.ToRequestListSnapshots(params)
@@ -314,6 +284,22 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
)
}
func (cli *grpcClient) FinalizeBlockAsync(
ctx context.Context,
params types.RequestFinalizeBlock,
) (*ReqRes, error) {
req := types.ToRequestFinalizeBlock(params)
res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(
ctx,
req,
&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
)
}
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -380,18 +366,6 @@ func (cli *grpcClient) InfoSync(
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}
func (cli *grpcClient) DeliverTxSync(
ctx context.Context,
params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.DeliverTxAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTxSync(
ctx context.Context,
params types.RequestCheckTx,
@@ -435,30 +409,6 @@ func (cli *grpcClient) InitChainSync(
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlockSync(
ctx context.Context,
params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.BeginBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlockSync(
ctx context.Context,
params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.EndBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}
func (cli *grpcClient) ListSnapshotsSync(
ctx context.Context,
params types.RequestListSnapshots,
@@ -504,3 +454,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
}
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}
func (cli *grpcClient) FinalizeBlockSync(
ctx context.Context,
params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.FinalizeBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
}

View File

@@ -77,17 +77,6 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
), nil
}
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(params)
return app.callback(
types.ToRequestDeliverTx(params),
types.ToResponseDeliverTx(res),
), nil
}
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -132,28 +121,6 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
), nil
}
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
), nil
}
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
), nil
}
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -204,6 +171,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
), nil
}
func (app *localClient) FinalizeBlockAsync(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return app.callback(
types.ToRequestFinalizeBlock(req),
types.ToResponseFinalizeBlock(res),
), nil
}
//-------------------------------------------------------
func (app *localClient) FlushSync(ctx context.Context) error {
@@ -222,18 +203,6 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
return &res, nil
}
func (app *localClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(req)
return &res, nil
}
func (app *localClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -276,30 +245,6 @@ func (app *localClient) InitChainSync(
return &res, nil
}
func (app *localClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return &res, nil
}
func (app *localClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return &res, nil
}
func (app *localClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -346,6 +291,17 @@ func (app *localClient) ApplySnapshotChunkSync(
return &res, nil
}
func (app *localClient) FinalizeBlockSync(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {

View File

@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
package mocks
@@ -65,52 +65,6 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
return r0, r1
}
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseBeginBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
@@ -203,52 +157,6 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
ret := _m.Called(ctx, msg)
@@ -295,52 +203,6 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
return r0, r1
}
// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseEndBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEndBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Error provides a mock function with given fields:
func (_m *Client) Error() error {
ret := _m.Called()
@@ -355,6 +217,52 @@ func (_m *Client) Error() error {
return r0
}
// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0)

View File

@@ -245,10 +245,6 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
@@ -265,14 +261,6 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
}
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}
@@ -295,6 +283,13 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}
func (cli *socketClient) FinalizeBlockAsync(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync(ctx context.Context) error {
@@ -341,18 +336,6 @@ func (cli *socketClient) InfoSync(
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
}
func (cli *socketClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -395,30 +378,6 @@ func (cli *socketClient) InitChainSync(
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
}
func (cli *socketClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
}
func (cli *socketClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -465,6 +424,17 @@ func (cli *socketClient) ApplySnapshotChunkSync(
return reqres.Response.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) FinalizeBlockSync(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetFinalizeBlock(), nil
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
@@ -569,8 +539,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
@@ -579,10 +547,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
case *types.Request_ApplySnapshotChunk:
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
case *types.Request_LoadSnapshotChunk:
@@ -591,6 +555,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_ListSnapshots)
case *types.Request_OfferSnapshot:
_, ok = res.Value.(*types.Response_OfferSnapshot)
case *types.Request_FinalizeBlock:
_, ok = res.Value.(*types.Response_FinalizeBlock)
}
return ok
}

View File

@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// This is FinalizeBlockSync unrolled....
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
err = c.FlushSync(context.Background())
assert.NoError(t, err)
res := reqres.Response.GetBeginBlock()
res := reqres.Response.GetFinalizeBlock()
assert.NotNil(t, res)
resp <- c.Error()
}()
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// Start FinalizeBlock and flush it
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
flush, err := c.FlushAsync(ctx)
assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
err = s.Stop()
assert.NoError(t, err)
// wait for the response from FinalizeBlock
reqres.Wait()
flush.Wait()
resp <- c.Error()
@@ -121,7 +121,7 @@ type slowApp struct {
types.BaseApplication
}
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	time.Sleep(200 * time.Millisecond)
	return types.ResponseFinalizeBlock{}
}

View File

@@ -17,6 +17,7 @@ import (
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -46,6 +47,9 @@ var (
flagHeight int
flagProve bool
// counter
flagSerial bool
// kvstore
flagPersist string
)
@@ -57,7 +61,9 @@ var RootCmd = &cobra.Command{
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "kvstore", "version":
case "counter", "kvstore": // for the examples apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}
@@ -129,6 +135,10 @@ func addQueryFlags() {
"whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}
func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -147,6 +157,8 @@ func addCommands() {
RootCmd.AddCommand(queryCmd)
// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
@@ -246,6 +258,14 @@ var queryCmd = &cobra.Command{
RunE: cmdQuery,
}
var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: cmdCounter,
}
var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
@@ -484,16 +504,18 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
if err != nil {
return err
}
for _, tx := range res.Txs {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,
Info: tx.Info,
Log: tx.Log,
})
}
return nil
}
@@ -573,6 +595,32 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})
// Run forever.
select {}
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

View File

@@ -0,0 +1,92 @@
package counter
import (
"encoding/binary"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
type Application struct {
types.BaseApplication
hashCount int
txCount int
serial bool
}
func NewApplication(serial bool) *Application {
return &Application{serial: serial}
}
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
	for i, tx := range req.Txs {
		if app.serial {
			if len(tx) > 8 {
				respTxs[i] = &types.ResponseDeliverTx{
					Code: code.CodeTypeEncodingError,
					Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx)),
				}
				continue
			}
			tx8 := make([]byte, 8)
			copy(tx8[len(tx8)-len(tx):], tx)
			txValue := binary.BigEndian.Uint64(tx8)
			if txValue != uint64(app.txCount) {
				respTxs[i] = &types.ResponseDeliverTx{
					Code: code.CodeTypeBadNonce,
					Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue),
				}
				continue
			}
		}
		app.txCount++
		respTxs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK}
	}
	return types.ResponseFinalizeBlock{Txs: respTxs}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
}
}
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *Application) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return types.ResponseCommit{Data: hash}
}
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}

View File

@@ -76,20 +76,22 @@ func testStream(t *testing.T, app types.Application) {
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
case *types.Response_FinalizeBlock:
for _, tx := range r.FinalizeBlock.Txs {
counter++
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
}
case *types.Response_Flush:
// ignore
@@ -103,7 +105,8 @@ func testStream(t *testing.T, app types.Application) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
tx := []byte("test")
_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
// Sometimes send flush messages
@@ -163,22 +166,25 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
txt := []byte("test")
response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
for _, tx := range response.Txs {
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
}
}

View File

@@ -86,35 +86,40 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
}
// tx is either "key=value" or just arbitrary bytes
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	var key, value string
	var txs = make([]*types.ResponseDeliverTx, len(req.Txs))

	for i, tx := range req.Txs {
		parts := bytes.Split(tx, []byte("="))
		if len(parts) == 2 {
			key, value = string(parts[0]), string(parts[1])
		} else {
			key, value = string(tx), string(tx)
		}

		err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
		if err != nil {
			panic(err)
		}
		app.state.Size++

		events := []types.Event{
			{
				Type: "app",
				Attributes: []types.EventAttribute{
					{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
					{Key: "key", Value: key, Index: true},
					{Key: "index_key", Value: "index is working", Index: true},
					{Key: "noindex_key", Value: "index is working", Index: false},
				},
			},
		}
		txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
	}

	return types.ResponseFinalizeBlock{Txs: txs}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {

View File

@@ -27,12 +27,16 @@ const (
var ctx = context.Background()
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
	req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
	ar := app.FinalizeBlock(req)
	for _, tx := range ar.Txs {
		require.False(t, tx.IsErr(), ar)
	}

	// repeating tx doesn't raise error
	ar = app.FinalizeBlock(req)
	for _, tx := range ar.Txs {
		require.False(t, tx.IsErr(), ar)
	}
// commit
app.Commit()
@@ -109,8 +113,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
	kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -200,16 +203,15 @@ func makeApplyBlock(
Height: height,
}
	resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
		Hash:   hash,
		Header: header,
		Txs:    txs,
	})

	kvstore.Commit()

	valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
}
@@ -326,13 +328,16 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
	ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
	require.NoError(t, err)
	for _, tx := range ar.Txs {
		require.False(t, tx.IsErr(), ar)
	}

	// repeating tx doesn't raise error
	ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
	require.NoError(t, err)
	for _, tx := range ar.Txs {
		require.False(t, tx.IsErr(), ar)
	}
// commit
_, err = app.CommitSync(ctx)
require.NoError(t, err)

View File

@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
return res
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(req.Tx)
}
// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(req.Tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// otherwise, update the key-value store
return app.app.DeliverTx(req)
}
// // otherwise, update the key-value store
// return app.app.DeliverTx(req)
// }
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
@@ -119,8 +119,40 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
return types.ResponseInitChain{}
}
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) FinalizeBlock(
req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
// for i, tx := range req.Txs {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// // otherwise, update the key-value store
// return app.app.DeliverTx(tx)
// }
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
@@ -142,32 +174,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
}
}
return types.ResponseBeginBlock{}
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
}
//---------------------------------------------

View File

@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
@@ -215,12 +212,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
case *types.Request_ListSnapshots:
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
@@ -233,6 +224,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_ApplySnapshotChunk:
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
case *types.Request_FinalizeBlock:
res := s.app.FinalizeBlock(*r.FinalizeBlock)
responses <- types.ToResponseFinalizeBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}

View File

@@ -51,20 +51,22 @@ func Commit(client abcicli.Client, hashExp []byte) error {
return nil
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
for _, tx := range res.Txs {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
}
}
fmt.Println("Passed test: DeliverTx")
return nil

View File

@@ -0,0 +1,68 @@
package main
import (
"bytes"
"context"
"fmt"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
var ctx = context.Background()
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
if err != nil {
panic(err.Error())
}
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
}
return client
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync(ctx)
if err != nil {
panicf("client error: %v", err)
}
if !bytes.Equal(res.Data, hashExp) {
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
}
}
type tx struct {
Data []byte
CodeExp uint32
DataExp []byte
}
func finalizeBlock(client abcicli.Client, txs []tx) {
var txsData = make([][]byte, len(txs))
for i, tx := range txs {
txsData[i] = tx.Data
}
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
if err != nil {
panicf("client error: %v", err)
}
for i, tx := range res.Txs {
if tx.Code != txs[i].CodeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
}
if !bytes.Equal(tx.Data, txs[i].DataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
}
}
}
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}

View File

@@ -0,0 +1,95 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString)
_, err = cmd.CombinedOutput()
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
cmd := exec.Command("bash", "-c", subCommand)
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer func() {
if err := cmd.Process.Kill(); err != nil {
log.Printf("error on process kill: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Printf("error while waiting for cmd to exit: %v", err)
}
}()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err) //nolint:gocritic
}
client := startClient(abciType)
defer func() {
if err := client.Stop(); err != nil {
log.Printf("error trying client stop: %v", err)
}
}()
// commit(client, nil)
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
txs := []tx{
{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
finalizeBlock(client, txs)
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}

28
abci/tests/test_app/test.sh Executable file
View File

@@ -0,0 +1,28 @@
#! /bin/bash
set -e
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it.
# Get the directory where this script lives.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Change into that directory because the tests expect to run from there.
cd "$DIR"
echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"
echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"
# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter

View File

@@ -37,6 +37,7 @@ function testExample() {
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
echo ""
echo "PASS"

View File

@@ -17,11 +17,9 @@ type Application interface {
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
// State Sync Connection
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@@ -46,10 +44,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
@@ -66,14 +60,6 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
return ResponseListSnapshots{}
}
@@ -90,6 +76,10 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
return ResponseFinalizeBlock{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@@ -114,11 +104,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
return &res, nil
@@ -139,16 +124,6 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
func (app *GRPCApplication) ListSnapshots(
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
res := app.app.ListSnapshots(*req)
@@ -172,3 +147,9 @@ func (app *GRPCApplication) ApplySnapshotChunk(
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) FinalizeBlock(
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
res := app.app.FinalizeBlock(*req)
return &res, nil
}
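// Editor's note: the following is a minimal sketch, not part of this diff,
// of what an application built on the new FinalizeBlock hook can look like.
// It assumes ResponseFinalizeBlock carries one per-tx result for each tx in
// the request, and that those results reuse the ResponseDeliverTx shape
// (Code/Data/Log), as the test helpers earlier in this changeset suggest.
type countingApp struct {
	BaseApplication
	txCount int64
}

func (app *countingApp) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
	results := make([]*ResponseDeliverTx, len(req.Txs))
	for i := range req.Txs {
		app.txCount++ // apply the tx to application state here
		results[i] = &ResponseDeliverTx{Code: CodeTypeOK}
	}
	return ResponseFinalizeBlock{Txs: results}
}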

View File

@@ -15,7 +15,11 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
return err
if err != nil {
return err
}
return nil
}
// ReadMessage reads a varint length-delimited protobuf message.
@@ -44,12 +48,6 @@ func ToRequestInfo(req RequestInfo) *Request {
}
}
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
}
}
func ToRequestCheckTx(req RequestCheckTx) *Request {
return &Request{
Value: &Request_CheckTx{&req},
@@ -74,18 +72,6 @@ func ToRequestInitChain(req RequestInitChain) *Request {
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
return &Request{
Value: &Request_ListSnapshots{&req},
@@ -110,6 +96,12 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
}
}
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
return &Request{
Value: &Request_FinalizeBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@@ -135,11 +127,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
Value: &Response_Info{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
@@ -165,18 +152,6 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
return &Response{
Value: &Response_ListSnapshots{&res},
@@ -200,3 +175,9 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
return &Response{
Value: &Response_FinalizeBlock{&res},
}
}
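// Editor's sketch (not part of this diff): round-tripping a wrapped
// FinalizeBlock request through the length-delimited helpers above. It
// assumes ReadMessage has the shape ReadMessage(io.Reader, proto.Message)
// error, matching its doc comment, and requires "bytes" in the import set.
func exampleRoundTrip() error {
	req := ToRequestFinalizeBlock(RequestFinalizeBlock{Txs: [][]byte{[]byte("tx1")}})
	var buf bytes.Buffer
	if err := WriteMessage(req, &buf); err != nil {
		return err
	}
	got := &Request{}
	if err := ReadMessage(&buf, got); err != nil {
		return err
	}
	// got.Value is now a *Request_FinalizeBlock wrapper.
	return nil
}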

File diff suppressed because it is too large

View File

@@ -12,9 +12,11 @@ import (
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen-node-key",
Short: "Generate a new node key",
RunE: genNodeKey,
Use: "gen-node-key",
Aliases: []string{"gen_node_key"},
Short: "Generate a new node key",
RunE: genNodeKey,
PreRun: deprecateSnakeCase,
}
func genNodeKey(cmd *cobra.Command, args []string) error {

View File

@@ -13,9 +13,11 @@ import (
// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
Use: "gen-validator",
Short: "Generate new validator keypair",
RunE: genValidator,
Use: "gen-validator",
Aliases: []string{"gen_validator"},
Short: "Generate new validator keypair",
RunE: genValidator,
PreRun: deprecateSnakeCase,
}
func init() {

View File

@@ -1,87 +0,0 @@
package commands
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer/sink"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
Use: "inspect",
Short: "Run an inspect server for investigating Tendermint state",
Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.
When the Tendermint consensus engine detects inconsistent state, it will crash the
tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,
RunE: runInspect,
}
func init() {
InspectCmd.Flags().
String("rpc.laddr",
config.RPC.ListenAddress, "RPC listener address. Port required")
InspectCmd.Flags().
String("db-backend",
config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
InspectCmd.Flags().
String("db-dir", config.DBPath, "database directory")
}
func runInspect(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-c
cancel()
}()
blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
if err != nil {
return err
}
blockStore := store.NewBlockStore(blockStoreDB)
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
if err != nil {
if err := blockStoreDB.Close(); err != nil {
logger.Error("error closing block store db", "error", err)
}
return err
}
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return err
}
stateStore := state.NewStore(stateDB)
ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger)
logger.Info("starting inspect server")
if err := ins.Run(ctx); err != nil {
return err
}
return nil
}

View File

@@ -1,64 +0,0 @@
package commands
import (
"context"
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/scripts/keymigrate"
)
func MakeKeyMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
}
logger.Info("completed database migration successfully")
return nil
},
}
// allow database info to be overridden via cli
addDBFlags(cmd)
return cmd
}

View File

@@ -11,9 +11,11 @@ import (
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
var ProbeUpnpCmd = &cobra.Command{
Use: "probe-upnp",
Short: "Test UPnP functionality",
RunE: probeUpnp,
Use: "probe-upnp",
Aliases: []string{"probe_upnp"},
Short: "Test UPnP functionality",
RunE: probeUpnp,
PreRun: deprecateSnakeCase,
}
func probeUpnp(cmd *cobra.Command, args []string) error {

View File

@@ -1,251 +0,0 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
// ReIndexEventCmd allows re-indexing events over a given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend was dropped/disconnected or when you want to replace the backend.
The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); and the
default end-height is 0, meaning the tool will re-index until the latest block height (inclusive). You can omit
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start the re-index from (inclusive)")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height to end the re-index at (inclusive)")
}
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}
eventSinks := []indexer.EventSink{}
for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}
if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}
if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}
return eventSinks, nil
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)
for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}
_ = batch.Add(&tr)
}
}
for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
}
return nil
}

View File

@@ -1,171 +0,0 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}
for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

View File

@@ -17,9 +17,11 @@ var ReplayCmd = &cobra.Command{
// ReplayConsoleCmd allows replaying of messages from the WAL in a
// console.
var ReplayConsoleCmd = &cobra.Command{
Use: "replay-console",
Short: "Replay messages from WAL in a console",
Use: "replay-console",
Aliases: []string{"replay_console"},
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
},
PreRun: deprecateSnakeCase,
}

View File

@@ -14,9 +14,11 @@ import (
// ResetAllCmd removes the database of this Tendermint core
// instance.
var ResetAllCmd = &cobra.Command{
Use: "unsafe-reset-all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: resetAll,
Use: "unsafe-reset-all",
Aliases: []string{"unsafe_reset_all"},
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: resetAll,
PreRun: deprecateSnakeCase,
}
var keepAddrBook bool
@@ -29,9 +31,11 @@ func init() {
// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe-reset-priv-validator",
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: resetPrivValidator,
Use: "unsafe-reset-priv-validator",
Aliases: []string{"unsafe_reset_priv_validator"},
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: resetPrivValidator,
PreRun: deprecateSnakeCase,
}
// XXX: this is totally unsafe.

View File

@@ -2,6 +2,7 @@ package commands
import (
"fmt"
"strings"
"time"
"github.com/spf13/cobra"
@@ -64,3 +65,10 @@ var RootCmd = &cobra.Command{
return nil
},
}
// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
if strings.Contains(cmd.CalledAs(), "_") {
fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release")
}
}

View File

@@ -3,8 +3,6 @@ package commands
import (
"bytes"
"crypto/sha256"
"errors"
"flag"
"fmt"
"io"
"os"
@@ -35,22 +33,7 @@ func AddNodeFlags(cmd *cobra.Command) {
"socket address to listen on for connections from external priv-validator process")
// node flags
cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
// This check was added to give users an upgrade prompt to use the new flag for syncing.
//
// The pflag package does not have a native way to print a deprecation warning
// and return an error. This logic was added to print a deprecation message to the user
// and then crash if the user attempts to use the old --fast-sync flag.
fs := flag.NewFlagSet("", flag.ExitOnError)
fs.Func("fast-sync", "deprecated",
func(string) error {
return errors.New("--fast-sync has been deprecated, please use --blocksync.enable")
})
cmd.Flags().AddGoFlagSet(fs)
cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck
cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing")
cmd.Flags().BytesHexVar(
&genesisHash,
"genesis-hash",
@@ -65,7 +48,9 @@ func AddNodeFlags(cmd *cobra.Command) {
"proxy-app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore' or 'noop' for local testing.")
" 'persistent_kvstore',"+
" 'counter',"+
" 'counter_serial' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
// rpc flags
@@ -100,10 +85,7 @@ func AddNodeFlags(cmd *cobra.Command) {
config.Consensus.CreateEmptyBlocksInterval.String(),
"the possible interval between empty blocks")
addDBFlags(cmd)
}
func addDBFlags(cmd *cobra.Command) {
// db flags
cmd.Flags().String(
"db-backend",
config.DBBackend,
@@ -175,7 +157,7 @@ func checkGenesisHash(config *cfg.Config) error {
// Compare with the flag.
if !bytes.Equal(genesisHash, actualHash) {
return fmt.Errorf(
"--genesis-hash=%X does not match %s hash: %X",
"--genesis_hash=%X does not match %s hash: %X",
genesisHash, config.GenesisFile(), actualHash)
}

View File

@@ -8,9 +8,11 @@ import (
// ShowNodeIDCmd dumps node's ID to the standard output.
var ShowNodeIDCmd = &cobra.Command{
Use: "show-node-id",
Short: "Show this node's ID",
RunE: showNodeID,
Use: "show-node-id",
Aliases: []string{"show_node_id"},
Short: "Show this node's ID",
RunE: showNodeID,
PreRun: deprecateSnakeCase,
}
func showNodeID(cmd *cobra.Command, args []string) error {

View File

@@ -16,9 +16,11 @@ import (
// ShowValidatorCmd adds capabilities for showing the validator info.
var ShowValidatorCmd = &cobra.Command{
Use: "show-validator",
Short: "Show this node's validator info",
RunE: showValidator,
Use: "show-validator",
Aliases: []string{"show_validator"},
Short: "Show this node's validator info",
RunE: showValidator,
PreRun: deprecateSnakeCase,
}
func showValidator(cmd *cobra.Command, args []string) error {

View File

@@ -15,7 +15,6 @@ func main() {
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.ReIndexEventCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,
@@ -28,8 +27,6 @@ func main() {
cmd.ShowNodeIDCmd,
cmd.GenNodeKeyCmd,
cmd.VersionCmd,
cmd.InspectCmd,
cmd.MakeKeyMigrateCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)

View File

@@ -29,8 +29,8 @@ const (
ModeValidator = "validator"
ModeSeed = "seed"
BlockSyncV0 = "v0"
BlockSyncV2 = "v2"
BlockchainV0 = "v0"
BlockchainV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
@@ -76,7 +76,7 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
@@ -91,7 +91,7 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
FastSync: DefaultFastSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
@@ -114,7 +114,7 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
FastSync: TestFastSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
@@ -151,8 +151,8 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.BlockSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [blocksync] section: %w", err)
if err := cfg.FastSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [fastsync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
return fmt.Errorf("error in [consensus] section: %w", err)
@@ -194,6 +194,11 @@ type BaseConfig struct { //nolint: maligned
// - No priv_validator_key.json, priv_validator_state.json
Mode string `mapstructure:"mode"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits
FastSyncMode bool `mapstructure:"fast-sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
// - pure go
@@ -236,24 +241,23 @@ type BaseConfig struct { //nolint: maligned
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter-peers"` // false
Other map[string]interface{} `mapstructure:",remain"`
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
return BaseConfig{
Genesis: defaultGenesisJSONPath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
Genesis: defaultGenesisJSONPath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
}
}
@@ -263,6 +267,7 @@ func TestBaseConfig() BaseConfig {
cfg.chainID = "tendermint_test"
cfg.Mode = ModeValidator
cfg.ProxyApp = "kvstore"
cfg.FastSyncMode = false
cfg.DBBackend = "memdb"
return cfg
}
@@ -339,28 +344,6 @@ func (cfg BaseConfig) ValidateBasic() error {
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
// TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle.
// This check was added to give users an upgrade prompt to use the new
// configuration option in v0.35. In future release cycles they should no longer
// be using this configuration parameter so the check can be removed.
// The cfg.Other field can likely be removed at the same time if it is not referenced
// elsewhere as it was added to service this check.
if fs, ok := cfg.Other["fastsync"]; ok {
if _, ok := fs.(map[string]interface{}); ok {
return fmt.Errorf("a configuration section named 'fastsync' was found in the " +
"configuration file. The 'fastsync' section has been renamed to " +
"'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'")
}
}
if fs, ok := cfg.Other["fast-sync"]; ok {
if fs != "" {
return fmt.Errorf("a parameter named 'fast-sync' was found in the " +
"configuration file. The parameter to enable or disable quickly syncing with a blockchain" +
"has moved to the [blocksync] section of the configuration file as blocksync.enable. " +
"Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'")
}
}
return nil
}
@@ -463,7 +446,6 @@ type RPCConfig struct {
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCListenAddress string `mapstructure:"grpc-laddr"`
// Maximum number of simultaneous connections.
@@ -471,7 +453,6 @@ type RPCConfig struct {
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -710,14 +691,13 @@ type P2PConfig struct { //nolint: maligned
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
// UseLegacy enables the "legacy" P2P implementation and
// disables the newer default implementation. This flag will
// be removed in a future release.
UseLegacy bool `mapstructure:"use-legacy"`
// DisableLegacy is used mostly for testing to enable or disable the legacy
// P2P stack.
DisableLegacy bool `mapstructure:"disable-legacy"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo", "priority" and "wdrr",
// with the default being "priority".
// with the default being "fifo".
QueueType string `mapstructure:"queue-type"`
}
@@ -749,7 +729,6 @@ func DefaultP2PConfig() *P2PConfig {
DialTimeout: 3 * time.Second,
TestDialFail: false,
QueueType: "priority",
UseLegacy: false,
}
}
@@ -803,47 +782,25 @@ type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if it's
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
// it's insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
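// Editor's sketch (not part of this diff) of the TTL eviction rule the
// comments above describe: a transaction is evicted once either bound is
// exceeded, and a zero value disables that bound. The real mempool
// internals differ; this only illustrates the configured semantics.
func ttlExpired(cfg *MempoolConfig, age time.Duration, blocksInPool int64) bool {
	if cfg.TTLDuration > 0 && age >= cfg.TTLDuration {
		return true
	}
	return cfg.TTLNumBlocks > 0 && blocksInPool >= cfg.TTLNumBlocks
}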
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
@@ -854,12 +811,10 @@ func DefaultMempoolConfig() *MempoolConfig {
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
}
}
@@ -885,13 +840,6 @@ func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}
return nil
}
@@ -900,46 +848,15 @@ func (cfg *MempoolConfig) ValidateBasic() error {
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
// State sync rapidly bootstraps a new node by discovering, fetching, and restoring a
// state machine snapshot from peers instead of fetching and replaying historical
// blocks. Requires some peers in the network to take and serve state machine
// snapshots. State sync is not attempted if the node has any local state
// (LastBlockHeight > 0). The node will have a truncated block history, starting from
// the height of the snapshot.
Enable bool `mapstructure:"enable"`
// State sync uses light client verification to verify state. This can be done either
// through the P2P layer or the RPC layer. Set this to true to use the P2P layer. If
// false (default), the RPC layer will be used.
UseP2P bool `mapstructure:"use-p2p"`
// If using RPC, at least two addresses need to be provided. They should be compatible
// with net.Dial, for example: "host.example.com:2125".
RPCServers []string `mapstructure:"rpc-servers"`
// The hash and height of a trusted block. Must be within the trust-period.
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
// The trust period should be set so that Tendermint can detect and gossip
// misbehavior before it is considered expired. For chains based on the Cosmos SDK,
// one day less than the unbonding period should suffice.
TrustPeriod time.Duration `mapstructure:"trust-period"`
// Time to spend discovering snapshots before initiating a restore.
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
// Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
// The synchronizer will create a new, randomly named directory within this directory
// and remove it when the sync is complete.
TempDir string `mapstructure:"temp-dir"`
// The timeout duration before re-requesting a chunk, possibly from a different
// peer (default: 15 seconds).
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
// The number of concurrent chunk and block fetchers to run (default: 4).
Fetchers int32 `mapstructure:"fetchers"`
Fetchers int32 `mapstructure:"fetchers"`
}
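// Editor's sketch (not part of this diff): filling in the RPC-based state
// sync settings described above. Every value here is a placeholder.
func exampleStateSyncConfig() *StateSyncConfig {
	cfg := DefaultStateSyncConfig()
	cfg.Enable = true
	// At least two servers, so light client responses can be cross-checked.
	cfg.RPCServers = []string{"rpc-a.example.com:26657", "rpc-b.example.com:26657"}
	cfg.TrustHeight = 100                 // height of a trusted block
	cfg.TrustHash = "0123abcd"            // hex header hash at that height (placeholder)
	cfg.TrustPeriod = 7 * 24 * time.Hour  // keep within the unbonding period
	return cfg
}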
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
@@ -961,96 +878,90 @@ func DefaultStateSyncConfig() *StateSyncConfig {
}
}
// TestStateSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *StateSyncConfig) ValidateBasic() error {
if !cfg.Enable {
return nil
}
if cfg.Enable {
if len(cfg.RPCServers) == 0 {
return errors.New("rpc-servers is required")
}
// If we're not using the P2P stack then we need to validate the
// RPCServers
if !cfg.UseP2P {
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers must be specified")
return errors.New("at least two rpc-servers entries is required")
}
for _, server := range cfg.RPCServers {
if server == "" {
if len(server) == 0 {
return errors.New("found empty rpc-servers entry")
}
}
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}
if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
}
}
return nil
}
//-----------------------------------------------------------------------------
// FastSyncConfig
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
// If this node is many blocks behind the tip of the chain, BlockSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits.
type BlockSyncConfig struct {
Enable bool `mapstructure:"enable"`
// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
Version string `mapstructure:"version"`
}
// DefaultBlockSyncConfig returns a default configuration for the block sync service
func DefaultBlockSyncConfig() *BlockSyncConfig {
return &BlockSyncConfig{
Enable: true,
Version: BlockSyncV0,
// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
return &FastSyncConfig{
Version: BlockchainV0,
}
}
// TestBlockSyncConfig returns a default configuration for the block sync.
func TestBlockSyncConfig() *BlockSyncConfig {
return DefaultBlockSyncConfig()
// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
return DefaultFastSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *BlockSyncConfig) ValidateBasic() error {
func (cfg *FastSyncConfig) ValidateBasic() error {
switch cfg.Version {
case BlockSyncV0:
case BlockchainV0:
return nil
case BlockchainV2:
return nil
case BlockSyncV2:
return errors.New("blocksync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
}
}

View File

@@ -125,13 +125,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
require.NoError(t, cfg.ValidateBasic())
}
func TestBlockSyncConfigValidateBasic(t *testing.T) {
cfg := TestBlockSyncConfig()
func TestFastSyncConfigValidateBasic(t *testing.T) {
cfg := TestFastSyncConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with version
cfg.Version = "v2"
assert.Error(t, cfg.ValidateBasic())
assert.NoError(t, cfg.ValidateBasic())
cfg.Version = "invalid"
assert.Error(t, cfg.ValidateBasic())

View File

@@ -97,6 +97,11 @@ moniker = "{{ .BaseConfig.Moniker }}"
# - No priv_validator_key.json, priv_validator_state.json
mode = "{{ .BaseConfig.Mode }}"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = {{ .BaseConfig.FastSyncMode }}
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
@@ -195,7 +200,6 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# Maximum number of simultaneous connections.
@@ -205,7 +209,6 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -265,8 +268,8 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = {{ .P2P.UseLegacy }}
# Enable the new p2p layer.
disable-legacy = {{ .P2P.DisableLegacy }}
# Select the p2p internal queue
queue-type = "{{ .P2P.QueueType }}"
@@ -300,7 +303,6 @@ persistent-peers = "{{ .P2P.PersistentPeers }}"
upnp = {{ .P2P.UPNP }}
# Path to address book
# TODO: Remove once p2p refactor is complete in favor of peer store.
addr-book-file = "{{ js .P2P.AddrBook }}"
# Set true for strict address routability rules
@@ -326,8 +328,6 @@ max-connections = {{ .P2P.MaxConnections }}
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
# TODO: Remove once p2p refactor is complete.
# ref: https://github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
@@ -397,22 +397,6 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# it's insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -424,30 +408,22 @@ ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
# starting from the height of the snapshot.
enable = {{ .StateSync.Enable }}
# State sync uses light client verification to verify state. This can be done either through the
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
# will be used.
use-p2p = {{ .StateSync.UseP2P }}
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
# for example: "host.example.com:2125"
# RPC servers (comma-separated) for light client verification of the synced state machine and
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
# header hash obtained from a trusted source, and a period during which validators can be trusted.
#
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
# The hash and height of a trusted block. Must be within the trust-period.
trust-height = {{ .StateSync.TrustHeight }}
trust-hash = "{{ .StateSync.TrustHash }}"
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# period should suffice.
trust-period = "{{ .StateSync.TrustPeriod }}"
# Time to spend discovering snapshots before initiating a restore.
discovery-time = "{{ .StateSync.DiscoveryTime }}"
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
# The synchronizer will create a new, randomly named directory within this directory
# and remove it when the sync is complete.
# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = "{{ .StateSync.TempDir }}"
# The timeout duration before re-requesting a chunk, possibly from a different
@@ -458,19 +434,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Block Sync Configuration Options ###
### Fast Sync Configuration Options ###
#######################################################
[blocksync]
[fastsync]
# If this node is many blocks behind the tip of the chain, BlockSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
enable = {{ .BlockSync.Enable }}
# Block Sync version to use:
# 1) "v0" (default) - the standard Block Sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "{{ .FastSync.Version }}"
#######################################################
### Consensus Configuration Options ###

View File

@@ -36,7 +36,9 @@ func TestEnsureRoot(t *testing.T) {
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.Nil(err)
checkConfig(t, string(data))
if !checkConfig(string(data)) {
t.Fatalf("config file missing some information")
}
ensureFiles(t, tmpDir, "data")
}
@@ -55,7 +57,9 @@ func TestEnsureTestRoot(t *testing.T) {
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.Nil(err)
checkConfig(t, string(data))
if !checkConfig(string(data)) {
t.Fatalf("config file missing some information")
}
// TODO: make sure the cfg returned and testconfig are the same!
baseConfig := DefaultBaseConfig()
@@ -63,15 +67,16 @@ func TestEnsureTestRoot(t *testing.T) {
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, pvConfig.Key, pvConfig.State)
}
func checkConfig(t *testing.T, configFile string) {
t.Helper()
func checkConfig(configFile string) bool {
var valid bool
// list of words we expect in the config
var elems = []string{
"moniker",
"seeds",
"proxy-app",
"blocksync",
"create-empty-blocks",
"fast_sync",
"create_empty_blocks",
"peer",
"timeout",
"broadcast",
@@ -84,7 +89,10 @@ func checkConfig(t *testing.T, configFile string) {
}
for _, e := range elems {
if !strings.Contains(configFile, e) {
t.Errorf("config file was expected to contain %s but did not", e)
valid = false
break
} else {
valid = true
}
}
return valid
}

View File

@@ -204,10 +204,7 @@ func (spn *ProofNode) FlattenAunts() [][]byte {
case spn.Right != nil:
innerHashes = append(innerHashes, spn.Right.Hash)
default:
// FIXME(fromberger): Per the documentation above, exactly one of
// these fields should be set. If that is true, this should probably
// be a panic since it violates the invariant. If not, when can it
// be OK to have no siblings? Does this occur at the leaves?
break
}
spn = spn.Parent
}

View File

@@ -13,7 +13,7 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
// necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint
"golang.org/x/crypto/ripemd160" // nolint: staticcheck
)
//-------------------------------------

View File

@@ -36,7 +36,8 @@ func TestPubKeySecp256k1Address(t *testing.T) {
addrBbz, _, _ := base58.CheckDecode(d.addr)
addrB := crypto.Address(addrBbz)
priv := secp256k1.PrivKey(privB)
var priv secp256k1.PrivKey = secp256k1.PrivKey(privB)
pubKey := priv.PubKey()
pubT, _ := pubKey.(secp256k1.PubKey)
pub := pubT

View File

@@ -31,6 +31,7 @@ Available Commands:
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
counter ABCI demo example
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
@@ -213,9 +214,137 @@ we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.
## Counter - Another Example
Now that we've got the hang of it, let's try another application, the
"counter" app.
Like the kvstore app, its code can be found
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
and looks like:
```go
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewCounterApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddrC, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
srv.Stop()
})
// Run forever.
select {}
}
```
The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction, asked for a hash, or committed the state. The
result of `commit` is just the number of transactions sent.
This application has two modes: `serial=off` and `serial=on`.
When `serial=on`, each transaction must be a big-endian encoded incrementing
integer, starting at 0.
If `serial=off`, there are no restrictions on transactions.
We can toggle the value of `serial` using the `set_option` ABCI message.
When `serial=on`, some transactions are invalid. In a live blockchain,
transactions collect in memory before they are committed into blocks. To
avoid wasting resources on invalid transactions, ABCI provides the
`check_tx` message, which application developers can use to accept or
reject transactions before they are stored in memory or gossiped to
other peers.
In this instance of the counter app, `check_tx` only allows transactions
whose integer is greater than the last committed one.
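To make that rule concrete, here is a small, self-contained sketch of the
nonce check (an editor's illustration; the actual counter source lives under
`abci/example/counter`):
```go
package main

import (
	"encoding/binary"
	"fmt"
)

// checkSerial mirrors the counter's serial rule: a tx is its big-endian
// integer value, and check_tx accepts only values at least as large as the
// number of txs already committed.
func checkSerial(tx []byte, txCount uint64) error {
	if len(tx) > 8 {
		return fmt.Errorf("tx has more than 8 bytes")
	}
	padded := make([]byte, 8)
	copy(padded[8-len(tx):], tx) // left-pad to 8 bytes
	v := binary.BigEndian.Uint64(padded)
	if v < txCount {
		return fmt.Errorf("invalid nonce: expected >= %d, got %d", txCount, v)
	}
	return nil
}

func main() {
	fmt.Println(checkSerial([]byte{0x00}, 1)) // invalid nonce: expected >= 1, got 0
	fmt.Println(checkSerial([]byte{0xff}, 1)) // <nil>
}
```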
Let's kill the console and the kvstore application, and start the
counter app:
```sh
abci-cli counter
```
In another window, start the `abci-cli console`:
```sh
> check_tx 0x00
-> code: OK
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
-> code: OK
> check_tx 0x00
-> code: BadNonce
-> log: Invalid nonce. Expected >= 1, got 0
> deliver_tx 0x01
-> code: OK
> deliver_tx 0x04
-> code: BadNonce
-> log: Invalid nonce. Expected 2, got 4
> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
```
This is a very simple application, but between `counter` and `kvstore`,
it's easy to see how you can build out arbitrary application states on
top of the ABCI. [Hyperledger's
Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
bringing with it Ethereum-like accounts, the Ethereum virtual machine,
Monax's permissioning scheme, and native contract extensions.
But the ultimate flexibility comes from being able to write the
application easily in any language.
We have implemented the counter in a number of languages [see the
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).
To run the Node.js version, first download & install [the JavaScript ABCI server](https://github.com/tendermint/js-abci):
```sh
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```
Now you can start the app:
```sh
node example/counter.js
```
(You'll have to kill the other counter application process.) In another
window, run the console and those previous ABCI commands. You should get
the same results as for the Go version.
## Bounties
Want to write an app in your favorite language?! We'd be happy
Want to write the counter app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.

View File

@@ -37,8 +37,8 @@ cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
command, an example application written
Now you should have the `abci-cli` installed; you'll see a couple of
commands (`counter` and `kvstore`) that are example applications written
in Go. See below for an application written in JavaScript.
Now, let's run some apps!
@@ -165,6 +165,92 @@ curl -s 'localhost:26657/abci_query?data="name"'
Try some other transactions and queries to make sure everything is
working!
## Counter - Another Example
Now that we've got the hang of it, let's try another application, the
`counter` app.
The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction or committed the state.
This application has two modes: `serial=off` and `serial=on`.
When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.
If `serial=off`, there are no restrictions on transactions.
In a live blockchain, transactions collect in memory before they are
committed into blocks. To avoid wasting resources on invalid
transactions, ABCI provides the `CheckTx` message, which application
developers can use to accept or reject transactions, before they are
stored in memory or gossiped to other peers.
In this instance of the counter app, with `serial=on`, `CheckTx` only
allows transactions whose integer is greater than the last committed
one.
Let's kill the previous instance of `tendermint` and the `kvstore`
application, and start the counter app. We can enable `serial=on` with a
flag:
```sh
abci-cli counter --serial
```
In another window, reset then start Tendermint:
```sh
tendermint unsafe_reset_all
tendermint start
```
Once again, you can see the blocks streaming by. Let's send some
transactions. Since we have set `serial=on`, the first transaction must
be the number `0`:
```sh
curl localhost:26657/broadcast_tx_commit?tx=0x00
```
Note the empty (hence successful) response. The next transaction must be
the number `1`. If instead, we try to send a `5`, we get an error:
```json
> curl localhost:26657/broadcast_tx_commit?tx=0x05
{
"jsonrpc": "2.0",
"id": "",
"result": {
"check_tx": {},
"deliver_tx": {
"code": 2,
"log": "Invalid nonce. Expected 1, got 5"
},
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
"height": 34
}
}
```
But if we send a `1`, it works again:
```json
> curl localhost:26657/broadcast_tx_commit?tx=0x01
{
"jsonrpc": "2.0",
"id": "",
"result": {
"check_tx": {},
"deliver_tx": {},
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
"height": 60
}
}
```
For more details on the `broadcast_tx` API, see [the guide on using
Tendermint](../tendermint-core/using-tendermint.md).
## CounterJS - Example in Another Language

View File

@@ -31,61 +31,24 @@ For example:
would be equal to the composite key of `jack.account.number`.
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.
## Configuration
Operators can configure indexing via the `[tx_index]` section. The `indexer`
field takes a series of supported indexers. If `null` is included, indexing will
be turned off regardless of other values provided.
Let's take a look at the `[tx_index]` config section:
```toml
[tx-index]
##### transactions indexer configuration options #####
[tx_index]
# The backend database list to back the indexer.
# If list contains null, meaning no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
# indexer = []
indexer = "kv"
```
### Supported Indexers
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.
#### KV
The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
#### PostgreSQL
The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance allowing for the events
to be stored in relational models. Since the events are stored in a RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.
Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.
Example:
```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```
You can turn off indexing completely by setting `tx_index` to `null`.
## Default Indexes

View File

@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
@@ -97,6 +97,3 @@ Note the context/background should be written in the present tense.
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)

View File

@@ -97,7 +97,8 @@ design for tendermint was originally tracked in
[#828](https://github.com/tendermint/tendermint/issues/828).
#### Eager StateSync
Warp Sync as implemented in OpenEthereum to rapidly
Warp Sync as implemented in Parity
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
download both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
@@ -233,3 +234,5 @@ Proposed
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed

View File

@@ -119,7 +119,7 @@ network usage.
---
Check out the formal specification
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
## Status

View File

@@ -18,7 +18,7 @@ graceful here, but that's for another day.
It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish

View File

@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
the user.
For a detailed overview of how each of these three attacks can be conducted please refer to the
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
## Full Node Verification

View File

@@ -24,7 +24,6 @@
- April 1, 2021: Initial Draft (@alexanderbez)
- April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
- May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
- Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair)
## Status
@@ -146,190 +145,163 @@ The postgres eventsink will not support `tx_search`, `block_search`, `GetTxByHas
```sql
-- Table Definition ----------------------------------------------
-- The blocks table records metadata about each block.
-- The block record does not include its events or transactions (see tx_results).
CREATE TABLE blocks (
rowid BIGSERIAL PRIMARY KEY,
CREATE TYPE block_event_type AS ENUM ('begin_block', 'end_block', '');
height BIGINT NOT NULL,
chain_id VARCHAR NOT NULL,
-- When this block header was logged into the sink, in UTC.
created_at TIMESTAMPTZ NOT NULL,
UNIQUE (height, chain_id)
CREATE TABLE block_events (
id SERIAL PRIMARY KEY,
key VARCHAR NOT NULL,
value VARCHAR NOT NULL,
height INTEGER NOT NULL,
type block_event_type,
created_at TIMESTAMPTZ NOT NULL,
chain_id VARCHAR NOT NULL
);
-- Index blocks by height and chain, since we need to resolve block IDs when
-- indexing transaction records and transaction events.
CREATE INDEX idx_blocks_height_chain ON blocks(height, chain_id);
-- The tx_results table records metadata about transaction results. Note that
-- the events from a transaction are stored separately.
CREATE TABLE tx_results (
rowid BIGSERIAL PRIMARY KEY,
-- The block to which this transaction belongs.
block_id BIGINT NOT NULL REFERENCES blocks(rowid),
-- The sequential index of the transaction within the block.
index INTEGER NOT NULL,
-- When this result record was logged into the sink, in UTC.
created_at TIMESTAMPTZ NOT NULL,
-- The hex-encoded hash of the transaction.
tx_hash VARCHAR NOT NULL,
-- The protobuf wire encoding of the TxResult message.
tx_result BYTEA NOT NULL,
UNIQUE (block_id, index)
id SERIAL PRIMARY KEY,
tx_result BYTEA NOT NULL,
created_at TIMESTAMPTZ NOT NULL
);
-- The events table records events. All events (both block and transaction) are
-- associated with a block ID; transaction events also have a transaction ID.
CREATE TABLE events (
rowid BIGSERIAL PRIMARY KEY,
-- The block and transaction this event belongs to.
-- If tx_id is NULL, this is a block event.
block_id BIGINT NOT NULL REFERENCES blocks(rowid),
tx_id BIGINT NULL REFERENCES tx_results(rowid),
-- The application-defined type label for the event.
type VARCHAR NOT NULL
CREATE TABLE tx_events (
id SERIAL PRIMARY KEY,
key VARCHAR NOT NULL,
value VARCHAR NOT NULL,
height INTEGER NOT NULL,
hash VARCHAR NOT NULL,
tx_result_id SERIAL,
created_at TIMESTAMPTZ NOT NULL,
chain_id VARCHAR NOT NULL,
FOREIGN KEY (tx_result_id)
REFERENCES tx_results(id)
ON DELETE CASCADE
);
-- The attributes table records event attributes.
CREATE TABLE attributes (
event_id BIGINT NOT NULL REFERENCES events(rowid),
key VARCHAR NOT NULL, -- bare key
composite_key VARCHAR NOT NULL, -- composed type.key
value VARCHAR NULL,
-- Indices -------------------------------------------------------
UNIQUE (event_id, key)
);
-- A joined view of events and their attributes. Events that do not have any
-- attributes are represented as a single row with empty key and value fields.
CREATE VIEW event_attributes AS
SELECT block_id, tx_id, type, key, composite_key, value
FROM events LEFT JOIN attributes ON (events.rowid = attributes.event_id);
-- A joined view of all block events (those having tx_id NULL).
CREATE VIEW block_events AS
SELECT blocks.rowid as block_id, height, chain_id, type, key, composite_key, value
FROM blocks JOIN event_attributes ON (blocks.rowid = event_attributes.block_id)
WHERE event_attributes.tx_id IS NULL;
-- A joined view of all transaction events.
CREATE VIEW tx_events AS
SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at
FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id)
JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id)
WHERE event_attributes.tx_id IS NOT NULL;
CREATE INDEX idx_block_events_key_value ON block_events(key, value);
CREATE INDEX idx_tx_events_key_value ON tx_events(key, value);
CREATE INDEX idx_tx_events_hash ON tx_events(hash);
```
The `PSQLEventSink` will implement the `EventSink` interface as follows
(some details omitted for brevity):
```go
func NewEventSink(connStr, chainID string) (*EventSink, error) {
db, err := sql.Open(driverName, connStr)
// ...
func NewPSQLEventSink(connStr string, chainID string) (*PSQLEventSink, error) {
db, err := sql.Open("postgres", connStr)
if err != nil {
return nil, err
}
return &EventSink{
store: db,
chainID: chainID,
}, nil
// ...
}
func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error {
ts := time.Now().UTC()
func (es *PSQLEventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error {
sqlStmt := sq.Insert("block_events").Columns("key", "value", "height", "type", "created_at", "chain_id")
return runInTransaction(es.store, func(tx *sql.Tx) error {
// Add the block to the blocks table and report back its row ID for use
// in indexing the events for the block.
blockID, err := queryWithID(tx, `
INSERT INTO blocks (height, chain_id, created_at)
VALUES ($1, $2, $3)
ON CONFLICT DO NOTHING
RETURNING rowid;
`, h.Header.Height, es.chainID, ts)
// ...
// index the reserved block height index
ts := time.Now()
sqlStmt = sqlStmt.Values(types.BlockHeightKey, h.Header.Height, h.Header.Height, "", ts, es.chainID)
// Insert the special block meta-event for height.
if err := insertEvents(tx, blockID, 0, []abci.Event{
makeIndexedEvent(types.BlockHeightKey, fmt.Sprint(h.Header.Height)),
}); err != nil {
return fmt.Errorf("block meta-events: %w", err)
}
// Insert all the block events. Order is important here,
if err := insertEvents(tx, blockID, 0, h.ResultBeginBlock.Events); err != nil {
return fmt.Errorf("begin-block events: %w", err)
}
if err := insertEvents(tx, blockID, 0, h.ResultEndBlock.Events); err != nil {
return fmt.Errorf("end-block events: %w", err)
}
return nil
})
for _, event := range h.ResultBeginBlock.Events {
// only index events with a non-empty type
if len(event.Type) == 0 {
continue
}
for _, attr := range event.Attributes {
if len(attr.Key) == 0 {
continue
}
// index iff the event specified index:true and it's not a reserved event
compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
if compositeKey == types.BlockHeightKey {
return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey)
}
if attr.GetIndex() {
sqlStmt = sqlStmt.Values(compositeKey, string(attr.Value), h.Header.Height, BlockEventTypeBeginBlock, ts, es.chainID)
}
}
}
// index end_block events...
// execute sqlStmt db query...
}
func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error {
ts := time.Now().UTC()
func (es *PSQLEventSink) IndexTxEvents(txr []*abci.TxResult) error {
sqlStmtEvents := sq.Insert("tx_events").Columns("key", "value", "height", "hash", "tx_result_id", "created_at", "chain_id")
sqlStmtTxResult := sq.Insert("tx_results").Columns("tx_result", "created_at")
for _, txr := range txrs {
// Encode the result message in protobuf wire format for indexing.
resultData, err := proto.Marshal(txr)
// ...
ts := time.Now()
for _, tx := range txr {
// store the tx result
txBz, err := proto.Marshal(tx)
if err != nil {
return err
}
// Index the hash of the underlying transaction as a hex string.
txHash := fmt.Sprintf("%X", types.Tx(txr.Tx).Hash())
sqlStmtTxResult = sqlStmtTxResult.Values(txBz, ts)
if err := runInTransaction(es.store, func(tx *sql.Tx) error {
// Find the block associated with this transaction.
blockID, err := queryWithID(tx, `
SELECT rowid FROM blocks WHERE height = $1 AND chain_id = $2;
`, txr.Height, es.chainID)
// ...
// Insert a record for this tx_result and capture its ID for indexing events.
txID, err := queryWithID(tx, `
INSERT INTO tx_results (block_id, index, created_at, tx_hash, tx_result)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT DO NOTHING
RETURNING rowid;
`, blockID, txr.Index, ts, txHash, resultData)
// ...
// Insert the special transaction meta-events for hash and height.
if err := insertEvents(tx, blockID, txID, []abci.Event{
makeIndexedEvent(types.TxHashKey, txHash),
makeIndexedEvent(types.TxHeightKey, fmt.Sprint(txr.Height)),
}); err != nil {
return fmt.Errorf("indexing transaction meta-events: %w", err)
}
// Index any events packaged with the transaction.
if err := insertEvents(tx, blockID, txID, txr.Result.Events); err != nil {
return fmt.Errorf("indexing transaction events: %w", err)
}
return nil
}); err != nil {
// execute sqlStmtTxResult db query...
var txID uint32
err = sqlStmtTxResult.QueryRow().Scan(&txID)
if err != nil {
return err
}
}
return nil
// index the reserved height and hash indices
hash := types.Tx(tx.Tx).Hash()
sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txID, ts, es.chainID)
sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, tx.Height, tx.Height, hash, txID, ts, es.chainID)
for _, event := range tx.Result.Events {
// only index events with a non-empty type
if len(event.Type) == 0 {
continue
}
for _, attr := range event.Attributes {
if len(attr.Key) == 0 {
continue
}
// index if `index: true` is set
compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
// ensure event does not conflict with a reserved prefix key
if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey {
return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag)
}
if attr.GetIndex() {
sqlStmtEvents = sqlStmtEvents.Values(compositeTag, string(attr.Value), tx.Height, hash, txID, ts, es.chainID)
}
}
}
}
// execute sqlStmtEvents db query...
}
// SearchBlockEvents is not implemented by this sink, and reports an error for all queries.
func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error)
func (es *PSQLEventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) {
return nil, errors.New("block search is not supported via the postgres event sink")
}
// SearchTxEvents is not implemented by this sink, and reports an error for all queries.
func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error)
func (es *PSQLEventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) {
return nil, errors.New("tx search is not supported via the postgres event sink")
}
// GetTxByHash is not implemented by this sink, and reports an error for all queries.
func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error)
func (es *PSQLEventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) {
return nil, errors.New("getTxByHash is not supported via the postgres event sink")
}
// HasBlock is not implemented by this sink, and reports an error for all queries.
func (es *EventSink) HasBlock(h int64) (bool, error)
func (es *PSQLEventSink) HasBlock(h int64) (bool, error) {
return false, errors.New("hasBlock is not supported via the postgres event sink")
}
```
### Configuration

View File

@@ -1,273 +0,0 @@
# ADR 069: Flexible Node Initialization
## Changelog
- 2021-06-09: Initial Draft (@tychoish)
- 2021-07-21: Major Revision (@tychoish)
## Status
Proposed.
## Context
In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md),
during the 0.35 development cycle, we have attempted to reduce the API
surface area by moving most of the interface of the `node` package into
unexported functions, as well as moving the reactors to an `internal`
package. Having this coincide with the 0.35 release made a lot of sense
because these interfaces were _already_ changing as a result of the `p2p`
[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit
more about how tendermint exposes this API.
While the interfaces of the P2P layer and most of the node package are already
internalized, this precludes some operational patterns that are important to
users who use tendermint as a library. Specifically, introspecting the
tendermint node service and replacing components is not supported in the latest
version of the code, and some of these use cases would require maintaining a
vendor copy of the code. Adding these features requires rather extensive
(internal/implementation) changes to the `node` and `rpc` packages, and this
ADR describes a model for changing the way that tendermint nodes initialize, in
service of providing this kind of functionality.
We consider node initialization because the current implementation
creates strong connections between all components, as well as between
the components of the node and the RPC layer; being able to reason
about the interactions of these components will help enable these
features and help define the requirements of the node package.
## Alternative Approaches
These alternatives are presented to frame the design space and to
contextualize the decision in terms of product requirements. These
ideas are not inherently bad, and may even be possible or desirable
in the (distant) future; they merely provide additional context for how
we, in the moment, came to our decision(s).
### Do Nothing
The current implementation is functional and sufficient for the vast
majority of use cases (e.g., all users of the Cosmos-SDK as well as
anyone who runs tendermint and the ABCI application in separate
processes). In the current implementation, and even previous versions,
modifying node initialization or injecting custom components required
copying most of the `node` package, which required such users
to maintain a vendored copy of tendermint.
While this is (likely) not tenable in the long term, as users do want
more modularity, and the current service implementation is brittle and
difficult to maintain, in the short term it may be possible to delay
implementation somewhat. Eventually, however, we will need to make the
`node` package easier to maintain and reason about.
### Generic Service Pluggability
One possible system design would export interfaces (in the Golang
sense) for all components of the system, to permit runtime dependency
injection of all components in the system, so that users can compose
tendermint nodes of arbitrary user-supplied components.
Although this level of customization would provide benefits, it would be a huge
undertaking (particularly with regards to API design work) that we do not have
scope for at the moment. Eventually providing support for some kinds of
pluggability may be useful, so the current solution does not explicitly
foreclose the possibility of this alternative.
### Abstract Dependency Based Startup and Shutdown
The main proposal in this document makes tendermint node initialization simpler
and more abstract, but the system lacks a number of
features which daemon/service initialization could provide, such as a
system allowing the authors of services to control initialization and shutdown order
of components using dependency relationships.
Such a system could work by allowing services to declare
initialization order dependencies to other reactors (by ID, perhaps)
so that the node could decide the initialization based on the
dependencies declared by services rather than requiring the node to
encode this logic directly.
This level of configuration is probably more complicated than is needed. Given
that the authors of components in the current implementation of tendermint
already *do* need to know about other components, a dependency-based system
would probably be overly-abstract at this stage.
## Decisions
- To the greatest extent possible, factor the code base so that
packages are responsible for their own initialization, and minimize
the amount of code in the `node` package itself.
- As a design goal, reduce direct coupling and dependencies between
components in the implementation of `node`.
- Begin iterating on a more-flexible internal framework for
initializing tendermint nodes to make the initialization process
less hard-coded in the implementation of the node objects.
- Reactors should not need to expose their interfaces *within* the
implementation of the node type
- This refactoring should be entirely opaque to users.
- These node initialization changes should not require a
reevaluation of the `service.Service` or a generic initialization
orchestration framework.
- Do not proactively provide a system for injecting
components/services within a tendermint node, though make it
possible to retrofit this kind of pluggability in the future if
needed.
- Prioritize implementation of p2p-based statesync reactor to obviate
need for users to inject a custom state-sync provider.
## Detailed Design
The [current
nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
includes direct references to the implementations of each of the
reactors, which should be replaced by references to `service.Service`
objects. This will require moving construction of the [rpc
service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
into the constructor of
[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One
possible implementation of this would be to eliminate the current
`ConfigureRPC` method on the node package and instead [configure it
here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441).
To avoid adding complexity to the `node` package, we will add a
composite service implementation to the `service` package
that implements `service.Service` and is composed of a sequence of
underlying `service.Service` objects and handles their
startup/shutdown in the specified sequential order.
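A minimal sketch of such a composite service, assuming the existing
`service.BaseService` embedding pattern (the type name and constructor
are illustrative, not a final API):

```go
// sequentialService starts its children in order and stops them in
// reverse order; a sketch, not the final implementation.
type sequentialService struct {
	service.BaseService
	services []service.Service
}

func newSequentialService(logger log.Logger, services ...service.Service) *sequentialService {
	s := &sequentialService{services: services}
	s.BaseService = *service.NewBaseService(logger, "SequentialService", s)
	return s
}

// OnStart starts the underlying services in order, rolling back any
// already-started services if one of them fails.
func (s *sequentialService) OnStart() error {
	for i, svc := range s.services {
		if err := svc.Start(); err != nil {
			for j := i - 1; j >= 0; j-- {
				_ = s.services[j].Stop()
			}
			return err
		}
	}
	return nil
}

// OnStop stops the underlying services in reverse of startup order.
func (s *sequentialService) OnStop() {
	for i := len(s.services) - 1; i >= 0; i-- {
		_ = s.services[i].Stop()
	}
}
```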
Consensus, blocksync (*née* fast sync), and statesync all depend on
each other, and have significant initialization dependencies that are
presently encoded in the `node` package. As part of this change, a
new package/component (likely named `blocks` located at
`internal/blocks`) will encapsulate the initialization of these block
management areas of the code.
### Injectable Component Option
This section briefly describes a possible implementation for
user-supplied services running within a node. This should not be
implemented unless user-supplied components are a hard requirement for
a user.
In order to allow components to be replaced, a new public function
will be added to the public interface of `node` with a signature that
resembles the following:
```go
func NewWithServices(conf *config.Config,
logger log.Logger,
cf proxy.ClientCreator,
gen *types.GenesisDoc,
srvs []service.Service,
) (service.Service, error) {
```
The `service.Service` objects will be initialized in the order supplied, after
all pre-configured/default services have started (and shut down in reverse
order). The given services may implement additional interfaces, allowing them
to replace specific default services. `NewWithServices` will validate input
service lists with the following rules:
- None of the services may already be running.
- The caller may not supply more than one replacement reactor for a given
default service type.
If callers violate any of these rules, `NewWithServices` will return
an error. To retract support for this kind of operation in the future,
the function can be modified to *always* return an error.
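A minimal sketch of that validation, using a reflection-based notion of
"service type" (the helper name is hypothetical):

```go
// validateServices enforces the rules above: no supplied service may
// already be running, and at most one replacement may be supplied per
// concrete service type. Hypothetical helper, not a committed API.
func validateServices(srvs []service.Service) error {
	seen := make(map[reflect.Type]bool)
	for _, s := range srvs {
		if s.IsRunning() {
			return fmt.Errorf("service %T is already running", s)
		}
		t := reflect.TypeOf(s)
		if seen[t] {
			return fmt.Errorf("multiple replacements supplied for service type %v", t)
		}
		seen[t] = true
	}
	return nil
}
```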
## Consequences
### Positive
- The node package will become easier to maintain.
- It will become easier to add additional services within tendermint
nodes.
- It will become possible to replace default components in the node
package without vendoring the tendermint repo and modifying internal
code.
- The current end-to-end (e2e) test suite will be able to prevent any
regressions, and the new functionality can be thoroughly unit tested.
- The scope of this project is very narrow, which minimizes risk.
### Negative
- This increases our reliance on the `service.Service` interface which
is probably not an interface that we want to fully commit to.
- This proposal implements a fairly minimal set of functionality and
leaves open the possibility for many additional features which are
not included in the scope of this proposal.
### Neutral
N/A
## Open Questions
- To what extent does this new initialization framework need to accommodate
the legacy p2p stack? Would it be possible to delay a great deal of this
work to the 0.36 cycle to avoid this complexity?
- Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35.
- Where should additional public types be exported for the 0.35
release?
Related to the general project of API stabilization we want to deprecate
the `types` package, and move its contents into a new `pkg` hierarchy;
however, the design of the `pkg` interface is currently underspecified.
If `types` is going to remain for the 0.35 release, then we should consider
the impact of using multiple organizing modalities for this code within a
single release.
## Future Work
- Improve or simplify the `service.Service` interface. There are some
pretty clear limitations with this interface as written (there's no
way to time out a slow startup or shutdown, the cycle between the
`service.BaseService` and `service.Service` implementations is
awkward, and the default panic in `OnReset` is troubling).
- As part of the refactor of `service.Service` have all services/nodes
respect the lifetime of a `context.Context` object, and avoid the
current practice of creating `context.Context` objects in p2p and
reactor code. This would be required for in-process multi-tenancy.
- Support explicit dependencies between components and allow for
parallel startup, so that different reactors can startup at the same
time, where possible.
## References
- [this
branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
contains experimental work in the implementation of the node package
to unwind some of the hard dependencies between components.
- [the component
graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
as a framing for internal service construction.
## Appendix
### Dependencies
The relationship between the blockchain and consensus reactors,
described by the following dependency graph, makes replacing some of
these components more difficult relative to other reactors or
components.
![consensus blockchain dependency graph](./img/consensus_blockchain.png)

View File

@@ -1,445 +0,0 @@
# ADR 71: Proposer-Based Timestamps
* [Changelog](#changelog)
* [Status](#status)
* [Context](#context)
* [Alternative Approaches](#alternative-approaches)
* [Remove timestamps altogether](#remove-timestamps-altogether)
* [Decision](#decision)
* [Detailed Design](#detailed-design)
* [Overview](#overview)
* [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp)
* [Saving the timestamp across heights](#saving-the-timestamp-across-heights)
* [Changes to `CommitSig`](#changes-to-commitsig)
* [Changes to `Commit`](#changes-to-commit)
* [Changes to `Vote` messages](#changes-to-vote-messages)
* [New consensus parameters](#new-consensus-parameters)
* [Changes to `Header`](#changes-to-header)
* [Changes to the block proposal step](#changes-to-the-block-proposal-step)
* [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp)
* [Proposer selects block timestamp](#proposer-selects-block-timestamp)
* [Proposer waits](#proposer-waits)
* [Changes to the propose step timeout](#changes-to-the-propose-step-timeout)
* [Changes to validation rules](#changes-to-validation-rules)
* [Proposal timestamp validation](#proposal-timestamp-validation)
* [Block timestamp validation](#block-timestamp-validation)
* [Changes to the prevote step](#changes-to-the-prevote-step)
* [Changes to the precommit step](#changes-to-the-precommit-step)
* [Changes to locking a block](#changes-to-locking-a-block)
* [Remove voteTime Completely](#remove-votetime-completely)
* [Future Improvements](#future-improvements)
* [Consequences](#consequences)
* [Positive](#positive)
* [Neutral](#neutral)
* [Negative](#negative)
* [References](#references)
## Changelog
- July 15 2021: Created by @williambanfield
- Aug 4 2021: Draft completed by @williambanfield
- Aug 5 2021: Draft updated to include data structure changes by @williambanfield
- Aug 20 2021: Language edits completed by @williambanfield
## Status
**Accepted**
## Context
Tendermint currently provides a monotonically increasing source of time known as [BFTTime](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md).
This mechanism for producing a source of time is reasonably simple.
Each correct validator adds a timestamp to each `Precommit` message it sends.
The timestamp it sends is either the validator's current known Unix time or one millisecond greater than the previous block time, depending on which value is greater.
When a block is produced, the proposer chooses the block timestamp as the weighted median of the times in all of the `Precommit` messages the proposer received.
The weighting is proportional to the amount of voting power, or stake, a validator has on the network.
This mechanism for producing timestamps is both deterministic and byzantine fault tolerant.
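For intuition, the weighted-median selection can be sketched as follows (the real logic is the `MedianTime` function in the tendermint codebase; names here are illustrative):

```go
// weightedMedianTime returns the timestamp at which cumulative voting
// power first exceeds half of the total. Assumes non-empty,
// equal-length inputs; illustrative sketch only.
func weightedMedianTime(times []time.Time, powers []int64) time.Time {
	type weighted struct {
		t time.Time
		p int64
	}
	pairs := make([]weighted, len(times))
	var total int64
	for i := range times {
		pairs[i] = weighted{times[i], powers[i]}
		total += powers[i]
	}
	sort.Slice(pairs, func(i, j int) bool { return pairs[i].t.Before(pairs[j].t) })
	var cum int64
	for _, w := range pairs {
		cum += w.p
		if cum > total/2 {
			return w.t
		}
	}
	return pairs[len(pairs)-1].t
}
```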
This current mechanism for producing timestamps has a few drawbacks.
Validators do not have to agree at all on how close the selected block timestamp is to their own currently known Unix time.
Additionally, any amount of voting power `>1/3` may directly control the block timestamp.
As a result, it is quite possible that the timestamp is not particularly meaningful.
These drawbacks present issues in the Tendermint protocol.
Timestamps are used by light clients to verify blocks.
Light clients rely on correspondence between their own currently known Unix time and the block timestamp to verify blocks they see.
However, their currently known Unix time may be greatly divergent from the block timestamp as a result of the limitations of `BFTTime`.
The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues.
Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways:
1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block.
1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time.
The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power.
This document outlines the necessary code changes in Tendermint to implement the corresponding [proposer-based timestamps specification](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp).
## Alternative Approaches
### Remove timestamps altogether
Computer clocks are bound to skew for a variety of reasons.
Using timestamps in our protocol means either accepting the timestamps as not reliable or impacting the protocol's liveness guarantees.
This design requires impacting the protocol's liveness in order to make the timestamps more reliable.
An alternate approach is to remove timestamps altogether from the block protocol.
`BFTTime` is deterministic but may be arbitrarily inaccurate.
However, having a reliable source of time is quite useful for applications and protocols built on top of a blockchain.
We therefore decided not to remove the timestamp.
Applications often wish for some transactions to occur on a certain day, on a regular period, or after some time following a different event.
All of these require some meaningful representation of agreed upon time.
The following protocols and application features require a reliable source of time:
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements).
Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).
Proposer-based timestamps will allow this inflation calculation to use a more meaningful and accurate source of time.
## Decision
Implement proposer-based timestamps and remove `BFTTime`.
## Detailed Design
### Overview
Implementing proposer-based timestamps will require a few changes to Tendermint's code.
These changes will be to the following components:
* The `internal/consensus/` package.
* The `state/` package.
* The `Vote`, `CommitSig`, `Commit` and `Header` types.
* The consensus parameters.
### Proposal Timestamp and Block Timestamp
This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message.
The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses.
The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31).
This timestamp is the current Unix time known to the proposer when sending the `Proposal` message.
This timestamp is not currently used as part of consensus.
The changes in this ADR will begin using the proposal message timestamp as part of consensus.
We will refer to this as the **proposal timestamp** throughout this design.
The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338).
This timestamp is currently set as part of Tendermint's `BFTTime` algorithm.
It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block.
This field will continue to be used but the logic for creating and validating this timestamp will change.
We will refer to this as the **block timestamp** throughout this design.
At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`.
The following image shows this relationship.
The rest of this document describes the code changes that will make this possible.
![](./img/pbts-message.png)
### Saving the timestamp across heights
Currently, `BFTtime` uses `LastCommit` to construct the block timestamp.
The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`.
`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`.
For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`.
Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`.
To enable this, we will add a `Timestamp` field to the `Commit` struct.
This field will be populated at each height with the proposal timestamp decided on at the previous height.
This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes.
Changes to the `CommitSig` and `Commit` struct are detailed below.
### Changes to `CommitSig`
The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp.
This timestamp is the current Unix time known to the validator when it issued a `Precommit` for the block.
This timestamp is no longer used and will be removed in this change.
`CommitSig` will be updated as follows:
```diff
type CommitSig struct {
BlockIDFlag BlockIDFlag `json:"block_id_flag"`
ValidatorAddress Address `json:"validator_address"`
-- Timestamp time.Time `json:"timestamp"`
Signature []byte `json:"signature"`
}
```
### Changes to `Commit`
The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp.
The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp.
With these timestamps removed, the commit time will instead be stored in the `Commit` struct.
`Commit` will be updated as follows.
```diff
type Commit struct {
Height int64 `json:"height"`
Round int32 `json:"round"`
++ Timestamp time.Time `json:"timestamp"`
BlockID BlockID `json:"block_id"`
Signatures []CommitSig `json:"signatures"`
}
```
### Changes to `Vote` messages
`Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50).
This struct currently contains a timestamp.
This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator.
For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field.
For prevotes, this field is unused.
Proposer-based timestamps will instead use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp when constructing the `signedBytes` for each `CommitSig`.
This timestamp is therefore no longer useful and will be dropped.
`Vote` will be updated as follows:
```diff
type Vote struct {
Type tmproto.SignedMsgType `json:"type"`
Height int64 `json:"height"`
Round int32 `json:"round"`
BlockID BlockID `json:"block_id"` // zero if vote is nil.
-- Timestamp time.Time `json:"timestamp"`
ValidatorAddress Address `json:"validator_address"`
ValidatorIndex int32 `json:"validator_index"`
Signature []byte `json:"signature"`
}
```
### New consensus parameters
The proposer-based timestamp specification includes multiple new parameters that must be the same among all validators.
These parameters are `PRECISION`, `MSGDELAY`, and `ACCURACY`.
The `PRECISION` and `MSGDELAY` parameters are used to determine if the proposed timestamp is acceptable.
A validator will only Prevote a proposal if the proposal timestamp is considered `timely`.
A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator.
More specifically, a proposal timestamp is `timely` if `validatorLocalTime - PRECISION < proposalTime < validatorLocalTime + PRECISION + MSGDELAY`.
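A sketch of this `timely` predicate (function and parameter names are illustrative):

```go
// isTimely reports whether proposalTime falls inside the window
// (localTime-PRECISION, localTime+PRECISION+MSGDELAY).
func isTimely(proposalTime, localTime time.Time, precision, msgDelay time.Duration) bool {
	return proposalTime.After(localTime.Add(-precision)) &&
		proposalTime.Before(localTime.Add(precision+msgDelay))
}
```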
Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
The proposer-based timestamp specification also includes a [new ACCURACY parameter](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md#pbts-clocksync-external0).
Intuitively, `ACCURACY` represents the difference between the real time and the currently known time of correct validators.
The currently known Unix time of any validator is always somewhat different from real time.
`ACCURACY` is the largest such difference between each validator's time and real time taken as an absolute value.
This is not something a computer can determine on its own and must be specified as an estimate by the community running a Tendermint-based chain.
It is used in the new algorithm to [calculate a timeout for the propose step](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md#pbts-alg-startround0).
`ACCURACY` is assumed to be the same across all validators and therefore should be included as a consensus parameter.
The consensus parameters will be updated to include this new `Timestamp` field as follows:
```diff
type ConsensusParams struct {
Block BlockParams `json:"block"`
Evidence EvidenceParams `json:"evidence"`
Validator ValidatorParams `json:"validator"`
Version VersionParams `json:"version"`
++ Timestamp TimestampParams `json:"timestamp"`
}
```
```go
type TimestampParams struct {
Accuracy time.Duration `json:"accuracy"`
Precision time.Duration `json:"precision"`
MsgDelay time.Duration `json:"msg_delay"`
}
```
### Changes to `Header`
The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp.
This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps.
This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`.
This timestamp will therefore be identical in both the `Header` and the `LastCommit`.
To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`.
`Header` will be updated as follows:
```diff
type Header struct {
// basic block info
Version version.Consensus `json:"version"`
ChainID string `json:"chain_id"`
Height int64 `json:"height"`
-- Time time.Time `json:"time"`
++ LastTimestamp time.Time `json:"last_timestamp"`
// prev block info
LastBlockID BlockID `json:"last_block_id"`
// hashes of block data
LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"`
DataHash tmbytes.HexBytes `json:"data_hash"`
// hashes from the app output from the prev block
ValidatorsHash tmbytes.HexBytes `json:"validators_hash"`
NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"`
ConsensusHash tmbytes.HexBytes `json:"consensus_hash"`
AppHash tmbytes.HexBytes `json:"app_hash"`
// root hash of all results from the txs from the previous block
LastResultsHash tmbytes.HexBytes `json:"last_results_hash"`
// consensus info
EvidenceHash tmbytes.HexBytes `json:"evidence_hash"`
ProposerAddress Address `json:"proposer_address"`
}
```
### Changes to the block proposal step
#### Proposer selects proposal timestamp
The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message.
This satisfies the proposer-based timestamp specification and does not need to change.
#### Proposer selects block timestamp
The proposal timestamp that was decided at height `H-1` will be stored in the `State` struct's `RoundState.LastCommit` field.
The proposer will select this timestamp to use as the block timestamp at height `H`.
#### Proposer waits
Block timestamps must be monotonically increasing.
In `BFTTime`, if a validator's clock was behind, the [validator added 1 millisecond to the previous block's time and used that in its vote messages](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2246).
A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so a mechanism that completely ignores the validator's Unix time no longer works.
Validator clocks will not be perfectly in sync.
Therefore, the proposer's current known Unix time may be less than the `LastCommit.Timestamp`.
If the proposer's current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`.
This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method.
This method should now block until the proposer's time is greater than `LastCommit.Timestamp`.
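Sketched out, the wait could look like the following (assuming access to the last commit's timestamp; names are illustrative):

```go
// waitForPreviousBlockTime sleeps until the proposer's local clock is
// strictly greater than the last commit's timestamp, preserving block
// timestamp monotonicity. Sketch only, not the final implementation.
func waitForPreviousBlockTime(lastCommitTime time.Time) {
	if d := time.Until(lastCommitTime); d >= 0 {
		time.Sleep(d + time.Millisecond)
	}
}
```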
#### Changes to the propose step timeout
Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen.
Proposer-based timestamps requires changing this timeout logic.
The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block.
The validators must now take this and some other factors into account when deciding when to timeout the propose step.
Specifically, the propose step timeout must also take into account potential inaccuracy in the validator's clock and in the clock of the proposer.
Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out.
To account for possible inaccuracy in its own clock, inaccuracy in the proposer's clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timestamp + 2*ACCURACY + MSGDELAY`.
The spec defines this as `waitingTime`.
The [propose step's timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
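Under these assumptions, the timeout calculation can be sketched as (names are illustrative):

```go
// proposeTimeout returns the maximum of waitingTime, derived from the
// last commit time plus 2*ACCURACY + MSGDELAY, and the configured
// propose-step timeout.
func proposeTimeout(lastCommitTime time.Time, accuracy, msgDelay, configured time.Duration) time.Duration {
	waitingTime := time.Until(lastCommitTime.Add(2*accuracy + msgDelay))
	if waitingTime > configured {
		return waitingTime
	}
	return configured
}
```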
### Changes to validation rules
The rules for validating a proposal will need slight modification to implement proposer-based timestamps.
Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well.
#### Proposal timestamp validation
Adding proposal timestamp validation is a reasonably straightforward change.
The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
The precommit and prevote validation logic does not currently use this timestamp.
This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators.
If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered valid.
The validator will also check that the proposal time is greater than the block timestamp from the previous height.
If no valid proposal is received by the proposal timeout, the validator will prevote nil.
This is identical to the current logic.
#### Block timestamp validation
The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
First, the validation logic checks that this timestamp is greater than the previous block's timestamp.
Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block's LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
Finally, the logic also authenticates the timestamps in the `LastCommit`.
The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator's private key.
One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`.
To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25).
The logic to validate that the block timestamp is greater than the previous block's timestamp also works for proposer-based timestamps and will not change.
`BFTTime` validation is no longer applicable and will be removed.
Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
The `MedianTime` function can be completely removed.
The `LastCommit` timestamps may also be removed.
The `signedBytes` validation logic in `VerifyCommit` will be slightly altered.
The `CommitSig`s in the block's `LastCommit` will no longer each contain a timestamp.
The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`.
The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp.
Specifically, the `VerifyCommit` function will be updated to use this new timestamp.
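As a rough illustration of the changed verification, consider the following sketch; these are simplified stand-ins for the real Tendermint structures and canonical encoding, showing only which fields participate:

```go
package types

import "time"

// Simplified stand-ins for the real types.
type CommitSig struct {
	ValidatorAddress []byte
	Signature        []byte
	// Note: no per-signature Timestamp field under this design.
}

type Commit struct {
	BlockID    []byte
	Timestamp  time.Time // the single LastCommit.Timestamp, shared by all signers
	Signatures []CommitSig
}

// signedBytes sketches the message each signature is verified against;
// the shared commit timestamp is folded in once, for every signer.
func signedBytes(c *Commit, chainID string) []byte {
	b := []byte(chainID)
	b = append(b, c.BlockID...)
	b = append(b, []byte(c.Timestamp.UTC().Format(time.RFC3339Nano))...)
	return b
}
```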
### Changes to the prevote step
Currently, a validator will prevote a proposal in one of three cases:
* Case 1: Validator has no locked block and receives a valid proposal.
* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
* Case 3: Validator has a locked block and sees a valid proposal not matching its locked block, but sees +2/3 prevotes for the new proposal's block.
The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above.
### Changes to the precommit step
The precommit step will not require much modification.
Its proposal validation rules will change in the same ways that validation will change in the prevote step.
### Changes to locking a block
When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its locked block in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field.
In each subsequent round it will prevote that block.
A validator will only change which block it has locked if it sees +2/3 prevotes for a different block.
This mechanism will remain largely unchanged.
The only difference is the addition of proposal timestamp validation.
A validator will prevote nil in a round if the proposal message it received is not `timely`.
Prevoting nil in this case will not cause a validator to unlock its locked block.
This difference is an incidental result of the changes to prevote validation.
It is included in this design for completeness and to clarify that no additional changes will be made to block locking.
### Remove voteTime Completely
[voteTime](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L2229) is a mechanism for calculating the next `BFTTime` given both the validator's current known Unix time and the previous block timestamp.
If the previous block timestamp is greater than the validator's current known Unix time, then voteTime returns a value one millisecond greater than the previous block timestamp.
This logic is used in multiple places and is no longer needed for proposer-based timestamps.
It should therefore be removed completely.
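For reference, the rule being removed is approximately the following (a simplified sketch, not the exact implementation):

```go
package consensus

import "time"

// voteTime, sketched: under BFTTime, if the previous block's timestamp
// is at or ahead of the local clock, vote for a time one millisecond
// past it; otherwise vote the local time. Proposer-based timestamps
// make this adjustment unnecessary.
func voteTime(now, prevBlockTime time.Time) time.Time {
	minVoteTime := prevBlockTime.Add(time.Millisecond)
	if now.After(minVoteTime) {
		return now
	}
	return minVoteTime
}
```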
## Future Improvements
* Implement BLS signature aggregation.
By removing fields from the `Precommit` messages, we are able to aggregate signatures.
## Consequences
### Positive
* `<2/3` of validators can no longer influence block timestamps.
* Block timestamp will have stronger correspondence to real time.
* Improves the reliability of light client block verification.
* Enables BLS signature aggregation.
* Enables evidence handling to use time instead of height for evidence validity.
### Neutral
* Alters Tendermint's liveness properties.
Liveness now requires that all correct validators have synchronized clocks within a bound.
Liveness will now also require that validators' clocks move forward, which was not required under `BFTTime`.
### Negative
* May increase the length of the propose step if there is a large skew between the previous proposer's and the current proposer's local Unix time.
This skew will be bound by the `PRECISION` value, so it is unlikely to be too large.
* Current chains with block timestamps far in the future will either need to pause consensus until real time passes the erroneous block timestamp, or maintain clocks that are synchronized but very inaccurate.
## References
* [PBTS Spec](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp)
* [BFTTime spec](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md)

View File

@@ -1,105 +0,0 @@
# ADR 72: Restore Requests for Comments
## Changelog
- 20-Aug-2021: Initial draft (@creachadair)
## Status
Proposed
## Context
In the past, we kept a collection of Request for Comments (RFC) documents in `docs/rfc`.
Prior to the creation of the ADR process, these documents were used to document
design and implementation decisions about Tendermint Core. The RFC directory
was removed in favor of ADRs, in commit 3761aa69 (PR
[\#6345](https://github.com/tendermint/tendermint/pull/6345)).
For issues where an explicit design decision or implementation change is
required, an ADR is generally preferable to an open-ended RFC: An ADR is
relatively narrowly-focused, identifies a specific design or implementation
question, and documents the consensus answer to that question.
Some discussions are more open-ended, however, or don't require a specific
decision to be made (yet). Such conversations are still valuable to document,
and several members of the Tendermint team have been doing so by writing gists
or Google docs to share them around. That works well enough in the moment, but
gists do not support any kind of collaborative editing, and both gists and docs
are hard to discover after the fact. Google docs have much better collaborative
editing, but are worse for discoverability, especially when contributors span
different Google accounts.
Discoverability is important, because these kinds of open-ended discussions are
useful to people who come later -- either as new team members or as outside
contributors seeking to use and understand the thoughts behind our designs and
the architectural decisions that arose from those discussions.
With these in mind, I propose that:
- We re-create a new, initially empty `docs/rfc` directory in the repository,
and use it to capture these kinds of open-ended discussions in supplement to
ADRs.
- Unlike in the previous RFC scheme, documents in this new directory will
_not_ be used directly for decision-making. This is the key difference
between an RFC and an ADR.
Instead, an RFC will exist to document background, articulate general
principles, and serve as a historical record of discussion and motivation.
In this system, an RFC may _only_ result in a decision indirectly, via ADR
documents created in response to the RFC.
**In short:** If a decision is required, write an ADR; otherwise if a
sufficiently broad discussion is needed, write an RFC.
Just so that there is a consistent format, I also propose that:
- RFC files are named `rfc-XXX-title.{md,rst,txt}` and are written in plain
text, Markdown, or ReStructured Text.
- Like an ADR, an RFC should include a high-level change log at the top of the
document, and sections for:
* Abstract: A brief, high-level synopsis of the topic.
* Background: Any background necessary to understand the topic.
* Discussion: Detailed discussion of the issue being considered.
- Unlike an ADR, an RFC does _not_ include sections for Decisions, Detailed
Design, or evaluation of proposed solutions. If an RFC leads to a proposal
for an actual architectural change, that must be recorded in an ADR in the
usual way, and may refer back to the RFC in its References section.
## Alternative Approaches
Leaving aside implementation details, the main alternative to this proposal is
to leave things as they are now, with ADRs as the only log of record and other
discussions being held informally in whatever medium is convenient at the time.
## Decision
(pending)
## Detailed Design
- Create a new `docs/rfc` directory in the `tendermint` repository. Note that
this proposal intentionally does _not_ pull back the previous contents of
that path from Git history, as those documents were appropriately merged into
the ADR process.
- Create a `README.md` for RFCs that explains the rules and their relationship
to ADRs.
- Create an `rfc-template.md` file for RFC files.
## Consequences
### Positive
- We will have a more discoverable place to record open-ended discussions that
do not immediately result in a design change.
### Negative
- Some contributors may be confused about the RFC/ADR distinction.


View File

@@ -36,6 +36,10 @@ proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "anonymous"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = true
# Mode of Node: full | validator | seed (default: "validator")
# * validator node (default)
@@ -271,13 +275,9 @@ dial-timeout = "3s"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "v1"
recheck = true
broadcast = true
wal-dir = ""
# Maximum number of transactions in the mempool
size = 5000
@@ -304,22 +304,6 @@ max-tx-bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# it's insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -350,18 +334,13 @@ discovery-time = "15s"
temp-dir = ""
#######################################################
### BlockSync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[blocksync]
[fastsync]
# If this node is many blocks behind the tip of the chain, BlockSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
enable = true
# Block Sync version to use:
# 1) "v0" (default) - the standard block sync implementation
# 2) "v2" - DEPRECATED, please use v0
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
#######################################################
@@ -442,6 +421,7 @@ max-open-connections = 3
# Instrumentation namespace
namespace = "tendermint"
```
## Empty blocks VS no empty blocks

View File

@@ -32,7 +32,7 @@ tendermint start --log-level "info"
Here is the list of modules you may encounter in Tendermint's log and a
little overview what they do.
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
client with respect to the application and maintains 3 connections:
mempool, consensus and query. The code used by Tendermint Core can
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
@@ -45,12 +45,12 @@ little overview what they do.
from a crash.
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
You can subscribe to them by calling `subscribe` RPC method. Refer
to [RPC docs](../tendermint-core/rpc.md) for additional information.
to [RPC docs](./rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whenever
they are coming from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
more details, please check out the
[README](https://github.com/tendermint/spec/tree/master/spec/p2p).
[README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
- `rpc-server` RPC server. For implementation details, please read the
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which

View File

@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure `log-level` config variable. Some of the
modules can be found [here](logging.md#list-of-modules). If
modules can be found [here](../nodes/logging#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
@@ -114,7 +114,7 @@ just the votes seen at the current height.
If, after consulting with the logs and above endpoints, you still have no idea
what's happening, consider using `tendermint debug kill` sub-command. This
command will scrap all the available info and kill the process. See
[Debugging](../tools/debugging/README.md) for the exact format.
[Debugging](../tools/debugging.md) for the exact format.
You can inspect the resulting archive yourself or create an issue on
[Github](https://github.com/tendermint/tendermint). Before opening an issue
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).
`tendermint debug dump` sub-command can be used to periodically dump useful
information into an archive. See [Debugging](../tools/debugging/README.md) for more
information into an archive. See [Debugging](../tools/debugging.md) for more
information.
## What happens when my app dies
@@ -268,8 +268,6 @@ While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).
Native Windows support is not provided. If you are using a Windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
### Miscellaneous
NOTE: if you are going to use Tendermint in a public domain, make sure
@@ -315,7 +313,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
because proposers should wait to hear for more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
Hub](https://cosmos.network/intro/hub)) while for enterprise
applications, setting it to true is not a problem.
- `consensus.peer-gossip-sleep-duration`

View File

@@ -1,45 +0,0 @@
---
order: 1
parent:
order: false
---
# Requests for Comments
A Request for Comments (RFC) is a record of discussion on an open-ended topic
related to the design and implementation of Tendermint Core, for which no
immediate decision is required.
The purpose of an RFC is to serve as a historical record of a high-level
discussion that might otherwise only be recorded in ad hoc ways (for example,
via gists or Google docs) that are difficult to discover after the
fact. An RFC _may_ give rise to more specific architectural _decisions_ for
Tendermint, but those decisions must be recorded separately in [Architecture
Decision Records (ADR)](./../architecture).
As a rule of thumb, if you can articulate a specific question that needs to be
answered, write an ADR. If you need to explore the topic and get input from
others to know what questions need to be answered, an RFC may be appropriate.
## RFC Content
An RFC should provide:
- A **changelog**, documenting when and how the RFC has changed.
- An **abstract**, briefly summarizing the topic so the reader can quickly tell
whether it is relevant to their interest.
- Any **background** a reader will need to understand and participate in the
substance of the discussion (links to other documents are fine here).
- The **discussion**, the primary content of the document.
The [rfc-template.md](./rfc-template.md) file includes placeholders for these
sections.
## Table of Contents
- [RFC-000: P2P Roadmap](./rfc-000-p2p-roadmap.rst)
- [RFC-001: Storage Engines](./rfc-001-storage-engine.rst)
- [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md)
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

View File

@@ -1,316 +0,0 @@
====================
RFC 000: P2P Roadmap
====================
Changelog
---------
- 2021-08-20: Completed initial draft and distributed via a gist
- 2021-08-25: Migrated as an RFC and changed format
Abstract
--------
This document discusses the future of peer network management in Tendermint, with
a particular focus on features, semantics, and a proposed roadmap.
Specifically, we consider libp2p as a toolkit for implementing some fundamentals.
Background
----------
For the 0.35 release cycle the switching/routing layer of Tendermint was
replaced. This work was done "in place," and produced a version of Tendermint
that was backward-compatible and interoperable with previous versions of the
software. While there are new p2p/peer management constructs in the new
version (e.g. ``PeerManager`` and ``Router``), the main effect of this change
was to simplify the ways that other components within Tendermint interacted with
the peer management layer, and to make it possible for higher-level components
(specifically the reactors), to be used and tested more independently.
This refactoring, which was a major undertaking, was entirely necessary to
enable future development and iteration on this aspect of
Tendermint. There are also a number of potential user-facing features that
depend heavily on the p2p layer: additional transport protocols, transport
compression, and improved resilience to network partitions. These improvements to
modularity, stability, and reliability of the p2p system will also make
ongoing maintenance and feature development easier in the rest of Tendermint.
Critique of Current Peer-to-Peer Infrastructure
-----------------------------------------------
The current (refactored) P2P stack is an improvement on the previous iteration
(legacy), but as of 0.35, there remains room for improvement in the design and
implementation of the P2P layer.
Some limitations of the current stack include:
- heavy reliance on buffering to avoid backups in the flow of messages between
components, which is fragile to maintain, can lead to unexpected memory usage
patterns, and forces the routing layer to make decisions about when messages
should be discarded.
- the current p2p stack relies on convention (rather than the compiler) to
enforce the API boundaries and conventions between reactors and the router,
making it very easy to write "wrong" reactor code or introduce a bad
dependency.
- the current stack is probably more complex and difficult to maintain because
the legacy system must coexist with the new components in 0.35. When the
legacy stack is removed there are some simple changes that will become
possible and could reduce the complexity of the new system. (e.g. `#6598
<https://github.com/tendermint/tendermint/issues/6598>`_.)
- the current stack encapsulates a lot of information about peers, and makes it
difficult to expose that information to monitoring/observability tools. This
general opacity also makes it difficult to interact with the peer system
from other areas of the code base (e.g. tests, reactors).
- the legacy stack provided some control to operators to force the system to
dial new peers or seed nodes or manipulate the topology of the system *in
situ*. The current stack can't easily provide this, and while the new stack
may have better behavior, it does leave operators' hands tied.
Some of these issues will be resolved early in the 0.36 cycle, with the
removal of the legacy components.
The 0.36 release also provides the opportunity to make changes to the
protocol, as the release will not be compatible with previous releases.
Areas for Development
---------------------
These sections describe features that may make sense to include in a Phase 2 of
a P2P project.
Internal Message Passing
~~~~~~~~~~~~~~~~~~~~~~~~
Currently, there's no provision for intranode communication using the P2P
layer, which means when two reactors need to interact with each other they
have to have dependencies on each other's interfaces and
initialization. These interactions (e.g. transitions between
blocksync and consensus) could be converted from procedure calls to message passing.
This is a relatively simple change and could be implemented with the following
components:
- a constant to represent "local" delivery as the ``To`` field on
``p2p.Envelope``.
- a special path for routing local messages that doesn't require message
serialization (protobuf marshaling/unmarshaling).
Adding these semantics, particularly in conjunction with synchronous
semantics, provides a solution to dependency graph problems currently present
in the Tendermint codebase, which will simplify development and make it
possible to isolate components for testing.
Eventually, this will also make it possible to have a logical Tendermint node
running in multiple processes or in a collection of containers, although the
use case for this may be debatable.
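A minimal sketch of what this could look like follows; the names
(``LocalNodeID``, ``Route``) are hypothetical, and the real ``p2p`` types
differ:

.. code-block:: go

    package p2p

    // Illustrative sketch only; the real p2p types differ.
    type NodeID string

    // LocalNodeID is a hypothetical sentinel meaning "deliver in-process."
    const LocalNodeID NodeID = "local"

    type Envelope struct {
        To      NodeID
        Message interface{}
    }

    type Router struct {
        local chan Envelope // in-process delivery, no protobuf round-trip
        wire  chan Envelope // serialized and sent to a remote peer
    }

    // Route short-circuits envelopes addressed to the local node,
    // skipping protobuf marshaling/unmarshaling entirely.
    func (r *Router) Route(e Envelope) {
        if e.To == LocalNodeID {
            r.local <- e
            return
        }
        r.wire <- e
    }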
Synchronous Semantics (Paired Request/Response)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the current system, all messages are sent with fire-and-forget semantics,
and there's no coupling between a request sent via the p2p layer, and a
response. These kinds of semantics would simplify the implementation of
state and block sync reactors, and make intra-node message passing more
powerful.
For some interactions, like gossiping transactions between the mempools of
different nodes, fire-and-forget semantics make sense, but for other
operations the missing link between requests/responses leads to either
inefficiency when a node fails to respond or becomes unavailable, or code that
is just difficult to follow.
To support this kind of work, the protocol would need to accommodate some kind
of request/response ID to allow identifying out-of-order responses over a
single connection. Additionally, the programming model of the
``p2p.Channel`` would need to expand to accommodate some kind of *future* or
similar paradigm to make it viable to write reactor code without requiring
the reactor developer to wrestle with lower-level concurrency constructs.
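As a sketch of that programming model (a hypothetical API, assuming a
per-request correlation ID carried on the wire; the response-routing loop
that fills ``pending`` is omitted):

.. code-block:: go

    package p2p

    import (
        "context"
        "sync"
        "sync/atomic"
    )

    type Message interface{}

    type request struct {
        ID   uint64
        Body Message
    }

    // Channel sketches paired request/response semantics on top of
    // fire-and-forget sends; responses are matched by request ID by a
    // receive loop (not shown) that delivers into pending.
    type Channel struct {
        nextID  uint64
        mu      sync.Mutex
        pending map[uint64]chan Message
        out     chan<- request
    }

    // Call sends msg and blocks until the matching response arrives or
    // ctx expires, giving reactors a future-like primitive.
    func (c *Channel) Call(ctx context.Context, msg Message) (Message, error) {
        id := atomic.AddUint64(&c.nextID, 1)
        resp := make(chan Message, 1)
        c.mu.Lock()
        c.pending[id] = resp
        c.mu.Unlock()
        defer func() {
            c.mu.Lock()
            delete(c.pending, id)
            c.mu.Unlock()
        }()

        select {
        case c.out <- request{ID: id, Body: msg}:
        case <-ctx.Done():
            return nil, ctx.Err()
        }
        select {
        case r := <-resp:
            return r, nil
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }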
Timeout Handling (QoS)
~~~~~~~~~~~~~~~~~~~~~~
Currently, all timeouts, buffering, and QoS features are handled at the router
layer, and the reactors are implemented in ways that assume/require
asynchronous operation. This both increases the required complexity at the
routing layer, and means that misbehavior at the reactor level is difficult to
detect or attribute. Additionally, the current system provides three main
parameters to control quality of service:
- buffer sizes for channels and queues.
- priorities for channels.
- queue implementation details for shedding load.
These end up being quite coarse controls, and changing the settings is
difficult: because the queues and channels can buffer large numbers
of messages, it can be hard to see the impact of a given change, particularly
in our extant test environment. In general, we should endeavor to:
- set real timeouts, via contexts, on most message send operations, so that
senders rather than queues can be responsible for timeout
logic. Additionally, this will make it possible to avoid sending messages
during shutdown.
- reduce (to the greatest extent possible) the amount of buffering in
channels and the queues, to more readily surface backpressure and reduce the
potential for buildup of stale messages.
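For example, a send operation owned by the caller rather than by deep queues
might look like this sketch (a hypothetical helper; the unbuffered channel
surfaces backpressure immediately):

.. code-block:: go

    package p2p

    import (
        "context"
        "time"
    )

    type Envelope struct{ Message interface{} }

    // sendWithTimeout makes the sender, not a queue, responsible for
    // timeout logic; with an unbuffered out channel, backpressure is
    // visible immediately instead of accumulating stale messages.
    func sendWithTimeout(ctx context.Context, out chan<- Envelope, e Envelope) error {
        ctx, cancel := context.WithTimeout(ctx, time.Second)
        defer cancel()
        select {
        case out <- e:
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }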
Stream Based Connection Handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the transport layer is message based, which makes sense as a
mental model of how the protocol works, but makes it more difficult to
implement transports and connection types: it forces a higher-level view of
the connection and interaction, which makes it harder to support novel
transport types and makes it more likely that message-based caching and rate
limiting will be implemented at the transport layer rather than at a more
appropriate level.
The transport, then, would be responsible for negotiating the connection and
the handshake, and would otherwise behave like a socket/file descriptor with
``Read`` and ``Write`` methods.
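A sketch of such a stream-oriented transport interface (hypothetical and
simplified):

.. code-block:: go

    package p2p

    import (
        "context"
        "io"
    )

    // Connection behaves like a socket: higher layers do their own
    // framing via Read/Write rather than exchanging whole messages.
    type Connection interface {
        io.ReadWriteCloser
    }

    // Transport negotiates the connection and handshake, then hands
    // back a raw byte stream. Sketch only.
    type Transport interface {
        Dial(ctx context.Context, addr string) (Connection, error)
        Accept(ctx context.Context) (Connection, error)
    }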
While this was included in the initial design for the new P2P layer, it may be
obviated entirely if the transport and peer layer is replaced with libp2p,
which is primarily stream based.
Service Discovery
~~~~~~~~~~~~~~~~~
In the current system, Tendermint assumes that all nodes in a network are
largely equivalent, and nodes tend to be "chatty," making many requests of
large numbers of peers and waiting for peers to (hopefully) respond. While
this works and has allowed Tendermint to get to a certain point, it both
produces a theoretical scaling bottleneck and makes it harder to test and
verify components of the system.
In addition to peer's identity and connection information, peers should be
able to advertise a number of services or capabilities, and node operators or
developers should be able to specify peer capability requirements (e.g. target
at least <x>-percent of peers with <y> capability.)
These capabilities may be useful in selecting peers to send messages to; it
may make sense to extend Tendermint's message addressing capability to allow
reactors to send messages to groups of peers based on role rather than only
addressing one or all peers.
Having a good service discovery mechanism may pair well with the synchronous
semantics (request/response) work, as it allows reactors to "make a request of
a peer with <x> capability and wait for the response," rather than forcing the
reactors to track the capabilities or state of specific peers.
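A sketch of what advertised capabilities might look like in peer records
(names hypothetical):

.. code-block:: go

    package pex

    type NodeID string

    // PeerRecord sketches a PEX entry extended with advertised services.
    type PeerRecord struct {
        ID           NodeID
        Addresses    []string
        Capabilities []string // e.g. "statesync", "blocksync", "evidence"
    }

    // withCapability filters known peers by an advertised role, which a
    // reactor could use to target "peers with <x> capability."
    func withCapability(peers []PeerRecord, capability string) []PeerRecord {
        var out []PeerRecord
        for _, p := range peers {
            for _, c := range p.Capabilities {
                if c == capability {
                    out = append(out, p)
                    break
                }
            }
        }
        return out
    }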
Solutions
---------
Continued Homegrown Implementation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The current peer system is homegrown and is conceptually compatible with the
needs of the project, and while there are limitations to the system, the p2p
layer is not (currently as of 0.35) a major source of bugs or friction during
development.
However, the current implementation makes a number of allowances for
interoperability, and there are a collection of iterative improvements that
should be considered in the next couple of releases. To maintain the current
implementation, upcoming work would include:
- change the ``Transport`` mechanism to facilitate easier implementations.
- implement different ``Transport`` handlers to be able to manage peer
connections using different protocols (e.g. QUIC, etc.)
- entirely remove the constructs and implementations of the legacy peer
implementation.
- establish and enforce clearer chains of responsibility for connection
establishment (e.g. handshaking, setup), which is currently shared between
three components.
- report better metrics regarding the state of peers and network
connectivity, which are opaque outside of the system. This is constrained at
the moment as a side effect of the split responsibility for connection
establishment.
- extend the PEX system to include service information so that nodes in the
network need not be homogeneous.
While maintaining a bespoke peer management layer would seem to distract from
development of core functionality, the truth is that (once the legacy code is
removed) the scope of the peer layer is relatively small from a maintenance
perspective, and having control at this layer might actually afford the
project the ability to more rapidly iterate on some features.
LibP2P
~~~~~~
LibP2P provides components that, approximately, account for the
``PeerManager`` and ``Transport`` components of the current (new) P2P
stack. The Go APIs seem reasonable, and being able to externalize the
implementation details of peer and connection management seems like it could
provide a lot of benefits, particularly in supporting a more active ecosystem.
In general the API provides the kind of stream-based, multi-protocol,
idiomatic baseline needed for implementing a peer layer. Additionally,
because it handles peer exchange and connection management at a lower
level, adopting libp2p would make it possible to remove a good deal of
Tendermint's own code. Having said that, Tendermint's P2P layer covers a
greater scope (e.g. message routing to different peers) and that layer is
something that Tendermint might want to retain.
There are a number of unknowns that require more research, including how much of
a peer database the Tendermint engine itself needs to maintain, in order to
support higher level operations (consensus, statesync), but it might be the
case that our internal systems need to know much less about peers than
otherwise specified. Similarly, the current system has a notion of peer
scoring that cannot be communicated to libp2p, which may be fine as this is
only used to support peer exchange (PEX), which would become a property of libp2p
and would no longer be expressed in its current higher-level form.
In general, the effort to switch to libp2p would involve:
- timing it during an appropriate protocol-breaking window, as it doesn't seem
viable to support both libp2p *and* the current p2p protocol.
- providing some in-memory testing network to support the use case that the
current ``p2p.MemoryNetwork`` provides.
- re-homing the ``p2p.Router`` implementation on top of libp2p components to
be able to maintain the current reactor implementations.
Open questions include:
- how much local buffering should we be doing? We should
figure out what the expected behavior of libp2p is for QoS-type
functionality, and whether our requirements mean that we should implement
this ourselves on top of it.
- if Tendermint was going to use libp2p, how would libp2p's stability
guarantees (protocol, etc.) impact/constrain Tendermint's stability
guarantees?
- what kind of introspection does libp2p provide, and to what extent would
this change or constrain the kind of observability that Tendermint is able
to provide?
- how do efforts to select "the best" (healthy, close, well-behaving, etc.)
peers work out if Tendermint is not maintaining a local peer database?
- would adding additional higher level semantics (internal message passing,
request/response pairs, service discovery, etc.) facilitate removing some of
the direct linkages between constructs/components in the system and reduce
the need for Tendermint nodes to maintain state about their peers?
References
----------
- `Tracking Ticket for P2P Refactor Project <https://github.com/tendermint/tendermint/issues/5670>`_
- `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_
- `ADR 62: P2P Architecture and Abstraction <../architecture/adr-062-p2p-architecture.md>`_

View File

@@ -1,179 +0,0 @@
===========================================
RFC 001: Storage Engines and Database Layer
===========================================
Changelog
---------
- 2021-04-19: Initial Draft (gist)
- 2021-09-02: Migrated to RFC folder, with some updates
Abstract
--------
The aspect of Tendermint that's responsible for persistence and storage (often
"the database" internally) represents a bottle neck in the architecture of the
platform, that the 0.36 release presents a good opportunity to correct. The
current storage engine layer provides a great deal of flexibility that is
difficult for users to leverage or benefit from, while also making it harder
for Tendermint Core developers to deliver improvements on storage engine. This
RFC discusses the possible improvements to this layer of the system.
Background
----------
Tendermint has a very thin common wrapper that makes Tendermint itself
(largely) agnostic to the data storage layer (within the realm of the popular
key-value/embedded databases). This flexibility is not particularly useful:
the benefits of a specific database engine in the context of Tendermint are not
particularly well understood, and the maintenance burden for multiple backends
is not commensurate with the benefit provided. Additionally, because the data
storage layer is handled generically, and most tests run with an in-memory
framework, it's difficult to take advantage of any higher-level features of a
database engine.
Ideally, developers within Tendermint will be able to interact with persisted
data via an interface that functions approximately like an object
store, and this storage interface will be able to accommodate all existing
persistence workloads (e.g. block storage, local peer management information
like the "address book", and crash-recovery logs like the WAL). In addition to
providing a more ergonomic interface and new semantics, by selecting a single
storage engine Tendermint can use native durability and atomicity features of
the storage engine and simplify its own implementations.
Data Access Patterns
~~~~~~~~~~~~~~~~~~~~
Tendermint's data access patterns have the following characteristics:
- aggregate data size often exceeds memory.
- most data (e.g. blocks) is rarely mutated after it is written, but
small amounts of working data are persisted by nodes and are frequently
mutated (e.g. peer information, validator information).
- read patterns can be quite random.
- crash resistance and crash recovery, provided by write-ahead logs (in
consensus, and potentially for the mempool), should allow the system to
resume work after an unexpected shutdown.
Project Goals
~~~~~~~~~~~~~
As we think about replacing the current persistence layer, we should consider
the following high level goals:
- drop dependencies on storage engines that have a CGo dependency.
- encapsulate data format and data storage from higher-level services
(e.g. reactors) within tendermint.
- select a storage engine that does not incur any additional operational
complexity (e.g. the database should be embedded).
- provide database semantics with sufficient ACID, snapshots, and
transactional support.
Open Questions
~~~~~~~~~~~~~~
The following questions remain:
- what kind of data-access concurrency does tendermint require?
- would tendermint users SDK/etc. benefit from some shared database
infrastructure?
- In earlier conversations it seemed as if the SDK has selected Badger and
RocksDB for their storage engines, and it might make sense to be able to
(optionally) pass a handle to a Badger instance between the libraries in
some cases.
- what are typical data sizes, and what kinds of memory sizes can we expect
operators to be able to provide?
- in addition to simple persistence, what kind of additional semantics would
tendermint like to enjoy (e.g. transactional semantics, unique constraints,
indexes, in-place-updates, etc.)?
Decision Framework
~~~~~~~~~~~~~~~~~~
Given the constraint of removing the CGo dependency, the decision is between
"badger" and "boltdb" (in the form of the etcd/CoreOS fork,) as low level. On
top of this and somewhat orthogonally, we must also decide on the interface to
the database and how the larger application will have to interact with the
database layer. Users of the data layer shouldn't ever need to interact with
raw byte slices from the database, and should mostly have the experience of
interacting with Go-types.
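To make "interacting with Go-types" concrete, here is a sketch of such an
interface (hypothetical; the encoding would be handled internally):

.. code-block:: go

    package store

    // Store sketches a typed wrapper over an embedded key-value engine,
    // so higher-level code never touches raw byte slices.
    type Store interface {
        // Put encodes value (e.g. via protobuf or gob) under bucket/key.
        Put(bucket, key string, value interface{}) error
        // Get decodes the stored bytes into the Go value pointed to by into.
        Get(bucket, key string, into interface{}) error
        Delete(bucket, key string) error
    }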
Badger is more consistently developed and has a broader feature set than
Bolt. At the same time, Badger is likely more memory intensive and may have
more overhead in terms of open file handles given its model. At first glance,
Badger is the obvious choice: it's actively developed and it has a lot of
features that could be useful. Bolt is not without some benefits: it's stable
and is maintained by the etcd folks, and its simpler model (a single memory-mapped
file, etc.) may be easier to reason about.
I propose that we consider the following specific questions about storage
engines:
- does Badger's evolving development, which may result in data file format
changes in the future, and could restrict our access to using the latest
version of the library between major upgrades, present a problem?
- do we have goals/concerns about memory footprint that Badger may
prevent us from hitting, particularly as data sets grow over time?
- what kind of additional tooling might we need/like to build (dump/restore,
etc.)?
- do we want to run unit/integration tests against data files on disk rather
than relying exclusively on the memory database?
Project Scope
~~~~~~~~~~~~~
This project will consist of the following aspects:
- selecting a storage engine, and modifying the Tendermint codebase to
disallow any configuration of the storage engine outside of Tendermint.
- remove the dependency on the current tm-db interfaces and replace with some
internalized, safe, and ergonomic interface for data persistence with all
required database semantics.
- update core tendermint code to use the new interface and data tools.
Next Steps
~~~~~~~~~~
- circulate the RFC, and discuss options with appropriate stakeholders.
- write brief ADR to summarize decisions around technical decisions reached
during the RFC phase.
References
----------
- `boltdb <https://github.com/etcd-io/bbolt>`_
- `badger <https://github.com/dgraph-io/badger>`_
- `badgerdb overview <https://dbdb.io/db/badgerdb>`_
- `boltdb overview <https://dbdb.io/db/boltdb>`_
- `boltdb vs badger <https://tech.townsourced.com/post/boltdb-vs-badger>`_
- `bolthold <https://github.com/timshannon/bolthold>`_
- `badgerhold <https://github.com/timshannon/badgerhold>`_
- `Pebble <https://github.com/cockroachdb/pebble>`_
- `SDK Issue Regarding IVAL <https://github.com/cosmos/cosmos-sdk/issues/7100>`_
- `SDK Discussion about SMT/IVAL <https://github.com/cosmos/cosmos-sdk/discussions/8297>`_
Discussion
----------
- All things being equal, my tendency would be to use badger, with badgerhold
(if that makes sense) for its ergonomics and indexing capabilities, which
will require some small selection of wrappers for better write transaction
support. This is a weakly held tendency/belief and I think it would be
useful for the RFC process to build consensus (or not) around this basic
assumption.

View File

@@ -1,420 +0,0 @@
# RFC 002: Interprocess Communication (IPC) in Tendermint
## Changelog
- 08-Sep-2021: Initial draft (@creachadair).
## Abstract
Communication in Tendermint among consensus nodes, applications, and operator
tools all use different message formats and transport mechanisms. In some
cases there are multiple options. Having all these options complicates both the
code and the developer experience, and hides bugs. To support a more robust,
trustworthy, and usable system, we should document which communication paths
are essential, which could be removed or reduced in scope, and what we can
improve for the most important use cases.
This document proposes a variety of possible improvements of varying size and
scope. Specific design proposals should get their own documentation.
## Background
The Tendermint state replication engine has a complex IPC footprint.
1. Consensus nodes communicate with each other using a networked peer-to-peer
message-passing protocol.
2. Consensus nodes communicate with the application whose state is being
replicated via the [Application BlockChain Interface (ABCI)][abci].
3. Consensus nodes export a network-accessible [RPC service][rpc-service] to
support operations (bootstrapping, debugging) and synchronization of [light clients][light-client].
This interface is also used by the [`tendermint` CLI][tm-cli].
4. Consensus nodes export a gRPC service exposing a subset of the methods of
the RPC service described by (3). This was intended to simplify the
implementation of tools that already use gRPC to communicate with an
application (via the Cosmos SDK), and wanted to also talk to the consensus
node without implementing yet another RPC protocol.
The gRPC interface to the consensus node has been deprecated and is slated
for removal in the forthcoming Tendermint v0.36 release.
5. Consensus nodes may optionally communicate with a "remote signer" that holds
a validator key and can provide public keys and signatures to the consensus
node. One of the stated goals of this configuration is to allow the signer
to be run on a private network, separate from the consensus node, so that a
compromise of the consensus node from the public network would be less
likely to expose validator keys.
## Discussion: Transport Mechanisms
### Remote Signer Transport
A remote signer communicates with the consensus node in one of two ways:
1. "Raw": Using a TCP or Unix-domain socket which carries varint-prefixed
protocol buffer messages. In this mode, the consensus node is the server,
and the remote signer is the client.
This mode has been deprecated, and is intended to be removed.
2. gRPC: This mode uses the same protobuf messages as "Raw" mode, but uses a
standard encrypted gRPC HTTP/2 stub as the transport. In this mode, the
remote signer is the server and the consensus node is the client.
### ABCI Transport
In ABCI, the _application_ is the server, and the Tendermint consensus engine
is the client. Most applications implement the server using the [Cosmos SDK][cosmos-sdk],
which handles low-level details of the ABCI interaction and provides a
higher-level interface to the rest of the application. The SDK is written in Go.
Beneath the SDK, the application communicates with Tendermint core in one of
two ways:
- In-process direct calls (for applications written in Go and compiled against
the Tendermint code). This is an optimization for the common case where an
application is written in Go, to save on the overhead of marshaling and
unmarshaling requests and responses within the same process:
[`abci/client/local_client.go`][local-client]
- A custom remote procedure protocol built on wire-format protobuf messages
using a socket (the "socket protocol"): [`abci/server/socket_server.go`][socket-server]
The SDK also provides a [gRPC service][sdk-grpc] accessible from outside the
application, allowing clients to broadcast transactions to the network, look up
transactions, and simulate transaction costs.
### RPC Transport
The consensus node RPC service allows callers to query consensus parameters
(genesis data, transactions, commits), node status (network info, health
checks), application state (abci_query, abci_info), mempool state, and other
attributes of the node and its application. The service also provides methods
allowing transactions and evidence to be injected ("broadcast") into the
blockchain.
The RPC service is exposed in several ways:
- HTTP GET: Queries may be sent as URI parameters, with method names in the path.
- HTTP POST: Queries may be sent as JSON-RPC request messages in the body of an
HTTP POST request. The server uses a custom implementation of JSON-RPC that
is not fully compatible with the [JSON-RPC 2.0 spec][json-rpc], but handles
the common cases.
- Websocket: Queries may be sent as JSON-RPC request messages via a websocket.
This transport uses more or less the same JSON-RPC plumbing as the HTTP POST
handler.
The websocket endpoint also includes three methods that are _only_ exported
via websocket, which appear to support event subscription.
- gRPC: A subset of queries may be issued in protocol buffer format to the gRPC
interface described above under (4). As noted, this endpoint is deprecated
and will be removed in v0.36.
### Opportunities for Simplification
**Claim:** There are too many IPC mechanisms.
The preponderance of ABCI usage is via the Cosmos SDK, which means the
application and the consensus node are compiled together into a single binary,
and the consensus node calls the ABCI methods of the application directly as Go
functions.
We also need a true IPC transport to support ABCI applications _not_ written in
Go. There are several known applications written in Rust
(including [Anoma](https://github.com/anoma/anoma), Penumbra,
[Oasis](https://github.com/oasisprotocol/oasis-core), Twilight, and
[Nomic](https://github.com/nomic-io/nomic)). Ideally we will have at most one
such transport "built-in": More esoteric cases can be handled by a custom proxy.
Pragmatically, gRPC is probably the right choice here.
The primary consumers of the multi-headed "RPC service" today are the light
client and the `tendermint` command-line client. There is probably some local
use via curl, but I expect that is mostly ad hoc. Ethan reports that nodes are
often configured with the ports to the RPC service blocked, which is good for
security but complicates use by the light client.
### Context: Remote Signer Issues
Since the remote signer needs a secure communication channel to exchange keys
and signatures, and is expected to run truly remotely from the node (i.e., on a
separate physical server), there is not a whole lot we can do here. We should
finish the deprecation and removal of the "raw" socket protocol between the
consensus node and remote signers, but the use of gRPC is appropriate.
The main improvement we can make is to simplify the implementation quite a bit,
once we no longer need to support both "raw" and gRPC transports.
### Context: ABCI Issues
In the original design of ABCI, the presumption was that all access to the
application should be mediated by the consensus node. The idea is that outside
access could change application state and corrupt the consensus process, which
relies on the application to be deterministic. Of course, even without outside
access an application could behave nondeterministically, but allowing other
programs to send it requests was seen as courting trouble.
Conversely, users noted that most of the time, tools written for a particular
application don't want to talk to the consensus module directly. The
application "owns" the state machine the consensus engine is replicating, so
tools that care about application state should talk to the application.
Otherwise, they would have to bake in knowledge about Tendermint (e.g., its
interfaces and data structures) just because of the mediation.
For clients to talk directly to the application, however, there is another
concern: The consensus node is the ABCI _client_, so it is inconvenient for the
application to "push" work into the consensus module via ABCI itself. The
current implementation works around this by calling the consensus node's RPC
service, which exposes an `ABCIQuery` kitchen-sink method that allows the
application a way to poke ABCI messages in the other direction.
Without this RPC method, you could work around this (at least in principle) by
having the consensus module "poll" the application for work that needs to be done,
but that has unsatisfactory implications for performance and robustness, as
well as being harder to understand.
There has apparently been discussion about trying to make a more bidirectional
communication between the consensus node and the application, but this issue
seems to still be unresolved.
Another complication of ABCI is that it requires the application (server) to
maintain [four separate connections][abci-conn]: One for "consensus" operations
(BeginBlock, EndBlock, DeliverTx, Commit), one for "mempool" operations, one
for "query" operations, and one for "snapshot" (state synchronization) operations.
The rationale seems to have been that these groups of operations should be able
to proceed concurrently with each other. In practice, it results in a very complex
state management problem to coordinate state updates between the separate streams.
While application authors in Go are mostly insulated from that complexity by the
Cosmos SDK, the plumbing to maintain those separate streams is complicated, hard
to understand, and we suspect it contains concurrency bugs and/or lock contention
issues affecting performance that are subtle and difficult to pin down.
Even without changing the semantics of any ABCI operations, this code could be
made smaller and easier to debug by separating the management of concurrency
and locking from the IPC transport: If all requests and responses are routed
through one connection, the server can explicitly maintain priority queues for
requests and responses, and make less-conservative decisions about when locks
are (or aren't) required to synchronize state access. With independent queues,
the server must lock conservatively, and no optimistic scheduling is practical.
This would be a tedious implementation change, but should be achievable without
breaking any of the existing interfaces. More importantly, it could potentially
address a lot of difficult concurrency and performance problems we currently
see anecdotally but have difficulty isolating because of how intertwined these
separate message streams are at runtime.
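As a rough sketch of the single-connection idea (illustrative only, not a
proposal for the actual ABCI server API):

```go
package abciserver

import "container/heap"

// request sketches one unit of ABCI work with an explicit priority,
// e.g. consensus=0 (highest), mempool=1, query=2, snapshot=3.
type request struct {
	priority int
	handle   func()
}

// queue implements heap.Interface so a single-connection server can
// schedule work explicitly instead of coordinating four streams.
type queue []*request

func (q queue) Len() int            { return len(q) }
func (q queue) Less(i, j int) bool  { return q[i].priority < q[j].priority }
func (q queue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *queue) Push(x interface{}) { *q = append(*q, x.(*request)) }
func (q *queue) Pop() interface{} {
	old := *q
	r := old[len(old)-1]
	*q = old[:len(old)-1]
	return r
}

// drain processes queued requests in priority order; q must already
// satisfy the heap invariant (via heap.Init or heap.Push).
func drain(q *queue) {
	for q.Len() > 0 {
		heap.Pop(q).(*request).handle()
	}
}
```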
TODO: Impact of ABCI++ for this topic?
### Context: RPC Issues
The RPC system serves several masters, and has a complex surface area. I
believe there are some improvements that can be exposed by separating some of
these concerns.
The Tendermint light client currently uses the RPC service to look up blocks
and transactions, and to forward ABCI queries to the application. The light
client proxy uses the RPC service via a websocket. The Cosmos IBC relayer also
uses the RPC service via websocket to watch for transaction events, and uses
the `ABCIQuery` method to fetch information and proofs for posted transactions.
Some work is already underway toward using P2P message passing rather than RPC
to synchronize light client state with the rest of the network. IBC relaying,
however, requires access to the event system, which is currently not accessible
except via the RPC interface. Event subscription _could_ be exposed via P2P,
but that is a larger project since it adds P2P communication load, and might
thus have an impact on the performance of consensus.
If event subscription can be moved into the P2P network, we could entirely
remove the websocket transport, even for clients that still need access to the
RPC service. Until then, we may still be able to reduce the scope of the
websocket endpoint to _only_ event subscription, by moving uses of the RPC
server as a proxy to ABCI over to the gRPC interface.
Having the RPC server still makes sense for local bootstrapping and operations,
but can be further simplified. Here are some specific proposals:
- Remove the HTTP GET interface entirely.
- Simplify JSON-RPC plumbing to remove unnecessary reflection and wrapping.
- Remove the gRPC interface (this is already planned for v0.36).
- Separate the websocket interface from the rest of the RPC service, and
restrict it to only event subscription.
Eventually we should try to remove the websocket interface entirely, but we
will need to revisit that (probably in a new RFC) once we've done some of the
easier things.
These changes would preserve the ability of operators to issue queries with
curl (but would require using JSON-RPC instead of URI parameters). That would
be a little less user-friendly, but for a use case that should not be that
prevalent.
These changes would also preserve compatibility with existing JSON-RPC based
code paths like the `tendermint` CLI and the light client (even ahead of
further work to remove that dependency).
**Design goal:** An operator should be able to disable non-local access to the
RPC server on any node in the network without impairing the ability of the
network to function for service of state replication, including light clients.
**Design principle:** All communication required to implement and monitor the
consensus network should use P2P, including the various synchronizations.
### Options for ABCI Transport
The majority of current usage is in Go, and the majority of that is mediated by
the Cosmos SDK, which uses the "direct call" interface. There is probably some
opportunity to clean up the implementation of that code, notably by inverting
which interface is at the "top" of the abstraction stack (currently it acts
like an RPC interface, and escape-hatches into the direct call). However, this
general approach works fine and doesn't need to be fundamentally changed.
For applications _not_ written in Go, the two remaining options are the
"socket" protocol (another variation on varint-prefixed protobuf messages over
an unstructured stream) and gRPC. It would be nice if we could get rid of one
of these to reduce (unneeded?) optionality.
Since both the socket protocol and gRPC depend on protocol buffers, the
"socket" protocol is the most obvious choice to remove. While gRPC is more
complex, the set of languages that _have_ protobuf support but _lack_ gRPC
support is small. Moreover, gRPC is already widely used in the rest of the
ecosystem (including the Cosmos SDK).
If some use case did arise later that can't work with gRPC, it would not be too
difficult for that application author to write a little proxy (in Go) that
bridges the convenient SDK APIs into a simpler protocol than gRPC.
**Design principle:** It is better for an uncommon special case to carry the
burdens of its specialness, than to bake an escape hatch into the infrastructure.
**Recommendation:** We should deprecate and remove the socket protocol.
### Options for RPC Transport
[ADR 057][adr-57] proposes using gRPC for the Tendermint RPC implementation.
This is still possible, but if we are able to simplify and decouple the
concerns as described above, I do not think it should be necessary.
While JSON-RPC is not the best possible RPC protocol for all situations, it has
some advantages over gRPC for our domain. Specifically:
- It is easy to call JSON-RPC manually from the command-line, which helps with
a common concern for the RPC service, local debugging and operations.
Relatedly: JSON is relatively easy for humans to read and write, and it can
be easily copied and pasted to share sample queries and debugging results in
chat, issue comments, and so on. Ideally, the RPC service will not be used
for activities where the costs of a text protocol are important compared to
its legibility and manual usability benefits.
- gRPC has an enormous dependency footprint for both clients and servers, and
many of the features it provides to support security and performance
(encryption, compression, streaming, etc.) are mostly irrelevant to local
use. Tendermint already needs to include a gRPC client for the remote signer,
but if we can avoid the need for a _client_ to depend on gRPC, that is a win
for usability.
- If we intend to migrate light clients off RPC to use P2P entirely, there is
no advantage to forcing a temporary migration to gRPC along the way; and once
the light client is not dependent on the RPC service, the efficiency of the
protocol is much less important.
- We can still get the benefits of generated data types using protocol buffers, even
without using gRPC:
- Protobuf defines a standard JSON encoding for all message types so
languages with protobuf support do not need to worry about type mapping
oddities.
- Using JSON means that even languages _without_ good protobuf support can
implement the protocol with a bit more work, and I expect this situation to
be rare.
Even if a language lacks a good standard JSON-RPC mechanism, the protocol is
lightweight and can be implemented by simple send/receive over TCP or
Unix-domain sockets with no need for code generation, encryption, etc. gRPC
uses a complex HTTP/2 based transport that is not easily replicated.
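For illustration, a complete JSON-RPC `status` call needs nothing beyond an
HTTP POST; the address shown is the conventional default RPC endpoint, so
adjust for your node:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Standard JSON-RPC 2.0 request envelope.
	body, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "status",
		"params":  map[string]interface{}{},
	})
	resp, err := http.Post("http://127.0.0.1:26657", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["result"])
}
```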
### Future Work
The background and proposals sketched above focus on the existing structure of
Tendermint and improvements we can make in the short term. It is worthwhile to
also consider options for longer-term broader changes to the IPC ecosystem.
The following outlines some ideas at a high level:
- **Consensus service:** Today, the application and the consensus node are
nominally connected only via ABCI. Tendermint was originally designed with
the assumption that all communication with the application should be mediated
by the consensus node. Based on further experience, however, the design goal
is now that the _application_ should be the mediator of application state.
As noted above, however, ABCI is a client/server protocol, with the
application as the server. For outside clients that turns out to have been a
good choice, but it complicates the relationship between the application and
the consensus node: Previously transactions were entered via the node; now
they are entered via the app.
We have worked around this by using the Tendermint RPC service to give the
application a "back channel" to the consensus node, so that it can push
transactions back into the consensus network. But the RPC service exposes a
lot of other functionality, too, including event subscription, block and
transaction queries, and a lot of node status information.
Even if we can't easily "fix" the orientation of the ABCI relationship, we
could improve isolation by splitting out the parts of the RPC service that
the application needs as a back-channel, and sharing those _only_ with the
application. By defining a "consensus service", we could give the application
a limited back channel exposing only the capabilities it needs (a sketch
follows this list). This approach has the benefit that we could do it without
breaking existing use, and if we later did "fix" the ABCI directionality, we
could drop the special case without disrupting the rest of the RPC interface.
- **Event service:** Right now, the IBC relayer relies on the Tendermint RPC
service to provide a stream of block and transaction events, which it uses to
discover which transactions need relaying to other chains. While I think
that event subscription should eventually be handled via P2P, we could gain
some immediate benefit by splitting out event subscription from the rest of
the RPC service.
In this model, an event subscription service would be exposed on the public
network, but on a different endpoint. This would remove the need for the RPC
service to support the websocket protocol, and would allow operators to
isolate potentially sensitive status query results from the public network.
At the moment the relayers also use the RPC service to get block data for
synchronization, but work is already in progress to handle that concern via
the P2P layer. Once that's done, event subscription could be separated.
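To make the consensus-service idea concrete, the hypothetical Go interface
below sketches the narrow surface it might expose; the name and methods are
invented for this sketch and do not exist in Tendermint today:

```go
package consensus

import "context"

// Service is a deliberately narrow, hypothetical interface that the
// consensus node could expose to the application as a back channel.
type Service interface {
	// BroadcastTx pushes a transaction back into the consensus network,
	// replacing the application's current use of the full RPC service.
	BroadcastTx(ctx context.Context, tx []byte) error

	// LatestHeight reports the most recently committed block height,
	// which applications commonly need when (re)submitting transactions.
	LatestHeight(ctx context.Context) (int64, error)
}
```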
Separating parts of the existing RPC service is not without cost: It might
require additional connection endpoints, for example, though it is also not too
difficult for multiple otherwise-independent services to share a connection.
In return, though, it would become easier to reduce transport options and for
operators to independently control access to sensitive data. Considering the
viability and implications of these ideas is beyond the scope of this RFC, but
they are documented here since they follow from the background we have already
discussed.
## References
[abci]: https://github.com/tendermint/spec/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci
[rpc-service]: https://docs.tendermint.com/master/rpc/
[light-client]: https://docs.tendermint.com/master/tendermint-core/light-client.html
[tm-cli]: https://github.com/tendermint/tendermint/tree/master/cmd/tendermint
[cosmos-sdk]: https://github.com/cosmos/cosmos-sdk/
[local-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/local_client.go
[socket-server]: https://github.com/tendermint/tendermint/blob/master/abci/server/socket_server.go
[sdk-grpc]: https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types/tx#ServiceServer
[json-rpc]: https://www.jsonrpc.org/specification
[abci-conn]: https://github.com/tendermint/spec/blob/master/spec/abci/apps.md#state
[adr-57]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-057-RPC.md

View File

@@ -1,213 +0,0 @@
========================================
RFC 004: E2E Test Framework Enhancements
========================================
Changelog
---------
- 2021-09-14: started initial draft (@tychoish)
Abstract
--------
This document discusses a series of improvements to the e2e test framework
that we can consider during the next few releases to help boost confidence in
Tendermint releases, and improve developer efficiency.
Background
----------
During the 0.35 release cycle, the E2E tests were a source of great
value, helping to identify a number of bugs before release. At the same time,
the tests were not consistently passing during this period, which reduced
their value and forced the core development team to allocate time and energy
to maintaining and chasing down issues with the e2e tests and the test
harness. The experience of this release cycle suggests a series of
improvements to the test framework, and this document attempts to capture
them, along with their motivations and potential impact.
Projects
--------
Flexible Workload Generation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Presently the e2e suite contains a single workload generation pattern, which
exists simply to ensure that the test networks have some work during their
runs. However, the shape and volume of the work are deliberately consistent
and gentle, to help ensure test reliability.
We don't need a complex workload generation framework, but being able to have
a few different workload shapes available for test networks, both generated and
hand-crafted, would be useful.
Workload patterns/configurations might include (a configuration sketch
follows this list):
- transaction targeting patterns (include light nodes, round robin, target
individual nodes)
- variable transaction size over time.
- transaction broadcast option (synchronously, checked, fire-and-forget,
mixed).
- number of transactions to submit.
- non-transaction workloads: (evidence submission, query, event subscription.)
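To make this concrete, a workload specification could be as small as the
following struct, consumed by the load generator; this is a hypothetical
sketch, not an existing e2e framework type::

    package e2e

    // Workload describes one load-generation pattern for a test network.
    type Workload struct {
        // Target selects which nodes receive transactions:
        // "round-robin", "single-node", or "include-light-nodes".
        Target string `toml:"target"`

        // TxBytes is the size of each transaction; a schedule could make
        // this vary over the course of the run.
        TxBytes int `toml:"tx_bytes"`

        // Broadcast selects the broadcast mode: "sync", "commit",
        // "async" (fire-and-forget), or "mixed".
        Broadcast string `toml:"broadcast"`

        // TxCount is the total number of transactions to submit.
        TxCount int `toml:"tx_count"`
    }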
Configurable Generator
~~~~~~~~~~~~~~~~~~~~~~
The nightly e2e suite is defined by the `testnet generator
<https://github.com/tendermint/tendermint/blob/master/test/e2e/generator/generate.go#L13-L65>`_,
and it's difficult to add dimensions or change the focus of the test suite in
any way without modifying the implementation of the generator. If the
generator were more configurable, potentially via a file rather than in
the Go implementation, we could modify the focus of the test suite on the
fly.
Features that we might want to configure:
- number of test networks to generate of various topologies, to improve
coverage of different configurations.
- test application configurations (to modify the latency of ABCI calls, etc.)
- size of test networks.
- workload shape and behavior.
- initial sync and catch-up configurations.
The workload generator currently provides runtime options for limiting the
generator to specific types of P2P stacks, and for generating multiple groups
of test cases to support parallelism. The goal is to extend this pattern and
avoid hardcoding the matrix of test cases in the generator code. Once testnet
generation is configurable at runtime, developers will be able to use the e2e
framework to validate changes locally before landing them, rather than
discovering broken e2e tests a day later.
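A file-driven generator configuration might look like the following sketch;
all of the field names here are invented for illustration::

    package generator

    // Config is a hypothetical file-driven replacement for the hardcoded
    // matrix in generate.go.
    type Config struct {
        Networks   int      `toml:"networks"`      // number of testnets to generate
        Topologies []string `toml:"topologies"`    // e.g. "single", "quad", "large"
        P2P        []string `toml:"p2p"`           // limit to legacy/new/hybrid stacks
        MaxNodes   int      `toml:"max_nodes"`     // upper bound on network size
        ABCIDelay  int      `toml:"abci_delay_ms"` // inject latency into test app calls
        Groups     int      `toml:"groups"`        // split output for CI parallelism
    }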
In addition to the autogenerated suite, it might make sense to maintain a
small collection of hand-crafted cases that exercise configurations of
concern, to run as part of the nightly (or less frequent) loop.
Implementation Plan Structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As a development team, we should determine which features should impact e2e
testing early in the development cycle, and if we intend to modify the e2e
tests to exercise a feature, we should identify this up front and begin the
integration process as soon as possible.
To facilitate this, we should adopt a practice whereby we exercise specific
features that are currently under development more rigorously in the e2e
suite, and then, as development stabilizes, reduce the number or weight of
these features in the suite.
As of 0.35 there are essentially two end-to-end tests: the suite of 64
generated test networks, and the hand-crafted `ci.toml` test case. The
generated test cases help provide systematic coverage, while the `ci` run
provides coverage for a large number of features.
Reduce Cycle Time
~~~~~~~~~~~~~~~~~
One of the barriers to leveraging the e2e framework, and one of the challenges
in debugging failures, is that the cycle time of running a single test
iteration is quite high: 5 minutes to build the docker image, plus the time to
run the test or tests.
There are a number of improvements and enhancements that can reduce the cycle
time in practice:
- reduce the amount of time required to build the docker image used in these
tests. Without the dependency on CGo, the tendermint binaries could be
(cross-)compiled outside of the docker container and then injected into the
image, which would take better advantage of docker's native caching; indeed,
without the CGo dependency there would be no hard requirement for the e2e
tests to use docker at all.
- support test parallelism. Because of the way the testnets are orchestrated,
a single system can really only run one network at a time. For executions
(local or remote) with more resources, there's no reason not to run a few
networks in parallel to reduce the feedback time.
- prune testnet configurations that are unlikely to provide good signal, to
shorten the time to feedback.
- apply some kind of tiered approach to test execution, to improve the
legibility of the test results. For example, order tests by the dependencies
of their features, or run test networks without perturbations before running
the same configuration with perturbations, to be able to isolate the impact
of specific features.
- orchestrate the test harness directly from go test rather than via a
special harness and shell scripts, so that e2e tests fit more naturally into
developers' existing workflows (a sketch follows this section).
Many of these improvements, particularly, reducing the build time will also
reduce the time to get feedback during automated builds.
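As a sketch of that last idea, assuming the runner logic were exported as an
importable package (the ``runner`` package and every function on it are
hypothetical)::

    package e2e_test

    import (
        "testing"

        // Hypothetical import: assumes the CLI runner logic is factored
        // out into a library.
        "github.com/tendermint/tendermint/test/e2e/runner"
    )

    func TestSmokeNetwork(t *testing.T) {
        if testing.Short() {
            t.Skip("e2e tests are slow; skipped in -short mode")
        }
        manifest, err := runner.LoadManifest("networks/ci.toml")
        if err != nil {
            t.Fatal(err)
        }
        // Start the network and tear it down when the test ends.
        net, err := runner.Start(manifest)
        if err != nil {
            t.Fatal(err)
        }
        t.Cleanup(net.Stop)
        // Wait for the network to make progress before asserting.
        if err := net.WaitForHeight(10); err != nil {
            t.Fatal(err)
        }
    }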
Deeper Insights
~~~~~~~~~~~~~~~
When a test network fails, it's incredibly difficult to understand _why_ the
network failed, as the current system provides very little insight into the
system beyond the process logs. When a test network stalls or fails,
developers should be able to quickly and easily get a sense of the state of
the network and all of its nodes.
Improvements in pursuit of this goal include functionality that would help
node operators in production environments, by improving the quality and
utility of the logging messages and other reported metrics, as well as tools
to collect and aggregate this data for developers in the context of test
networks.
- Interleave messages from all nodes in the network, to be able to correlate
events during the test run (a sketch follows this list).
- Collect structured metrics of the system operation (CPU/MEM/IO) during the
test run, as well as from each tendermint/application process.
- Build (simple) tools to be able to render and summarize the data collected
during the test run to answer basic questions about test outcome.
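For the log-interleaving idea, a minimal sketch, assuming each line carries an
RFC3339 timestamp prefix (which sorts lexically in time order), could be::

    package insights

    import "sort"

    // MergeLogs interleaves per-node log lines into one stream so that
    // events across the network can be correlated during a test run.
    func MergeLogs(nodes map[string][]string) []string {
        type entry struct{ node, line string }
        var all []entry
        for node, lines := range nodes {
            for _, line := range lines {
                all = append(all, entry{node, line})
            }
        }
        // RFC3339 timestamps sort lexically, so sorting by line text
        // orders the merged stream chronologically.
        sort.Slice(all, func(i, j int) bool { return all[i].line < all[j].line })
        out := make([]string, 0, len(all))
        for _, e := range all {
            out = append(out, e.node+" | "+e.line)
        }
        return out
    }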
Flexible Assertions
~~~~~~~~~~~~~~~~~~~
Currently, all assertions run for every test network. This keeps the
assertions fairly bland and makes the framework primarily useful for
smoke-testing; it would be useful to be able to write and run different tests
for different configurations, which would allow us to test outside of the
happy path (a sketch follows below).
In general our existing assertions occupy a small fraction of the total test
time, so adding a few extra assertions would cost little and could help build
confidence.
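A sketch of gating assertions on the testnet manifest; the ``Network`` type,
``Manifest.Evidence`` field, and helper methods are invented for
illustration::

    package e2e_test

    import "testing"

    // assertEvidence runs only for networks whose manifest enabled the
    // evidence workload; other networks skip it rather than fail.
    func assertEvidence(t *testing.T, net *Network) {
        if !net.Manifest.Evidence {
            t.Skip("network did not generate evidence")
        }
        if got := net.CommittedEvidence(); got == 0 {
            t.Errorf("expected committed evidence, got %d", got)
        }
    }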
Additional Kinds of Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The existing e2e suite exercises networks of nodes with homogeneous
tendermint versions and stable configurations, which are expected to make
progress. There are many other possible test configurations that may be
interesting to explore. These could include dimensions such as:
- Multi-version testing to exercise our compatibility guarantees for networks
that might have different tendermint versions.
- As a flavor of multi-version testing, include upgrade testing, to build
confidence in migration code and procedures.
- Additional test applications, particularly practical applications,
including some that use gaiad and/or the cosmos-sdk, as well as test-only
applications that simulate other kinds of applications (e.g. variable
application operation latency).
- Tests of "non-viable" configurations that ensure that forbidden combinations
lead to halts.
References
----------
- `ADR 66: End-to-End Testing <../architecture/adr-66-e2e-testing.md>`_

View File

@@ -1,35 +0,0 @@
# RFC {RFC-NUMBER}: {TITLE}
## Changelog
- {date}: {changelog}
## Abstract
> A brief high-level synopsis of the topic of discussion for this RFC, ideally
> just a few sentences. This should help the reader quickly decide whether the
> rest of the discussion is relevant to their interest.
## Background
> Any context or orientation needed for a reader to understand and participate
> in the substance of the Discussion. If necessary, this section may include
> links to other documentation or sources rather than restating existing
> material, but should provide enough detail that the reader can tell what they
> need to read to be up-to-date.
### References
> Links to external materials needed to follow the discussion may be added here.
>
> In addition, if the discussion in a request for comments leads to any design
> decisions, it may be helpful to add links to the ADR documents here after the
> discussion has settled.
## Discussion
> This section contains the core of the discussion.
>
> There is no fixed format for this section, but ideally changes to this
> section should be updated before merging to reflect any discussion that took
> place on the PR that made those changes.

View File

@@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint.
- [Subscribing to events](./subscription.md)
- [Block Structure](./block-structure.md)
- [RPC](./rpc.md)
- [Block Sync](./block-sync.md)
- [Fast Sync](./fast-sync.md)
- [State Sync](./state-sync.md)
- [Mempool](./mempool.md)
- [Light Client](./light-client.md)

View File

@@ -2,8 +2,7 @@
order: 10
---
# Block Sync
*Formerly known as Fast Sync*
# Fast Sync
In a proof of work blockchain, syncing with the chain is the same
process as staying up-to-date with the consensus: download blocks, and
@@ -15,49 +14,34 @@ scratch can take a very long time. It's much faster to just download
blocks and check the merkle tree of validators than to run the real-time
consensus gossip protocol.
## Using Block Sync
## Using Fast Sync
To support faster syncing, Tendermint offers a `blocksync` mode, which
To support faster syncing, Tendermint offers a `fast-sync` mode, which
is enabled by default, and can be toggled in the `config.toml` or via
`--blocksync.enable=false`.
`--fast_sync=false`.
In this mode, the Tendermint daemon will sync hundreds of times faster
than if it used the real-time consensus process. Once caught up, the
daemon will switch out of Block Sync and into the normal consensus mode.
daemon will switch out of fast sync and into the normal consensus mode.
After running for some time, the node is considered `caught up` if it
has at least one peer and its height is at least as high as the max
reported peer height. See [the IsCaughtUp
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
Note: There are multiple versions of Block Sync. Please use v0 as the other versions are no longer supported.
Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta.
If you would like to use a different version you can do so by changing the version in the `config.toml`:
```toml
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[blocksync]
[fastsync]
# If this node is many blocks behind the tip of the chain, BlockSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
enable = true
# Block Sync version to use:
# 1) "v0" (default) - the standard Block Sync implementation
# 2) "v2" - DEPRECATED, please use v0
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
```
If we're lagging sufficiently, we should go back to block syncing, but
If we're lagging sufficiently, we should go back to fast syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
## The Block Sync event
When the tendermint blockchain core launches, it may switch to `block-sync`
mode to catch up to the current best height of the network. The core will emit
a block-sync event to expose the current status and the sync height. Once it
has caught up to the network's best height, it will switch to the consensus
mechanism and then emit another event exposing the block-sync `complete`
status and the state `height`.
The user can query the events by subscribing to `EventQueryBlockSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for details.
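For example, a minimal sketch of subscribing to this event with the Go RPC
client could look like the following; the listen address is an assumption,
and error handling is abbreviated:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Assumes a local node with the default RPC listen address.
	c, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	// Start the websocket client used for subscriptions.
	if err := c.Start(); err != nil {
		panic(err)
	}
	defer c.Stop() //nolint:errcheck

	events, err := c.Subscribe(context.Background(), "docs-example",
		types.EventQueryBlockSyncStatus.String())
	if err != nil {
		panic(err)
	}

	// Print block-sync status events as they arrive.
	for e := range events {
		fmt.Printf("%+v\n", e.Data)
	}
}
```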

View File

@@ -4,15 +4,8 @@ order: 11
# State Sync
With block sync a node is downloading all of the data of an application from genesis and verifying it.
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
With state sync your node will download data related to the head or near the head of the chain and verify the data.
This leads to drastically shorter times for joining a network.
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
## Events
When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
The user can query the events by subscribing to `EventQueryStateSyncStatus`. Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for details.

View File

@@ -36,7 +36,7 @@ more information on query syntax and other options.
You can also use tags, given you had included them into DeliverTx
response, to query transaction results. See [Indexing
transactions](../app-dev/indexing-transactions.md) for details.
transactions](./indexing-transactions.md) for details.
## ValidatorSetUpdates

View File

@@ -185,65 +185,51 @@ the argument name and use `_` as a placeholder.
### Formatting
When sending transactions to the RPC interface, the following formatting rules
must be followed:
The following nuances when sending/formatting transactions should be
taken into account:
Using `GET` (with parameters in the URL):
With `GET`:
To send a UTF8 string as transaction data, enclose the value of the `tx`
parameter in double quotes:
To send a UTF8 string byte array, quote the value of the tx parameter:
```sh
curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
```
which sends a 5-byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
Note that the URL in this example is enclosed in single quotes to prevent the
shell from interpreting the double quotes. Alternatively, you may escape the
double quotes with backslashes:
Note the URL must be wrapped with single quotes, else bash will ignore
the double quotes. To avoid the single quotes, escape the double quotes:
```sh
curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
```
The double-quoted format also works for multibyte characters, as long as they
are valid UTF8, for example:
Using a special character:
```sh
curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
```
sends a 4-byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
Arbitrary (non-UTF8) transaction data may also be encoded as a string of
hexadecimal digits (2 digits per byte). To do this, omit the quotation marks
and prefix the hex string with `0x`:
To send as raw hex, omit quotes AND prefix the hex string with `0x`:
```sh
curl http://localhost:26657/broadcast_tx_commit?tx=0x68656C6C6F
curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
```
which sends the 5-byte transaction: \[68 65 6c 6c 6f\].
which sends a 4 byte transaction: \[01 02 03 04\].
Using `POST` (with parameters in JSON), the transaction data are sent as a JSON
string in base64 encoding:
With `POST` (using `json`), the raw hex must be `base64` encoded:
```sh
curl http://localhost:26657 -H 'Content-Type: application/json' --data-binary '{
"jsonrpc": "2.0",
"id": "anything",
"method": "broadcast_tx_commit",
"params": {
"tx": "aGVsbG8="
}
}'
curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
```
which sends the same 5-byte transaction: \[68 65 6c 6c 6f\].
which sends the same 4 byte transaction: \[01 02 03 04\].
Note that the hexadecimal encoding of transaction data is _not_ supported in
JSON (`POST`) requests.
Note that raw hex cannot be used in `POST` transactions.
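For reference, the base64 parameter value can be produced with the Go
standard library; a minimal sketch:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	tx := []byte("hello") // the 5-byte transaction [68 65 6c 6c 6f]

	// JSON-RPC POST requests carry transaction bytes as base64 strings.
	fmt.Println(base64.StdEncoding.EncodeToString(tx)) // prints "aGVsbG8="
}
```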
## Reset
@@ -566,7 +552,8 @@ To make a Tendermint network that can tolerate one of the validators
failing, you need at least four validator nodes (e.g., 2/3).
Updating validators in a live network is supported but must be
explicitly programmed by the application developer.
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/app-development.md) for more details.
### Local Network

View File

@@ -62,30 +62,3 @@ given destination directory. Each archive will contain:
Note: goroutine.out and heap.out will only be written if a profile address is
provided and is operational. This command is blocking and will log any error.
## Tendermint Inspect
Tendermint includes an `inspect` command for querying Tendermint's state store and block
store over Tendermint RPC.
When the Tendermint consensus engine detects inconsistent state, it will crash the
entire Tendermint process.
While in this inconsistent state, a node running Tendermint's consensus engine will not start up.
The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store
and state store.
`inspect` allows operators to query a read-only view of the state.
`inspect` does not run the consensus engine at all and can therefore be used to debug
processes that have crashed due to inconsistent state.
To start the `inspect` process, run
```bash
tendermint inspect
```
### RPC endpoints
The list of available RPC endpoints can be found by making a request to the RPC port.
For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).

View File

@@ -64,42 +64,13 @@ It won't kill the node, but it will gather all of the above data and package i
At this point, depending on how severe the degradation is, you may want to restart the process.
## Tendermint Inspect
What if the Tendermint node will not start up due to inconsistent consensus state?
When a node running the Tendermint consensus engine detects an inconsistent state,
it will crash the entire Tendermint process.
The Tendermint consensus engine cannot be run in this inconsistent state, and so the
node will fail to start up as a result.
The Tendermint RPC server can provide valuable information for debugging in this situation.
The Tendermint `inspect` command will run a subset of the Tendermint RPC server
that is useful for debugging inconsistent state.
### Running inspect
Start up the `inspect` tool on the machine where Tendermint crashed using:
```bash
tendermint inspect --home=</path/to/app.d>
```
`inspect` will use the data directory specified in your Tendermint configuration file.
`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.
### Using inspect
With the `inspect` server running, you can access RPC endpoints that are critically important
for debugging.
Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoints
will return useful information about the Tendermint consensus state.
## Outro
We're hoping that these Tendermint tools will become the de facto first response for any accidents.
We're hoping that the `tendermint debug` subcommand will become the de facto first response to any accidents.
Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` yet?
Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements.
Join our chat, where we discuss the current issues and future improvements.

View File

@@ -388,6 +388,7 @@ func main() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
os.Exit(0)
}
func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
@@ -563,6 +564,7 @@ defer func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
os.Exit(0)
```
## 1.5 Getting Up and Running

26
go.mod
View File

@@ -3,42 +3,40 @@ module github.com/tendermint/tendermint
go 1.16
require (
github.com/BurntSushi/toml v0.4.1
github.com/BurntSushi/toml v0.3.1
github.com/Masterminds/squirrel v1.5.0
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.1.13
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.11.0
github.com/go-kit/kit v0.10.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.42.1
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/google/uuid v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.3
github.com/lib/pq v1.10.2
github.com/libp2p/go-buffer-pool v0.0.2
github.com/minio/highwayhash v1.0.2
github.com/mroth/weightedrand v0.4.1
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.0
github.com/rs/zerolog v1.25.0
github.com/rs/zerolog v1.23.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.2.1
github.com/spf13/cobra v1.2.0
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.4
github.com/vektra/mockery/v2 v2.9.0
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.40.0
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
google.golang.org/grpc v1.39.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
)

607
go.sum

File diff suppressed because it is too large

View File

@@ -1,36 +0,0 @@
/*
Package inspect provides a tool for investigating the state of a
failed Tendermint node.
This package provides the Inspector type. The Inspector type runs a subset of the Tendermint
RPC endpoints that are useful for debugging issues with Tendermint consensus.
When a node running the Tendermint consensus engine detects an inconsistent consensus state,
the entire node will crash. The Tendermint consensus engine cannot run in this
inconsistent state so the node will not be able to start up again.
The RPC endpoints provided by the Inspector type allow for a node operator to inspect
the block store and state store to better understand what may have caused the inconsistent state.
The Inspector type's lifecycle is controlled by a context.Context:
ins := inspect.NewFromConfig(rpcConfig)
ctx, cancelFunc:= context.WithCancel(context.Background())
// Run blocks until the Inspector server is shut down.
go ins.Run(ctx)
...
// calling the cancel function will stop the running inspect server
cancelFunc()
Inspector serves its RPC endpoints on the address configured in the RPC configuration:
rpcConfig.ListenAddress = "tcp://127.0.0.1:26657"
ins := inspect.NewFromConfig(rpcConfig)
go ins.Run(ctx)
The list of available RPC endpoints can then be viewed by navigating to
http://127.0.0.1:26657/ in the web browser.
*/
package inspect

View File

@@ -1,149 +0,0 @@
package inspect
import (
"context"
"errors"
"fmt"
"net"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect/rpc"
"github.com/tendermint/tendermint/libs/log"
tmstrings "github.com/tendermint/tendermint/libs/strings"
rpccore "github.com/tendermint/tendermint/rpc/core"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"golang.org/x/sync/errgroup"
)
// Inspector manages an RPC service that exports methods to debug a failed node.
// After a node shuts down due to a consensus failure, it will no longer start
// up and its state cannot easily be inspected. An Inspector value provides a similar interface
// to the node, using the underlying Tendermint data stores, without bringing up
// any other components. A caller can query the Inspector service to inspect the
// persisted state and debug the failure.
type Inspector struct {
routes rpccore.RoutesMap
config *config.RPCConfig
indexerService *indexer.Service
eventBus *types.EventBus
logger log.Logger
}
// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
// The Inspector type does not modify the state or block stores.
// The sinks are used to enable block and transaction querying via the RPC server.
// The caller is responsible for starting and stopping the Inspector service.
//
//nolint:lll
func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector {
routes := rpc.Routes(*cfg, ss, bs, es, logger)
eb := types.NewEventBus()
eb.SetLogger(logger.With("module", "events"))
is := indexer.NewIndexerService(es, eb)
is.SetLogger(logger.With("module", "txindex"))
return &Inspector{
routes: routes,
config: cfg,
logger: logger,
eventBus: eb,
indexerService: is,
}
}
// NewFromConfig constructs an Inspector using the values defined in the passed in config.
func NewFromConfig(cfg *config.Config) (*Inspector, error) {
bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
if err != nil {
return nil, err
}
bs := store.NewBlockStore(bsDB)
sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
if err != nil {
return nil, err
}
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
return nil, err
}
sinks, err := sink.EventSinksFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return nil, err
}
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
ss := state.NewStore(sDB)
return New(cfg.RPC, bs, ss, sinks, logger), nil
}
// Run starts the Inspector servers and blocks until the servers shut down. The passed
// in context is used to control the lifecycle of the servers.
func (ins *Inspector) Run(ctx context.Context) error {
err := ins.eventBus.Start()
if err != nil {
return fmt.Errorf("error starting event bus: %s", err)
}
defer func() {
err := ins.eventBus.Stop()
if err != nil {
ins.logger.Error("event bus stopped with error", "err", err)
}
}()
err = ins.indexerService.Start()
if err != nil {
return fmt.Errorf("error starting indexer service: %s", err)
}
defer func() {
err := ins.indexerService.Stop()
if err != nil {
ins.logger.Error("indexer service stopped with error", "err", err)
}
}()
return startRPCServers(ctx, ins.config, ins.logger, ins.routes)
}
func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error {
g, tctx := errgroup.WithContext(ctx)
listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ")
rh := rpc.Handler(cfg, routes, logger)
for _, listenerAddr := range listenAddrs {
server := rpc.Server{
Logger: logger,
Config: cfg,
Handler: rh,
Addr: listenerAddr,
}
if cfg.IsTLSEnabled() {
keyFile := cfg.KeyFile()
certFile := cfg.CertFile()
listenerAddr := listenerAddr
g.Go(func() error {
logger.Info("RPC HTTPS server starting", "address", listenerAddr,
"certfile", certFile, "keyfile", keyFile)
err := server.ListenAndServeTLS(tctx, certFile, keyFile)
if !errors.Is(err, net.ErrClosed) {
return err
}
logger.Info("RPC HTTPS server stopped", "address", listenerAddr)
return nil
})
} else {
listenerAddr := listenerAddr
g.Go(func() error {
logger.Info("RPC HTTP server starting", "address", listenerAddr)
err := server.ListenAndServe(tctx)
if !errors.Is(err, net.ErrClosed) {
return err
}
logger.Info("RPC HTTP server stopped", "address", listenerAddr)
return nil
})
}
}
return g.Wait()
}

View File

@@ -1,583 +0,0 @@
package inspect_test
import (
"context"
"fmt"
"net"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/proto/tendermint/state"
httpclient "github.com/tendermint/tendermint/rpc/client/http"
"github.com/tendermint/tendermint/state/indexer"
indexermocks "github.com/tendermint/tendermint/state/indexer/mocks"
statemocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
func TestInspectConstructor(t *testing.T) {
cfg := config.ResetTestRoot("test")
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
d, err := inspect.NewFromConfig(cfg)
require.NoError(t, err)
require.NotNil(t, d)
})
}
func TestInspectRun(t *testing.T) {
cfg := config.ResetTestRoot("test")
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
d, err := inspect.NewFromConfig(cfg)
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
stoppedWG := &sync.WaitGroup{}
stoppedWG.Add(1)
go func() {
require.NoError(t, d.Run(ctx))
stoppedWG.Done()
}()
cancel()
stoppedWG.Wait()
})
}
func TestBlock(t *testing.T) {
testHeight := int64(1)
testBlock := new(types.Block)
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = []byte("test hash")
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{})
blockStoreMock.On("LoadBlock", testHeight).Return(testBlock)
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
resultBlock, err := cli.Block(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, testBlock.Height, resultBlock.Block.Height)
require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestTxSearch(t *testing.T) {
testHash := []byte("test")
testTx := []byte("tx")
testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash))
testTxResult := &abcitypes.TxResult{
Height: 1,
Index: 100,
Tx: testTx,
}
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
eventSinkMock.On("Type").Return(indexer.KV)
eventSinkMock.On("SearchTxEvents", mock.Anything,
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
Return([]*abcitypes.TxResult{testTxResult}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
var page = 1
resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "")
require.NoError(t, err)
require.Len(t, resultTxSearch.Txs, 1)
require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx)
cancel()
wg.Wait()
eventSinkMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
blockStoreMock.AssertExpectations(t)
}
func TestTx(t *testing.T) {
testHash := []byte("test")
testTx := []byte("tx")
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
eventSinkMock.On("Type").Return(indexer.KV)
eventSinkMock.On("GetTxByHash", testHash).Return(&abcitypes.TxResult{
Tx: testTx,
}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
res, err := cli.Tx(context.Background(), testHash, false)
require.NoError(t, err)
require.Equal(t, types.Tx(testTx), res.Tx)
cancel()
wg.Wait()
eventSinkMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
blockStoreMock.AssertExpectations(t)
}
func TestConsensusParams(t *testing.T) {
testHeight := int64(1)
testMaxGas := int64(55)
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{
Block: types.BlockParams{
MaxGas: testMaxGas,
},
}, nil)
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
params, err := cli.ConsensusParams(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockResults(t *testing.T) {
testHeight := int64(1)
testGasUsed := int64(100)
stateStoreMock := &statemocks.Store{}
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{
{
GasUsed: testGasUsed,
},
},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}, nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("Height").Return(testHeight)
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
res, err := cli.BlockResults(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, res.TotalGasUsed, testGasUsed)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestCommit(t *testing.T) {
testHeight := int64(1)
testRound := int32(101)
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil)
blockStoreMock.On("LoadSeenCommit").Return(&types.Commit{
Height: testHeight,
Round: testRound,
}, nil)
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
res, err := cli.Commit(context.Background(), &testHeight)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, res.SignedHeader.Commit.Round, testRound)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockByHash(t *testing.T) {
testHeight := int64(1)
testHash := []byte("test hash")
testBlock := new(types.Block)
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = testHash
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testHash,
},
Header: types.Header{
Height: testHeight,
},
}, nil)
blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil)
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
res, err := cli.BlockByHash(context.Background(), testHash)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, []byte(res.BlockID.Hash), testHash)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockchain(t *testing.T) {
testHeight := int64(1)
testBlock := new(types.Block)
testBlockHash := []byte("test hash")
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = testBlockHash
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testBlockHash,
},
})
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
res, err := cli.BlockchainInfo(context.Background(), 0, 100)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash))
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestValidators(t *testing.T) {
testHeight := int64(1)
testVotingPower := int64(100)
testValidators := types.ValidatorSet{
Validators: []*types.Validator{
{
VotingPower: testVotingPower,
},
},
}
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
testPage := 1
testPerPage := 100
res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testVotingPower, res.Validators[0].VotingPower)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockSearch(t *testing.T) {
testHeight := int64(1)
testBlockHash := []byte("test hash")
testQuery := "block.height = 1"
stateStoreMock := &statemocks.Store{}
blockStoreMock := &statemocks.BlockStore{}
eventSinkMock := &indexermocks.EventSink{}
eventSinkMock.On("Stop").Return(nil)
eventSinkMock.On("Type").Return(indexer.KV)
blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{
Header: types.Header{
Height: testHeight,
},
}, nil)
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testBlockHash,
},
})
eventSinkMock.On("SearchBlockEvents", mock.Anything,
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
Return([]int64{testHeight}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress)
require.NoError(t, err)
testPage := 1
testPerPage := 100
testOrderBy := "desc"
res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash))
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func requireConnect(t testing.TB, addr string, retries int) {
parts := strings.SplitN(addr, "://", 2)
if len(parts) != 2 {
t.Fatalf("malformed address to dial: %s", addr)
}
var err error
for i := 0; i < retries; i++ {
var conn net.Conn
conn, err = net.Dial(parts[0], parts[1])
if err == nil {
conn.Close()
return
}
// FIXME attempt to yield and let the other goroutine continue execution.
time.Sleep(time.Microsecond * 100)
}
t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err)
}

Some files were not shown because too many files have changed in this diff.