Compare commits

...

50 Commits

Author SHA1 Message Date
William Banfield
1f2fb500ae wip 2021-07-28 18:13:34 -04:00
William Banfield
b86acbbeab libs/net: fix GetFreePort to make port immediately usable 2021-07-28 15:22:53 -04:00
William Banfield
9e41414a53 light: replace homegrown mock with mockery (#6735)
This pull request removes the homegrown mocks in `light/provider/mock` in favor of mockery mocks.

Adds a simple benchmark-only mock to avoid the `reflection` overhead that `mockery` incurs.

part of #5274
2021-07-28 16:12:11 +00:00
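As a sketch of the trade-off described above: mockery-generated mocks route every call through testify's `mock.Mock`, which matches expectations via reflection, so in a benchmark a hand-written mock with canned return values keeps the measured cost honest. The interface and types below are hypothetical stand-ins, not the actual `light/provider` definitions.

```go
package provider

import "context"

// LightBlock is a placeholder for the real light-block type.
type LightBlock struct{ Height int64 }

// Provider is a hypothetical stand-in for the interface being mocked.
type Provider interface {
	LightBlock(ctx context.Context, height int64) (*LightBlock, error)
}

// benchmarkMock is a benchmark-only mock: a plain struct returning
// canned values, so each call costs a method dispatch and nothing more.
type benchmarkMock struct {
	blocks map[int64]*LightBlock
}

func (m *benchmarkMock) LightBlock(_ context.Context, height int64) (*LightBlock, error) {
	return m.blocks[height], nil
}
```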
Callum Waters
6ff4c3139c blockchain: rename to blocksync service (#6755) 2021-07-28 17:25:42 +02:00
JayT106
e87b0391cb cli/indexer: Reindex events (#6676) 2021-07-28 00:04:54 +02:00
Aleksandr Bezobchuk
4f73748bc8 mempool v1: tweak broadcastTxRoutine (#6771) 2021-07-27 15:34:06 -04:00
Callum Waters
9a3861fb82 light: wait for tendermint node to start before running example test (#6744) 2021-07-27 18:59:34 +02:00
William Banfield
44ac57489f abci: add changelog entry for mempool_error field (#6770)
Follow-up from PR #6740
2021-07-27 13:33:20 +00:00
Marko
76376e3161 avoid issues already labeled (#6767) 2021-07-27 06:17:32 +00:00
William Banfield
dd97ac6e1c test/fuzz: add mechanism to reproduce found fuzz errors (#6768)
* test/fuzz: add test to reproduce found fuzz errors
2021-07-26 17:30:43 -04:00
William Banfield
a751eee7f2 p2p: add test for pqueue dequeue full error (#6760)
This adds a test for closing the `pqueue` while the `pqueue` contains data that has not yet been dequeued.

This issue was found while debugging #6705 

This test will fail until @cmwaters' fix for this condition is merged.
2021-07-26 19:22:32 +00:00
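The condition under test is generic enough to sketch without the real `pqueue`: close a queue while items are still buffered, then check that the dequeue path fails cleanly instead of panicking or silently dropping data. A toy, channel-backed version (all names hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("queue is closed")

// queue is a toy stand-in for pqueue: buffered items plus a done signal.
type queue struct {
	items chan string
	done  chan struct{}
}

func newQueue(size int) *queue {
	return &queue{items: make(chan string, size), done: make(chan struct{})}
}

func (q *queue) enqueue(s string) { q.items <- s }
func (q *queue) close()           { close(q.done) }

// dequeue reports an error once the queue is closed, even if items remain.
func (q *queue) dequeue() (string, error) {
	select {
	case <-q.done:
		return "", errClosed
	default:
		return <-q.items, nil
	}
}

func main() {
	q := newQueue(8)
	q.enqueue("pending")
	q.close() // close while data is still queued
	if _, err := q.dequeue(); err != nil {
		fmt.Println("dequeue after close:", err)
	}
}
```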
William Banfield
c5dc3b267f test/fuzz: add test to reproduce found fuzz errors (#6757)
This change does two things:
1. It fixes the json fuzzer to account for receiving array results. Arrays are returned by the rpc server when the input data is an array.
2. Adds a `fuzz_test.go` file and corresponding `testdata` directory containing the failing test case.

This seems like a reasonable way to add and track previous crash issues in our fuzz test cases. The upcoming stdlib Go fuzz tool effectively does this automatically.
2021-07-26 14:58:51 +00:00
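The pattern described here — checking known crashers into `testdata` and replaying them in an ordinary `go test` run — looks roughly like the sketch below; the directory layout and the trivial `Fuzz` entry point are illustrative, not the repo's actual ones.

```go
package fuzz_test

import (
	"os"
	"path/filepath"
	"testing"
)

// Fuzz is a placeholder for the real go-fuzz entry point under test.
func Fuzz(data []byte) int { return 0 }

// TestFuzzCrashers replays previously found crashing inputs so that a
// regression shows up in ordinary `go test` runs, not only while fuzzing.
func TestFuzzCrashers(t *testing.T) {
	cases, err := filepath.Glob(filepath.Join("testdata", "cases", "*"))
	if err != nil {
		t.Fatal(err)
	}
	for _, path := range cases {
		data, err := os.ReadFile(path)
		if err != nil {
			t.Fatal(err)
		}
		t.Run(filepath.Base(path), func(t *testing.T) {
			Fuzz(data) // must complete without panicking
		})
	}
}
```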
dependabot[bot]
93f462ef86 build(deps): Bump codecov/codecov-action from 2.0.1 to 2.0.2 (#6764)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2.0.1 to 2.0.2.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v2.0.1...v2.0.2)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-07-26 14:17:02 +00:00
Callum Waters
91e277d7b7 enable pex reactor depending on config param (#6762) 2021-07-26 15:22:12 +02:00
Callum Waters
a341a626e0 p2p: avoid blocking on the dequeCh (#6765) 2021-07-26 09:09:07 -04:00
William Banfield
c3ae6f5b58 p2p: add coverage for mConnConnection.TrySendMessage (#6754)
This change adds additional coverage to the `mConnConnection.TrySendMessage` code path and adds a test to ensure it returns `io.EOF` when closed.

Addresses: #6570
2021-07-23 17:29:19 +00:00
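The `io.EOF`-on-close behavior can be reproduced with just the standard library: closing one end of a `net.Pipe` makes reads on the surviving end return `io.EOF`. A rough sketch of a test with that shape (the real test exercises `mConnConnection`, not a raw pipe):

```go
package p2p_test

import (
	"errors"
	"io"
	"net"
	"testing"
)

func TestReadAfterPeerClose(t *testing.T) {
	local, remote := net.Pipe()

	// Close the remote end while the local side is still alive.
	if err := remote.Close(); err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 1)
	if _, err := local.Read(buf); !errors.Is(err, io.EOF) {
		t.Fatalf("expected io.EOF, got %v", err)
	}
}
```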
Aleksandr Bezobchuk
a393cf8bab internal: update blockchain reactor godoc (#6749) 2021-07-23 08:15:57 -04:00
Callum Waters
0e2752ae42 light: improve error handling and allow providers to be added (#6733) 2021-07-22 18:12:34 +02:00
Callum Waters
97a8f125e0 e2e: allow for both v0 and v1 mempool implementations (#6752) 2021-07-22 17:59:02 +02:00
William Banfield
84c15857e4 mempool: return mempool errors to the abci client (#6740)
This change adds a `MempoolError` field to `ResponseCheckTx`. This will allow clients to understand that their transaction was rejected from the mempool despite passing the ABCI check.

This change also updates the code to use early returns to prevent deeply nested code blocks. Namely, it returns when the type assertion fails at the beginning of the method, instead of wrapping the entire method in a large if statement. This has a somewhat large effect on the diff as rendered by GitHub.

addresses: #3546
2021-07-22 14:52:29 +00:00
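From a client's point of view, the new field separates two distinct outcomes: the application's `CheckTx` verdict (`Code`) and a later mempool-level failure (`MempoolError`). A minimal sketch, using a local struct that mirrors the relevant fields of the generated `ResponseCheckTx` instead of importing it:

```go
package main

import "fmt"

// checkTxResult mirrors the relevant fields of the generated ResponseCheckTx.
type checkTxResult struct {
	Code         uint32
	Log          string
	MempoolError string // set by Tendermint itself, never by the application
}

// explain classifies why a transaction did or did not reach the mempool.
func explain(res checkTxResult) string {
	switch {
	case res.Code != 0:
		return "rejected by the application's CheckTx: " + res.Log
	case res.MempoolError != "":
		return "passed CheckTx but rejected by the mempool: " + res.MempoolError
	default:
		return "accepted into the mempool"
	}
}

func main() {
	fmt.Println(explain(checkTxResult{MempoolError: "mempool is full"}))
}
```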
JayT106
e70445f942 statesync/event: emit statesync start/end event (#6700) 2021-07-22 08:16:50 +02:00
Sam Kleinman
478f5321ad light: run examples as integration tests (#6745) 2021-07-21 09:54:14 -04:00
Marko
08e4e2ed3d ignore issues for stale bot (#6747) 2021-07-21 10:39:16 +00:00
dependabot[bot]
7d63e991c5 build(deps): Bump codecov/codecov-action from 1.5.2 to 2.0.1 (#6739)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 1.5.2 to 2.0.1.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.5.2...v2.0.1)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-07-20 17:38:25 -04:00
Sam Kleinman
7638235d33 e2e: longer test harness timeouts (#6728) 2021-07-20 15:57:48 -04:00
Callum Waters
2abfe20114 e2e: prevent adding light clients as persistent peers (#6743) 2021-07-20 18:11:43 +00:00
William Banfield
0bf7813c4e fix makefile test target to rely on test makefile (#6746)
The Makefile at the root of the repo [includes](cd19ef244e/Makefile (L61)) the Makefile under the `test` package. This fix removes the target defined in the root Makefile in favor of the included one.
2021-07-20 18:00:51 +00:00
Sam Kleinman
ff9038e2ce e2e: run tests in fewer groups (#6742) 2021-07-20 13:45:08 -04:00
Sam Kleinman
00a40835a2 e2e: remove cartesian testing of ipv6 (#6734)
Having looked at our network address parsing and connection code, it
really looks like we're not doing anything on top of what the standard
library is doing (both in terms of using `net.ParseIP` and
`net.Dial`), and I don't think we need to run the tests 2x the number
of times just to exercise different areas of the standard
library. I think most of our users are going to be using IPv4, and I
would be happy to fully remove this dimension as well if we find it's
making noise, but for now I think this is fine.
2021-07-20 15:47:35 +00:00
JayT106
c4f77ab6d1 fastsync/event: emit fastsync status event when switching consensus/fastsync (#6619)
closes #2498 
solves part of #3365

Note: it is difficult to test the event emission in the SwitchToFastSync path; we might need to change `stateSyncReactor` to an interface in the `nodeImpl` struct
2021-07-20 15:36:47 +00:00
Sam Kleinman
2030875056 e2e: drop single node hybrid configurations (#6737) 2021-07-20 11:23:51 -04:00
Sam Kleinman
639e145729 e2e: avoid systematic key-type variation (#6736) 2021-07-20 09:25:00 -04:00
Aleksandr Bezobchuk
68ffe8bc64 mempool: add TTL configuration to mempool (#6715) 2021-07-19 15:54:44 -04:00
William Banfield
21309ccb7b clist: add a few basic clist tests (#6727) 2021-07-19 14:02:21 +00:00
William Banfield
f70396c6fd add and run make target for generating existing mocks (#6732)
There are many `//go:generate mockery` lines in the source code.
This change adds a make target to invoke these mock generators.
It also runs the generators and adds the resulting mocks to the repo.

Related to #5274
2021-07-18 00:46:04 +00:00
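The target added by this change (visible in the Makefile diff further down: `go generate -run="mockery" ./...`) relies on `go generate`'s `-run` flag, which executes only the directives whose source text matches the pattern. A hypothetical annotated interface, with illustrative mockery flags:

```go
package store

//go:generate mockery --case underscore --name BlockStore

// BlockStore is a hypothetical interface; running `make mockery`
// (i.e. go generate -run="mockery" ./...) executes the directive
// above and regenerates ./mocks/block_store.go.
type BlockStore interface {
	Base() int64
	Height() int64
}
```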
William Banfield
fdc246e4a8 libs/clist: revert clear and detach changes while debugging (#6731) 2021-07-17 12:10:53 -04:00
Marko
78a0a5fe73 blockchain: error on v2 selection (#6730)
## Description

Remove v2 flag from toml
2021-07-16 10:19:55 +00:00
Marko
4f885209aa RPC: mark grpc as deprecated (#6725)
## Description

Mark gRPC as deprecated in the RPC layer. 

closes #6718
2021-07-15 22:05:21 +00:00
Callum Waters
6dd0cf92c8 router/statesync: add helpful log messages (#6724) 2021-07-15 19:26:35 +02:00
dependabot[bot]
626d9b4fbe build(deps): Bump actions/stale from 3.0.19 to 4 (#6726)
Bumps [actions/stale](https://github.com/actions/stale) from 3.0.19 to 4.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/v3.0.19...v4)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-15 10:17:51 -04:00
Sam Kleinman
8addf99f90 e2e: tweak sleep for perturbations (#6723)
This tweaks sleeps around perturbations, based on a theory that our
tests with "kill" perturbations restart the nodes fast enough that
their peers haven't marked them down when they try to reconnect. In my
local test runs, this clears out *most* of the test failures that I've
seen, except for one evidence-related test-harness problem (which should be
handled separately.)
2021-07-14 21:07:25 +00:00
Marko
76c6c67734 docs: fix broken links (#6719)
## Description 

Fix broken links

closes #6695
2021-07-14 14:31:08 +00:00
William Banfield
a46724e4f6 statesync: dispatcher test uses internal channel for timing (#6713)
This code change amends the dispatcher tests to read from the dispatcher's `requestCh`. This ensures that a request is waiting when the test calls `dispatcher.respond`. 
addresses: #6711
2021-07-14 14:16:09 +00:00
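The race being removed is the usual fire-and-check pattern: the test called `respond` before the dispatcher had registered the request. Reading from the dispatcher's own request channel makes the ordering explicit. A toy model of that synchronization (all names illustrative, not the real statesync types):

```go
package main

import "fmt"

// dispatcher is a toy model: requests are announced on requestCh and
// respond delivers the answer to the goroutine that asked.
type dispatcher struct {
	requestCh chan int64
	waiting   map[int64]chan string
}

func newDispatcher() *dispatcher {
	return &dispatcher{
		requestCh: make(chan int64),
		waiting:   map[int64]chan string{},
	}
}

// request registers interest in a height, then blocks until the request
// has been observed on requestCh.
func (d *dispatcher) request(height int64) chan string {
	ch := make(chan string, 1)
	d.waiting[height] = ch
	d.requestCh <- height
	return ch
}

func (d *dispatcher) respond(height int64, block string) {
	d.waiting[height] <- block
}

func main() {
	d := newDispatcher()
	result := make(chan string)
	go func() { result <- <-d.request(1) }()

	h := <-d.requestCh      // the test waits here: the request is now in flight,
	d.respond(h, "block@1") // so responding cannot race ahead of it
	fmt.Println(<-result)   // block@1
}
```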
Callum Waters
40fba3960d add missing context catch and tests (#6701) 2021-07-14 11:23:15 +02:00
Callum Waters
36a859ae54 e2e: ensure evidence validator set matches nodes validator set (#6712) 2021-07-13 19:47:36 +02:00
Sam Kleinman
ab5c63eff3 statesync: increase dispatcher timeout (#6714) 2021-07-13 13:04:18 -04:00
Sam Kleinman
8228936155 e2e: extend timeouts in test harness (#6694) 2021-07-13 11:28:07 -04:00
Callum Waters
a12e2bbb60 statesync: use initial height as a floor to backfilling (#6709) 2021-07-13 16:36:16 +02:00
dependabot[bot]
11bebfb6a0 build(deps): Bump github.com/google/uuid from 1.2.0 to 1.3.0 (#6708)
Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.2.0 to 1.3.0.
- [Release notes](https://github.com/google/uuid/releases)
- [Commits](https://github.com/google/uuid/compare/v1.2.0...v1.3.0)

---
updated-dependencies:
- dependency-name: github.com/google/uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-13 15:15:09 +02:00
William Banfield
4009102e2b statesync: remove outgoingCalls race condition in dispatcher (#6699)
* statesync: remove outgoing calls race condition
2021-07-12 19:05:47 -04:00
166 changed files with 4173 additions and 1559 deletions

View File

@@ -121,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
-- uses: codecov/codecov-action@v1.5.2
+- uses: codecov/codecov-action@v2.0.2
with:
file: ./coverage.txt
if: env.GIT_DIFF

View File

@@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
-group: ['00', '01', '02', '03']
+group: ['00', '01']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
@@ -35,7 +35,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
-run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e

View File

@@ -23,9 +23,14 @@ jobs:
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
-- name: Fuzz mempool
+- name: Fuzz mempool-v1
working-directory: test/fuzz
-run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
+run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true
+- name: Fuzz mempool-v0
+working-directory: test/fuzz
+run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
+continue-on-error: true
- name: Fuzz p2p-addrbook

View File

@@ -7,12 +7,14 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
-- uses: actions/stale@v3.0.19
+- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
-days-before-stale: 10
-days-before-close: 4
+days-before-stale: -1
+days-before-close: -1
+days-before-pr-stale: 10
+days-before-pr-close: 4
exempt-pr-labels: "S:wip"

View File

@@ -21,14 +21,16 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
@@ -70,6 +72,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
(@cmwaters)
- Blockchain Protocol
@@ -80,6 +84,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- Tooling
- [tools] \#6498 Set OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
### FEATURES
@@ -97,6 +102,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- Applications that do not specify a priority, i.e. zero, will have transactions reaped in the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)

View File

@@ -231,6 +231,15 @@ build-docker: build-linux
rm -rf DOCKER/tendermint
.PHONY: build-docker
###############################################################################
### Mocks ###
###############################################################################
mockery:
go generate -run="mockery" ./...
.PHONY: mockery
###############################################################################
### Local testnet using docker ###
###############################################################################

View File

@@ -17,7 +17,7 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### Config Changes
-* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
+* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
you have updated all the variables in your `config.toml` file.
@@ -29,9 +29,12 @@ This guide provides instructions for upgrading to specific versions of Tendermin
`Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
persistent peers, there's no guarantee that the node will remain connected with these peers.
-- configuration values starting with `priv-validator-` have moved to the new
+* configuration values starting with `priv-validator-` have moved to the new
`priv-validator` section, without the `priv-validator-` prefix.
* The fast sync process, as well as the blockchain package and service, have all
been renamed to block sync
### CLI Changes
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
@@ -74,6 +77,10 @@ will need to change to accommodate these changes. Most notably:
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.
### RPC changes
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**

View File

@@ -1,4 +1,4 @@
-// Code generated by mockery 2.7.4. DO NOT EDIT.
+// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks

View File

@@ -1837,6 +1837,9 @@ type ResponseCheckTx struct {
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
// mempool_error is set by Tendermint.
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
}
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
@@ -1942,6 +1945,13 @@ func (m *ResponseCheckTx) GetPriority() int64 {
return 0
}
func (m *ResponseCheckTx) GetMempoolError() string {
if m != nil {
return m.MempoolError
}
return ""
}
type ResponseDeliverTx struct {
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
@@ -2954,170 +2964,172 @@ func init() {
func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
var fileDescriptor_252557cfdd89a31a = []byte{
// 2608 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xbd, 0x73, 0x1b, 0xc7,
0x15, 0xc7, 0x37, 0x70, 0x8f, 0x04, 0x08, 0xae, 0x68, 0x19, 0x86, 0x65, 0x52, 0x3e, 0x8f, 0x1d,
0x4b, 0xb6, 0xc9, 0x98, 0x1a, 0x29, 0xf2, 0x38, 0x1f, 0x26, 0x20, 0x28, 0xa0, 0xc5, 0x90, 0xcc,
0x12, 0x92, 0xc7, 0x49, 0xac, 0xf3, 0x01, 0xb7, 0x04, 0xce, 0x02, 0xee, 0xce, 0x77, 0x0b, 0x8a,
0x74, 0x99, 0x49, 0x1a, 0x4d, 0x0a, 0x95, 0x69, 0x3c, 0x93, 0xff, 0x20, 0x65, 0x52, 0xa5, 0x4a,
0xe1, 0x22, 0x99, 0x71, 0x99, 0xca, 0xc9, 0x48, 0x5d, 0xfe, 0x81, 0x54, 0x99, 0xc9, 0xec, 0xc7,
0x7d, 0x01, 0x38, 0x02, 0x8c, 0xd3, 0xa5, 0xdb, 0xdd, 0x7b, 0xef, 0x61, 0xf7, 0xed, 0xbe, 0xdf,
0xfb, 0xed, 0x5b, 0xc0, 0xcb, 0x94, 0x58, 0x06, 0x71, 0x47, 0xa6, 0x45, 0xb7, 0xf4, 0x6e, 0xcf,
0xdc, 0xa2, 0x67, 0x0e, 0xf1, 0x36, 0x1d, 0xd7, 0xa6, 0x36, 0x5a, 0x09, 0x3f, 0x6e, 0xb2, 0x8f,
0xf5, 0x57, 0x22, 0xd2, 0x3d, 0xf7, 0xcc, 0xa1, 0xf6, 0x96, 0xe3, 0xda, 0xf6, 0xb1, 0x90, 0xaf,
0x5f, 0x89, 0x7c, 0xe6, 0x76, 0xa2, 0xd6, 0x62, 0x5f, 0xa5, 0xf2, 0x23, 0x72, 0xe6, 0x7f, 0x7d,
0x65, 0x4a, 0xd7, 0xd1, 0x5d, 0x7d, 0xe4, 0x7f, 0xde, 0xe8, 0xdb, 0x76, 0x7f, 0x48, 0xb6, 0x78,
0xaf, 0x3b, 0x3e, 0xde, 0xa2, 0xe6, 0x88, 0x78, 0x54, 0x1f, 0x39, 0x52, 0x60, 0xad, 0x6f, 0xf7,
0x6d, 0xde, 0xdc, 0x62, 0x2d, 0x31, 0xaa, 0xfe, 0xb5, 0x08, 0x45, 0x4c, 0x3e, 0x1f, 0x13, 0x8f,
0xa2, 0x6d, 0xc8, 0x91, 0xde, 0xc0, 0xae, 0xa5, 0xaf, 0xa6, 0xdf, 0x5c, 0xda, 0xbe, 0xb2, 0x39,
0xb1, 0xb8, 0x4d, 0x29, 0xd7, 0xea, 0x0d, 0xec, 0x76, 0x0a, 0x73, 0x59, 0x74, 0x13, 0xf2, 0xc7,
0xc3, 0xb1, 0x37, 0xa8, 0x65, 0xb8, 0xd2, 0x2b, 0x49, 0x4a, 0x77, 0x99, 0x50, 0x3b, 0x85, 0x85,
0x34, 0xfb, 0x29, 0xd3, 0x3a, 0xb6, 0x6b, 0xd9, 0xf3, 0x7f, 0x6a, 0xd7, 0x3a, 0xe6, 0x3f, 0xc5,
0x64, 0x51, 0x03, 0xc0, 0xb4, 0x4c, 0xaa, 0xf5, 0x06, 0xba, 0x69, 0xd5, 0x72, 0x5c, 0xf3, 0xd5,
0x64, 0x4d, 0x93, 0x36, 0x99, 0x60, 0x3b, 0x85, 0x15, 0xd3, 0xef, 0xb0, 0xe9, 0x7e, 0x3e, 0x26,
0xee, 0x59, 0x2d, 0x7f, 0xfe, 0x74, 0x7f, 0xca, 0x84, 0xd8, 0x74, 0xb9, 0x34, 0x6a, 0xc1, 0x52,
0x97, 0xf4, 0x4d, 0x4b, 0xeb, 0x0e, 0xed, 0xde, 0xa3, 0x5a, 0x81, 0x2b, 0xab, 0x49, 0xca, 0x0d,
0x26, 0xda, 0x60, 0x92, 0xed, 0x14, 0x86, 0x6e, 0xd0, 0x43, 0xdf, 0x87, 0x52, 0x6f, 0x40, 0x7a,
0x8f, 0x34, 0x7a, 0x5a, 0x2b, 0x72, 0x1b, 0x1b, 0x49, 0x36, 0x9a, 0x4c, 0xae, 0x73, 0xda, 0x4e,
0xe1, 0x62, 0x4f, 0x34, 0xd9, 0xfa, 0x0d, 0x32, 0x34, 0x4f, 0x88, 0xcb, 0xf4, 0x4b, 0xe7, 0xaf,
0xff, 0x8e, 0x90, 0xe4, 0x16, 0x14, 0xc3, 0xef, 0xa0, 0x1f, 0x81, 0x42, 0x2c, 0x43, 0x2e, 0x43,
0xe1, 0x26, 0xae, 0x26, 0xee, 0xb3, 0x65, 0xf8, 0x8b, 0x28, 0x11, 0xd9, 0x46, 0xb7, 0xa1, 0xd0,
0xb3, 0x47, 0x23, 0x93, 0xd6, 0x80, 0x6b, 0xaf, 0x27, 0x2e, 0x80, 0x4b, 0xb5, 0x53, 0x58, 0xca,
0xa3, 0x7d, 0xa8, 0x0c, 0x4d, 0x8f, 0x6a, 0x9e, 0xa5, 0x3b, 0xde, 0xc0, 0xa6, 0x5e, 0x6d, 0x89,
0x5b, 0x78, 0x3d, 0xc9, 0xc2, 0x9e, 0xe9, 0xd1, 0x23, 0x5f, 0xb8, 0x9d, 0xc2, 0xe5, 0x61, 0x74,
0x80, 0xd9, 0xb3, 0x8f, 0x8f, 0x89, 0x1b, 0x18, 0xac, 0x2d, 0x9f, 0x6f, 0xef, 0x80, 0x49, 0xfb,
0xfa, 0xcc, 0x9e, 0x1d, 0x1d, 0x40, 0x3f, 0x87, 0x4b, 0x43, 0x5b, 0x37, 0x02, 0x73, 0x5a, 0x6f,
0x30, 0xb6, 0x1e, 0xd5, 0xca, 0xdc, 0xe8, 0xb5, 0xc4, 0x49, 0xda, 0xba, 0xe1, 0x9b, 0x68, 0x32,
0x85, 0x76, 0x0a, 0xaf, 0x0e, 0x27, 0x07, 0xd1, 0x43, 0x58, 0xd3, 0x1d, 0x67, 0x78, 0x36, 0x69,
0xbd, 0xc2, 0xad, 0x5f, 0x4f, 0xb2, 0xbe, 0xc3, 0x74, 0x26, 0xcd, 0x23, 0x7d, 0x6a, 0xb4, 0x51,
0x84, 0xfc, 0x89, 0x3e, 0x1c, 0x13, 0xf5, 0x3b, 0xb0, 0x14, 0x09, 0x53, 0x54, 0x83, 0xe2, 0x88,
0x78, 0x9e, 0xde, 0x27, 0x3c, 0xaa, 0x15, 0xec, 0x77, 0xd5, 0x0a, 0x2c, 0x47, 0x43, 0x53, 0x7d,
0x9a, 0x0e, 0x34, 0x59, 0xd4, 0x31, 0xcd, 0x13, 0xe2, 0x7a, 0xa6, 0x6d, 0xf9, 0x9a, 0xb2, 0x8b,
0x5e, 0x83, 0x32, 0x3f, 0x3f, 0x9a, 0xff, 0x9d, 0x85, 0x7e, 0x0e, 0x2f, 0xf3, 0xc1, 0x07, 0x52,
0x68, 0x03, 0x96, 0x9c, 0x6d, 0x27, 0x10, 0xc9, 0x72, 0x11, 0x70, 0xb6, 0x1d, 0x5f, 0xe0, 0x55,
0x58, 0x66, 0x2b, 0x0d, 0x24, 0x72, 0xfc, 0x47, 0x96, 0xd8, 0x98, 0x14, 0x51, 0xff, 0x92, 0x81,
0xea, 0x64, 0x38, 0xa3, 0xdb, 0x90, 0x63, 0xc8, 0x26, 0x41, 0xaa, 0xbe, 0x29, 0x60, 0x6f, 0xd3,
0x87, 0xbd, 0xcd, 0x8e, 0x0f, 0x7b, 0x8d, 0xd2, 0x57, 0xdf, 0x6c, 0xa4, 0x9e, 0xfe, 0x7d, 0x23,
0x8d, 0xb9, 0x06, 0x7a, 0x89, 0x45, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x29, 0x2b, 0x2c, 0xb4,
0x74, 0xd3, 0xda, 0x35, 0xd0, 0x1e, 0x54, 0x7b, 0xb6, 0xe5, 0x11, 0xcb, 0x1b, 0x7b, 0x9a, 0x80,
0x55, 0x09, 0x4d, 0xb1, 0x00, 0x13, 0x60, 0xdd, 0xf4, 0x25, 0x0f, 0xb9, 0x20, 0x5e, 0xe9, 0xc5,
0x07, 0xd0, 0x5d, 0x80, 0x13, 0x7d, 0x68, 0x1a, 0x3a, 0xb5, 0x5d, 0xaf, 0x96, 0xbb, 0x9a, 0x9d,
0x19, 0x65, 0x0f, 0x7c, 0x91, 0xfb, 0x8e, 0xa1, 0x53, 0xd2, 0xc8, 0xb1, 0xe9, 0xe2, 0x88, 0x26,
0x7a, 0x03, 0x56, 0x74, 0xc7, 0xd1, 0x3c, 0xaa, 0x53, 0xa2, 0x75, 0xcf, 0x28, 0xf1, 0x38, 0x6c,
0x2d, 0xe3, 0xb2, 0xee, 0x38, 0x47, 0x6c, 0xb4, 0xc1, 0x06, 0xd1, 0xeb, 0x50, 0x61, 0x08, 0x67,
0xea, 0x43, 0x6d, 0x40, 0xcc, 0xfe, 0x80, 0x72, 0x80, 0xca, 0xe2, 0xb2, 0x1c, 0x6d, 0xf3, 0x41,
0xd5, 0x08, 0x76, 0x9c, 0xa3, 0x1b, 0x42, 0x90, 0x33, 0x74, 0xaa, 0x73, 0x4f, 0x2e, 0x63, 0xde,
0x66, 0x63, 0x8e, 0x4e, 0x07, 0xd2, 0x3f, 0xbc, 0x8d, 0x2e, 0x43, 0x41, 0x9a, 0xcd, 0x72, 0xb3,
0xb2, 0x87, 0xd6, 0x20, 0xef, 0xb8, 0xf6, 0x09, 0xe1, 0x5b, 0x57, 0xc2, 0xa2, 0xa3, 0xfe, 0x2a,
0x03, 0xab, 0x53, 0x38, 0xc8, 0xec, 0x0e, 0x74, 0x6f, 0xe0, 0xff, 0x16, 0x6b, 0xa3, 0x5b, 0xcc,
0xae, 0x6e, 0x10, 0x57, 0xe6, 0x8e, 0xda, 0xb4, 0xab, 0xdb, 0xfc, 0xbb, 0x74, 0x8d, 0x94, 0x46,
0x07, 0x50, 0x1d, 0xea, 0x1e, 0xd5, 0x04, 0xae, 0x68, 0x91, 0x3c, 0x32, 0x8d, 0xa6, 0x7b, 0xba,
0x8f, 0x44, 0xec, 0x50, 0x4b, 0x43, 0x95, 0x61, 0x6c, 0x14, 0x61, 0x58, 0xeb, 0x9e, 0x7d, 0xa1,
0x5b, 0xd4, 0xb4, 0x88, 0x36, 0xb5, 0x73, 0x2f, 0x4d, 0x19, 0x6d, 0x9d, 0x98, 0x06, 0xb1, 0x7a,
0xfe, 0x96, 0x5d, 0x0a, 0x94, 0x83, 0x2d, 0xf5, 0x54, 0x0c, 0x95, 0x38, 0x92, 0xa3, 0x0a, 0x64,
0xe8, 0xa9, 0x74, 0x40, 0x86, 0x9e, 0xa2, 0xef, 0x42, 0x8e, 0x2d, 0x92, 0x2f, 0xbe, 0x32, 0x23,
0x05, 0x4a, 0xbd, 0xce, 0x99, 0x43, 0x30, 0x97, 0x54, 0xd5, 0x20, 0x1c, 0x02, 0x74, 0x9f, 0xb4,
0xaa, 0x5e, 0x83, 0x95, 0x09, 0xf8, 0x8e, 0xec, 0x5f, 0x3a, 0xba, 0x7f, 0xea, 0x0a, 0x94, 0x63,
0x58, 0xad, 0x5e, 0x86, 0xb5, 0x59, 0xd0, 0xab, 0x0e, 0x82, 0xf1, 0x18, 0x84, 0xa2, 0x9b, 0x50,
0x0a, 0xb0, 0x57, 0x84, 0xe3, 0xb4, 0xaf, 0x7c, 0x61, 0x1c, 0x88, 0xb2, 0x38, 0x64, 0xc7, 0x9a,
0x9f, 0x87, 0x0c, 0x9f, 0x78, 0x51, 0x77, 0x9c, 0xb6, 0xee, 0x0d, 0xd4, 0x4f, 0xa1, 0x96, 0x84,
0xab, 0x13, 0xcb, 0xc8, 0x05, 0xc7, 0xf0, 0x32, 0x14, 0x8e, 0x6d, 0x77, 0xa4, 0x53, 0x6e, 0xac,
0x8c, 0x65, 0x8f, 0x1d, 0x4f, 0x81, 0xb1, 0x59, 0x3e, 0x2c, 0x3a, 0xaa, 0x06, 0x2f, 0x25, 0x62,
0x2b, 0x53, 0x31, 0x2d, 0x83, 0x08, 0x7f, 0x96, 0xb1, 0xe8, 0x84, 0x86, 0xc4, 0x64, 0x45, 0x87,
0xfd, 0xac, 0xc7, 0xd7, 0xca, 0xed, 0x2b, 0x58, 0xf6, 0xd4, 0xdf, 0x95, 0xa0, 0x84, 0x89, 0xe7,
0x30, 0x4c, 0x40, 0x0d, 0x50, 0xc8, 0x69, 0x8f, 0x38, 0xd4, 0x87, 0xd1, 0xd9, 0xac, 0x41, 0x48,
0xb7, 0x7c, 0x49, 0x96, 0xb2, 0x03, 0x35, 0x74, 0x43, 0xb2, 0xb2, 0x64, 0x82, 0x25, 0xd5, 0xa3,
0xb4, 0xec, 0x96, 0x4f, 0xcb, 0xb2, 0x89, 0x59, 0x5a, 0x68, 0x4d, 0xf0, 0xb2, 0x1b, 0x92, 0x97,
0xe5, 0xe6, 0xfc, 0x58, 0x8c, 0x98, 0x35, 0x63, 0xc4, 0x2c, 0x3f, 0x67, 0x99, 0x09, 0xcc, 0xec,
0x96, 0xcf, 0xcc, 0x0a, 0x73, 0x66, 0x3c, 0x41, 0xcd, 0xee, 0xc6, 0xa9, 0x99, 0xa0, 0x55, 0xaf,
0x25, 0x6a, 0x27, 0x72, 0xb3, 0x1f, 0x44, 0xb8, 0x59, 0x29, 0x91, 0x18, 0x09, 0x23, 0x33, 0xc8,
0x59, 0x33, 0x46, 0xce, 0x94, 0x39, 0x3e, 0x48, 0x60, 0x67, 0x1f, 0x44, 0xd9, 0x19, 0x24, 0x12,
0x3c, 0xb9, 0xdf, 0xb3, 0xe8, 0xd9, 0x7b, 0x01, 0x3d, 0x5b, 0x4a, 0xe4, 0x97, 0x72, 0x0d, 0x93,
0xfc, 0xec, 0x60, 0x8a, 0x9f, 0x09, 0x3e, 0xf5, 0x46, 0xa2, 0x89, 0x39, 0x04, 0xed, 0x60, 0x8a,
0xa0, 0x95, 0xe7, 0x18, 0x9c, 0xc3, 0xd0, 0x7e, 0x31, 0x9b, 0xa1, 0x25, 0x73, 0x28, 0x39, 0xcd,
0xc5, 0x28, 0x9a, 0x96, 0x40, 0xd1, 0x56, 0xb8, 0xf9, 0xb7, 0x12, 0xcd, 0x5f, 0x9c, 0xa3, 0x5d,
0x63, 0x19, 0x72, 0x22, 0xe6, 0x19, 0xca, 0x10, 0xd7, 0xb5, 0x5d, 0xc9, 0xb6, 0x44, 0x47, 0x7d,
0x93, 0xe5, 0xec, 0x30, 0xbe, 0xcf, 0xe1, 0x73, 0x1c, 0xcd, 0x23, 0x31, 0xad, 0xfe, 0x31, 0x1d,
0xea, 0xf2, 0x34, 0x17, 0xcd, 0xf7, 0x8a, 0xcc, 0xf7, 0x11, 0x96, 0x97, 0x89, 0xb3, 0xbc, 0x0d,
0x58, 0x62, 0x28, 0x3d, 0x41, 0xe0, 0x74, 0x27, 0x20, 0x70, 0xd7, 0x61, 0x95, 0xa7, 0x61, 0xc1,
0x05, 0x25, 0x34, 0xe7, 0x78, 0x86, 0x59, 0x61, 0x1f, 0xc4, 0xe1, 0x14, 0x18, 0xfd, 0x0e, 0x5c,
0x8a, 0xc8, 0x06, 0xe8, 0x2f, 0xd8, 0x4c, 0x35, 0x90, 0xde, 0x91, 0x69, 0xe0, 0xcf, 0xe9, 0xd0,
0x43, 0x21, 0xf3, 0x9b, 0x45, 0xd2, 0xd2, 0xff, 0x23, 0x92, 0x96, 0xf9, 0xaf, 0x49, 0x5a, 0x34,
0x9b, 0x65, 0xe3, 0xd9, 0xec, 0x5f, 0xe9, 0x70, 0x4f, 0x02, 0xca, 0xd5, 0xb3, 0x0d, 0x22, 0xf3,
0x0b, 0x6f, 0xa3, 0x2a, 0x64, 0x87, 0x76, 0x5f, 0x66, 0x11, 0xd6, 0x64, 0x52, 0x01, 0x08, 0x2b,
0x12, 0x63, 0x83, 0xd4, 0x94, 0xe7, 0x1e, 0x96, 0xa9, 0xa9, 0x0a, 0xd9, 0x47, 0x44, 0x40, 0xe6,
0x32, 0x66, 0x4d, 0x26, 0xc7, 0x0f, 0x19, 0x07, 0xc2, 0x65, 0x2c, 0x3a, 0xe8, 0x36, 0x28, 0xbc,
0x0c, 0xa1, 0xd9, 0x8e, 0x27, 0xd1, 0xed, 0xe5, 0xe8, 0x5a, 0x45, 0xb5, 0x61, 0xf3, 0x90, 0xc9,
0x1c, 0x38, 0x1e, 0x2e, 0x39, 0xb2, 0x15, 0xc9, 0xba, 0x4a, 0x8c, 0xfc, 0x5d, 0x01, 0x85, 0xcd,
0xde, 0x73, 0xf4, 0x1e, 0xe1, 0x50, 0xa5, 0xe0, 0x70, 0x40, 0x7d, 0x08, 0x68, 0x1a, 0x70, 0x51,
0x1b, 0x0a, 0xe4, 0x84, 0x58, 0x94, 0x6d, 0x1b, 0x73, 0xf7, 0xe5, 0x19, 0xcc, 0x8a, 0x58, 0xb4,
0x51, 0x63, 0x4e, 0xfe, 0xe7, 0x37, 0x1b, 0x55, 0x21, 0xfd, 0xb6, 0x3d, 0x32, 0x29, 0x19, 0x39,
0xf4, 0x0c, 0x4b, 0x7d, 0xf5, 0x0f, 0x19, 0x46, 0x73, 0x62, 0x60, 0x3c, 0xd3, 0xb7, 0xfe, 0x91,
0xcf, 0x44, 0x28, 0xee, 0x62, 0xfe, 0x5e, 0x07, 0xe8, 0xeb, 0x9e, 0xf6, 0x58, 0xb7, 0x28, 0x31,
0xa4, 0xd3, 0x23, 0x23, 0xa8, 0x0e, 0x25, 0xd6, 0x1b, 0x7b, 0xc4, 0x90, 0x6c, 0x3b, 0xe8, 0x47,
0xd6, 0x59, 0xfc, 0x76, 0xeb, 0x8c, 0x7b, 0xb9, 0x34, 0xe1, 0xe5, 0x08, 0x05, 0x51, 0xa2, 0x14,
0x84, 0xcd, 0xcd, 0x71, 0x4d, 0xdb, 0x35, 0xe9, 0x19, 0xdf, 0x9a, 0x2c, 0x0e, 0xfa, 0xea, 0xaf,
0x33, 0x61, 0x68, 0x85, 0x2c, 0xf2, 0xff, 0xce, 0x77, 0xea, 0x6f, 0xf8, 0xdd, 0x32, 0x9e, 0x49,
0xd1, 0x11, 0xac, 0x06, 0x91, 0xad, 0x8d, 0x79, 0xc4, 0xfb, 0x67, 0x75, 0x51, 0x68, 0xa8, 0x9e,
0xc4, 0x87, 0x3d, 0xf4, 0x31, 0xbc, 0x38, 0x01, 0x5b, 0x81, 0xe9, 0xcc, 0xa2, 0xe8, 0xf5, 0x42,
0x1c, 0xbd, 0x7c, 0xd3, 0xa1, 0xb3, 0xb2, 0xdf, 0x32, 0xa0, 0x76, 0xd9, 0x75, 0x25, 0x4a, 0x0c,
0x66, 0x6e, 0xff, 0x6b, 0x50, 0x76, 0x09, 0x65, 0x57, 0xe8, 0xd8, 0x85, 0x70, 0x59, 0x0c, 0xca,
0x6b, 0xe6, 0x21, 0xbc, 0x30, 0x93, 0x20, 0xa0, 0xef, 0x81, 0x12, 0x72, 0x8b, 0x74, 0xc2, 0xdd,
0x2a, 0xb8, 0x2f, 0x84, 0xb2, 0xea, 0x9f, 0xd2, 0xa1, 0xc9, 0xf8, 0x0d, 0xa4, 0x05, 0x05, 0x97,
0x78, 0xe3, 0xa1, 0xb8, 0x13, 0x54, 0xb6, 0xdf, 0x59, 0x8c, 0x5a, 0xb0, 0xd1, 0xf1, 0x90, 0x62,
0xa9, 0xac, 0x3e, 0x84, 0x82, 0x18, 0x41, 0x4b, 0x50, 0xbc, 0xbf, 0x7f, 0x6f, 0xff, 0xe0, 0xa3,
0xfd, 0x6a, 0x0a, 0x01, 0x14, 0x76, 0x9a, 0xcd, 0xd6, 0x61, 0xa7, 0x9a, 0x46, 0x0a, 0xe4, 0x77,
0x1a, 0x07, 0xb8, 0x53, 0xcd, 0xb0, 0x61, 0xdc, 0xfa, 0xb0, 0xd5, 0xec, 0x54, 0xb3, 0x68, 0x15,
0xca, 0xa2, 0xad, 0xdd, 0x3d, 0xc0, 0x3f, 0xd9, 0xe9, 0x54, 0x73, 0x91, 0xa1, 0xa3, 0xd6, 0xfe,
0x9d, 0x16, 0xae, 0xe6, 0xd5, 0x77, 0xd9, 0xa5, 0x23, 0x81, 0x8c, 0x84, 0xd7, 0x8b, 0x74, 0xe4,
0x7a, 0xa1, 0xfe, 0x36, 0x03, 0xf5, 0x64, 0x86, 0x81, 0x3e, 0x9c, 0x58, 0xf8, 0xf6, 0x05, 0xe8,
0xc9, 0xc4, 0xea, 0xd1, 0xeb, 0x50, 0x71, 0xc9, 0x31, 0xa1, 0xbd, 0x81, 0x60, 0x3c, 0x22, 0x1b,
0x96, 0x71, 0x59, 0x8e, 0x72, 0x25, 0x4f, 0x88, 0x7d, 0x46, 0x7a, 0x54, 0x13, 0x30, 0x23, 0x0e,
0x9d, 0xc2, 0xc4, 0xd8, 0xe8, 0x91, 0x18, 0x54, 0x3f, 0xbd, 0x90, 0x2f, 0x15, 0xc8, 0xe3, 0x56,
0x07, 0x7f, 0x5c, 0xcd, 0x22, 0x04, 0x15, 0xde, 0xd4, 0x8e, 0xf6, 0x77, 0x0e, 0x8f, 0xda, 0x07,
0xcc, 0x97, 0x97, 0x60, 0xc5, 0xf7, 0xa5, 0x3f, 0x98, 0x57, 0x3f, 0x81, 0x4a, 0xfc, 0x5a, 0xcf,
0x5c, 0xe8, 0xda, 0x63, 0xcb, 0xe0, 0xce, 0xc8, 0x63, 0xd1, 0x41, 0x37, 0x21, 0x7f, 0x62, 0x8b,
0x30, 0x9b, 0x7d, 0xd6, 0x1e, 0xd8, 0x94, 0x44, 0xca, 0x02, 0x42, 0x5a, 0xfd, 0x02, 0xf2, 0x3c,
0x6a, 0x58, 0x04, 0xf0, 0x0b, 0xba, 0xe4, 0x4b, 0xac, 0x8d, 0x3e, 0x01, 0xd0, 0x29, 0x75, 0xcd,
0xee, 0x38, 0x34, 0xbc, 0x31, 0x3b, 0xea, 0x76, 0x7c, 0xb9, 0xc6, 0x15, 0x19, 0x7e, 0x6b, 0xa1,
0x6a, 0x24, 0x04, 0x23, 0x06, 0xd5, 0x7d, 0xa8, 0xc4, 0x75, 0xfd, 0x0c, 0x2f, 0xe6, 0x10, 0xcf,
0xf0, 0x82, 0xb0, 0xc9, 0x0c, 0x1f, 0xf0, 0x83, 0xac, 0x28, 0xc6, 0xf0, 0x8e, 0xfa, 0x24, 0x0d,
0xa5, 0xce, 0xa9, 0xdc, 0x8f, 0x84, 0x3a, 0x40, 0xa8, 0x9a, 0x89, 0xde, 0x7a, 0x45, 0x61, 0x21,
0x1b, 0x94, 0x2b, 0x3e, 0x08, 0x4e, 0x5c, 0x6e, 0xd1, 0xcb, 0x8d, 0x5f, 0xb7, 0x91, 0x51, 0xf6,
0x3e, 0x28, 0x01, 0x66, 0x32, 0xe2, 0xa9, 0x1b, 0x86, 0x4b, 0x3c, 0x4f, 0x9e, 0x7b, 0xbf, 0xcb,
0xcb, 0x4a, 0xf6, 0x63, 0x79, 0xaf, 0xce, 0x62, 0xd1, 0x51, 0x0d, 0x58, 0x99, 0x00, 0x5c, 0xf4,
0x3e, 0x14, 0x9d, 0x71, 0x57, 0xf3, 0xdd, 0x33, 0xf1, 0x8c, 0xe0, 0x53, 0x9a, 0x71, 0x77, 0x68,
0xf6, 0xee, 0x91, 0x33, 0x7f, 0x32, 0xce, 0xb8, 0x7b, 0x4f, 0x78, 0x51, 0xfc, 0x4a, 0x26, 0xfa,
0x2b, 0x27, 0x50, 0xf2, 0x0f, 0x05, 0xfa, 0x21, 0x28, 0x01, 0x96, 0x07, 0xd5, 0xc6, 0xc4, 0x24,
0x20, 0xcd, 0x87, 0x2a, 0x8c, 0x1f, 0x7b, 0x66, 0xdf, 0x22, 0x86, 0x16, 0x52, 0x5f, 0xfe, 0x6b,
0x25, 0xbc, 0x22, 0x3e, 0xec, 0xf9, 0xbc, 0x57, 0xfd, 0x77, 0x1a, 0x4a, 0x7e, 0x55, 0x09, 0xbd,
0x1b, 0x39, 0x77, 0x95, 0x19, 0x77, 0x70, 0x5f, 0x30, 0xac, 0x0c, 0xc5, 0xe7, 0x9a, 0xb9, 0xf8,
0x5c, 0x93, 0x4a, 0x7c, 0x7e, 0xb1, 0x35, 0x77, 0xe1, 0x62, 0xeb, 0xdb, 0x80, 0xa8, 0x4d, 0xf5,
0xa1, 0x76, 0x62, 0x53, 0xd3, 0xea, 0x6b, 0xc2, 0xd9, 0x82, 0x0b, 0x54, 0xf9, 0x97, 0x07, 0xfc,
0xc3, 0x21, 0xf7, 0xfb, 0x2f, 0xd3, 0x50, 0x0a, 0x40, 0xfd, 0xa2, 0x85, 0x9e, 0xcb, 0x50, 0x90,
0xb8, 0x25, 0x2a, 0x3d, 0xb2, 0x17, 0xd4, 0x1c, 0x73, 0x91, 0x9a, 0x63, 0x1d, 0x4a, 0x23, 0x42,
0x75, 0x9e, 0xd9, 0xc4, 0xed, 0x23, 0xe8, 0x5f, 0x7f, 0x0f, 0x96, 0x22, 0x35, 0x37, 0x16, 0x79,
0xfb, 0xad, 0x8f, 0xaa, 0xa9, 0x7a, 0xf1, 0xc9, 0x97, 0x57, 0xb3, 0xfb, 0xe4, 0x31, 0x3b, 0xb3,
0xb8, 0xd5, 0x6c, 0xb7, 0x9a, 0xf7, 0xaa, 0xe9, 0xfa, 0xd2, 0x93, 0x2f, 0xaf, 0x16, 0x31, 0xe1,
0xf7, 0xff, 0xeb, 0x6d, 0x58, 0x8e, 0xee, 0x4a, 0x1c, 0xfa, 0x10, 0x54, 0xee, 0xdc, 0x3f, 0xdc,
0xdb, 0x6d, 0xee, 0x74, 0x5a, 0xda, 0x83, 0x83, 0x4e, 0xab, 0x9a, 0x46, 0x2f, 0xc2, 0xa5, 0xbd,
0xdd, 0x1f, 0xb7, 0x3b, 0x5a, 0x73, 0x6f, 0xb7, 0xb5, 0xdf, 0xd1, 0x76, 0x3a, 0x9d, 0x9d, 0xe6,
0xbd, 0x6a, 0x66, 0xfb, 0xf7, 0x0a, 0xac, 0xec, 0x34, 0x9a, 0xbb, 0x0c, 0xb6, 0xcd, 0x9e, 0xce,
0xaf, 0x86, 0x4d, 0xc8, 0xf1, 0xcb, 0xdf, 0xb9, 0x2f, 0x72, 0xf5, 0xf3, 0x2b, 0x43, 0xe8, 0x2e,
0xe4, 0xf9, 0xbd, 0x10, 0x9d, 0xff, 0x44, 0x57, 0x9f, 0x53, 0x2a, 0x62, 0x93, 0xe1, 0xe1, 0x71,
0xee, 0x9b, 0x5d, 0xfd, 0xfc, 0xca, 0x11, 0xc2, 0xa0, 0x84, 0xe4, 0x73, 0xfe, 0x1b, 0x56, 0x7d,
0x01, 0xb0, 0x41, 0x7b, 0x50, 0xf4, 0xaf, 0x02, 0xf3, 0x5e, 0xd5, 0xea, 0x73, 0x4b, 0x3b, 0xcc,
0x5d, 0xe2, 0xca, 0x76, 0xfe, 0x13, 0x61, 0x7d, 0x4e, 0x9d, 0x0a, 0xed, 0x42, 0x41, 0x12, 0xaa,
0x39, 0x2f, 0x65, 0xf5, 0x79, 0xa5, 0x1a, 0xe6, 0xb4, 0xf0, 0x32, 0x3c, 0xff, 0xe1, 0xb3, 0xbe,
0x40, 0x09, 0x0e, 0xdd, 0x07, 0x88, 0x5c, 0xd0, 0x16, 0x78, 0xd1, 0xac, 0x2f, 0x52, 0x5a, 0x43,
0x07, 0x50, 0x0a, 0x48, 0xf5, 0xdc, 0xf7, 0xc5, 0xfa, 0xfc, 0x1a, 0x17, 0x7a, 0x08, 0xe5, 0x38,
0x99, 0x5c, 0xec, 0xd5, 0xb0, 0xbe, 0x60, 0xf1, 0x8a, 0xd9, 0x8f, 0x33, 0xcb, 0xc5, 0x5e, 0x11,
0xeb, 0x0b, 0xd6, 0xb2, 0xd0, 0x67, 0xb0, 0x3a, 0xcd, 0xfc, 0x16, 0x7f, 0x54, 0xac, 0x5f, 0xa0,
0xba, 0x85, 0x46, 0x80, 0x66, 0x30, 0xc6, 0x0b, 0xbc, 0x31, 0xd6, 0x2f, 0x52, 0xec, 0x6a, 0xb4,
0xbe, 0x7a, 0xb6, 0x9e, 0xfe, 0xfa, 0xd9, 0x7a, 0xfa, 0x1f, 0xcf, 0xd6, 0xd3, 0x4f, 0x9f, 0xaf,
0xa7, 0xbe, 0x7e, 0xbe, 0x9e, 0xfa, 0xdb, 0xf3, 0xf5, 0xd4, 0xcf, 0xde, 0xea, 0x9b, 0x74, 0x30,
0xee, 0x6e, 0xf6, 0xec, 0xd1, 0x56, 0xf4, 0xcf, 0x0b, 0xb3, 0xfe, 0x50, 0xd1, 0x2d, 0xf0, 0xa4,
0x72, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xed, 0x8f, 0xef, 0x31, 0x70, 0x21, 0x00, 0x00,
// 2627 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0xdb, 0xc6,
0x15, 0xe7, 0x37, 0x89, 0x47, 0x91, 0xa2, 0xd6, 0x8a, 0x43, 0x33, 0xb6, 0xe4, 0xc0, 0xe3, 0x34,
0x76, 0x12, 0xa9, 0x91, 0xc7, 0xae, 0x33, 0xe9, 0x47, 0x44, 0x9a, 0x2e, 0x15, 0xab, 0x92, 0xba,
0xa2, 0x9d, 0x49, 0xdb, 0x18, 0x01, 0x89, 0x15, 0x89, 0x98, 0x04, 0x10, 0x60, 0x29, 0x4b, 0x39,
0x76, 0xda, 0x8b, 0xa7, 0x07, 0x1f, 0x7b, 0xc9, 0x4c, 0xff, 0x83, 0x5e, 0x7b, 0xea, 0xa9, 0x87,
0x1c, 0xda, 0x99, 0x1c, 0x7b, 0xe8, 0xa4, 0x1d, 0xfb, 0xd6, 0x7f, 0xa0, 0xa7, 0xce, 0x74, 0xf6,
0x03, 0x20, 0x40, 0x12, 0x22, 0xd5, 0xf4, 0xd6, 0xdb, 0xee, 0xc3, 0x7b, 0x8f, 0xbb, 0x6f, 0xf7,
0xfd, 0xf6, 0xb7, 0x6f, 0x09, 0xaf, 0x51, 0x62, 0x19, 0xc4, 0x1d, 0x9a, 0x16, 0xdd, 0xd4, 0x3b,
0x5d, 0x73, 0x93, 0x9e, 0x3a, 0xc4, 0xdb, 0x70, 0x5c, 0x9b, 0xda, 0x68, 0x79, 0xfc, 0x71, 0x83,
0x7d, 0xac, 0x5d, 0x09, 0x69, 0x77, 0xdd, 0x53, 0x87, 0xda, 0x9b, 0x8e, 0x6b, 0xdb, 0x47, 0x42,
0xbf, 0x76, 0x39, 0xf4, 0x99, 0xfb, 0x09, 0x7b, 0x8b, 0x7c, 0x95, 0xc6, 0x4f, 0xc8, 0xa9, 0xff,
0xf5, 0xca, 0x94, 0xad, 0xa3, 0xbb, 0xfa, 0xd0, 0xff, 0xbc, 0xde, 0xb3, 0xed, 0xde, 0x80, 0x6c,
0xf2, 0x5e, 0x67, 0x74, 0xb4, 0x49, 0xcd, 0x21, 0xf1, 0xa8, 0x3e, 0x74, 0xa4, 0xc2, 0x6a, 0xcf,
0xee, 0xd9, 0xbc, 0xb9, 0xc9, 0x5a, 0x42, 0xaa, 0xfe, 0x25, 0x0f, 0x79, 0x4c, 0x3e, 0x1f, 0x11,
0x8f, 0xa2, 0x2d, 0xc8, 0x90, 0x6e, 0xdf, 0xae, 0x26, 0xaf, 0x26, 0xdf, 0x2c, 0x6e, 0x5d, 0xde,
0x98, 0x98, 0xdc, 0x86, 0xd4, 0x6b, 0x76, 0xfb, 0x76, 0x2b, 0x81, 0xb9, 0x2e, 0xba, 0x0d, 0xd9,
0xa3, 0xc1, 0xc8, 0xeb, 0x57, 0x53, 0xdc, 0xe8, 0x4a, 0x9c, 0xd1, 0x7d, 0xa6, 0xd4, 0x4a, 0x60,
0xa1, 0xcd, 0x7e, 0xca, 0xb4, 0x8e, 0xec, 0x6a, 0xfa, 0xec, 0x9f, 0xda, 0xb1, 0x8e, 0xf8, 0x4f,
0x31, 0x5d, 0x54, 0x07, 0x30, 0x2d, 0x93, 0x6a, 0xdd, 0xbe, 0x6e, 0x5a, 0xd5, 0x0c, 0xb7, 0x7c,
0x3d, 0xde, 0xd2, 0xa4, 0x0d, 0xa6, 0xd8, 0x4a, 0x60, 0xc5, 0xf4, 0x3b, 0x6c, 0xb8, 0x9f, 0x8f,
0x88, 0x7b, 0x5a, 0xcd, 0x9e, 0x3d, 0xdc, 0x9f, 0x32, 0x25, 0x36, 0x5c, 0xae, 0x8d, 0x9a, 0x50,
0xec, 0x90, 0x9e, 0x69, 0x69, 0x9d, 0x81, 0xdd, 0x7d, 0x52, 0xcd, 0x71, 0x63, 0x35, 0xce, 0xb8,
0xce, 0x54, 0xeb, 0x4c, 0xb3, 0x95, 0xc0, 0xd0, 0x09, 0x7a, 0xe8, 0xfb, 0x50, 0xe8, 0xf6, 0x49,
0xf7, 0x89, 0x46, 0x4f, 0xaa, 0x79, 0xee, 0x63, 0x3d, 0xce, 0x47, 0x83, 0xe9, 0xb5, 0x4f, 0x5a,
0x09, 0x9c, 0xef, 0x8a, 0x26, 0x9b, 0xbf, 0x41, 0x06, 0xe6, 0x31, 0x71, 0x99, 0x7d, 0xe1, 0xec,
0xf9, 0xdf, 0x13, 0x9a, 0xdc, 0x83, 0x62, 0xf8, 0x1d, 0xf4, 0x23, 0x50, 0x88, 0x65, 0xc8, 0x69,
0x28, 0xdc, 0xc5, 0xd5, 0xd8, 0x75, 0xb6, 0x0c, 0x7f, 0x12, 0x05, 0x22, 0xdb, 0xe8, 0x2e, 0xe4,
0xba, 0xf6, 0x70, 0x68, 0xd2, 0x2a, 0x70, 0xeb, 0xb5, 0xd8, 0x09, 0x70, 0xad, 0x56, 0x02, 0x4b,
0x7d, 0xb4, 0x07, 0xe5, 0x81, 0xe9, 0x51, 0xcd, 0xb3, 0x74, 0xc7, 0xeb, 0xdb, 0xd4, 0xab, 0x16,
0xb9, 0x87, 0xeb, 0x71, 0x1e, 0x76, 0x4d, 0x8f, 0x1e, 0xfa, 0xca, 0xad, 0x04, 0x2e, 0x0d, 0xc2,
0x02, 0xe6, 0xcf, 0x3e, 0x3a, 0x22, 0x6e, 0xe0, 0xb0, 0xba, 0x74, 0xb6, 0xbf, 0x7d, 0xa6, 0xed,
0xdb, 0x33, 0x7f, 0x76, 0x58, 0x80, 0x7e, 0x0e, 0x17, 0x06, 0xb6, 0x6e, 0x04, 0xee, 0xb4, 0x6e,
0x7f, 0x64, 0x3d, 0xa9, 0x96, 0xb8, 0xd3, 0x1b, 0xb1, 0x83, 0xb4, 0x75, 0xc3, 0x77, 0xd1, 0x60,
0x06, 0xad, 0x04, 0x5e, 0x19, 0x4c, 0x0a, 0xd1, 0x63, 0x58, 0xd5, 0x1d, 0x67, 0x70, 0x3a, 0xe9,
0xbd, 0xcc, 0xbd, 0xdf, 0x8c, 0xf3, 0xbe, 0xcd, 0x6c, 0x26, 0xdd, 0x23, 0x7d, 0x4a, 0x5a, 0xcf,
0x43, 0xf6, 0x58, 0x1f, 0x8c, 0x88, 0xfa, 0x1d, 0x28, 0x86, 0xd2, 0x14, 0x55, 0x21, 0x3f, 0x24,
0x9e, 0xa7, 0xf7, 0x08, 0xcf, 0x6a, 0x05, 0xfb, 0x5d, 0xb5, 0x0c, 0x4b, 0xe1, 0xd4, 0x54, 0x9f,
0x27, 0x03, 0x4b, 0x96, 0x75, 0xcc, 0xf2, 0x98, 0xb8, 0x9e, 0x69, 0x5b, 0xbe, 0xa5, 0xec, 0xa2,
0x6b, 0x50, 0xe2, 0xfb, 0x47, 0xf3, 0xbf, 0xb3, 0xd4, 0xcf, 0xe0, 0x25, 0x2e, 0x7c, 0x24, 0x95,
0xd6, 0xa1, 0xe8, 0x6c, 0x39, 0x81, 0x4a, 0x9a, 0xab, 0x80, 0xb3, 0xe5, 0xf8, 0x0a, 0xaf, 0xc3,
0x12, 0x9b, 0x69, 0xa0, 0x91, 0xe1, 0x3f, 0x52, 0x64, 0x32, 0xa9, 0xa2, 0xfe, 0x39, 0x05, 0x95,
0xc9, 0x74, 0x46, 0x77, 0x21, 0xc3, 0x90, 0x4d, 0x82, 0x54, 0x6d, 0x43, 0xc0, 0xde, 0x86, 0x0f,
0x7b, 0x1b, 0x6d, 0x1f, 0xf6, 0xea, 0x85, 0xaf, 0xbe, 0x59, 0x4f, 0x3c, 0xff, 0xfb, 0x7a, 0x12,
0x73, 0x0b, 0x74, 0x89, 0x65, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x21, 0x2b, 0x2c, 0xb5, 0x74,
0xd3, 0xda, 0x31, 0xd0, 0x2e, 0x54, 0xba, 0xb6, 0xe5, 0x11, 0xcb, 0x1b, 0x79, 0x9a, 0x80, 0x55,
0x09, 0x4d, 0x91, 0x04, 0x13, 0x60, 0xdd, 0xf0, 0x35, 0x0f, 0xb8, 0x22, 0x5e, 0xee, 0x46, 0x05,
0xe8, 0x3e, 0xc0, 0xb1, 0x3e, 0x30, 0x0d, 0x9d, 0xda, 0xae, 0x57, 0xcd, 0x5c, 0x4d, 0xcf, 0xcc,
0xb2, 0x47, 0xbe, 0xca, 0x43, 0xc7, 0xd0, 0x29, 0xa9, 0x67, 0xd8, 0x70, 0x71, 0xc8, 0x12, 0xbd,
0x01, 0xcb, 0xba, 0xe3, 0x68, 0x1e, 0xd5, 0x29, 0xd1, 0x3a, 0xa7, 0x94, 0x78, 0x1c, 0xb6, 0x96,
0x70, 0x49, 0x77, 0x9c, 0x43, 0x26, 0xad, 0x33, 0x21, 0xba, 0x0e, 0x65, 0x86, 0x70, 0xa6, 0x3e,
0xd0, 0xfa, 0xc4, 0xec, 0xf5, 0x29, 0x07, 0xa8, 0x34, 0x2e, 0x49, 0x69, 0x8b, 0x0b, 0x55, 0x23,
0x58, 0x71, 0x8e, 0x6e, 0x08, 0x41, 0xc6, 0xd0, 0xa9, 0xce, 0x23, 0xb9, 0x84, 0x79, 0x9b, 0xc9,
0x1c, 0x9d, 0xf6, 0x65, 0x7c, 0x78, 0x1b, 0x5d, 0x84, 0x9c, 0x74, 0x9b, 0xe6, 0x6e, 0x65, 0x0f,
0xad, 0x42, 0xd6, 0x71, 0xed, 0x63, 0xc2, 0x97, 0xae, 0x80, 0x45, 0x47, 0xfd, 0x55, 0x0a, 0x56,
0xa6, 0x70, 0x90, 0xf9, 0xed, 0xeb, 0x5e, 0xdf, 0xff, 0x2d, 0xd6, 0x46, 0x77, 0x98, 0x5f, 0xdd,
0x20, 0xae, 0x3c, 0x3b, 0xaa, 0xd3, 0xa1, 0x6e, 0xf1, 0xef, 0x32, 0x34, 0x52, 0x1b, 0xed, 0x43,
0x65, 0xa0, 0x7b, 0x54, 0x13, 0xb8, 0xa2, 0x85, 0xce, 0x91, 0x69, 0x34, 0xdd, 0xd5, 0x7d, 0x24,
0x62, 0x9b, 0x5a, 0x3a, 0x2a, 0x0f, 0x22, 0x52, 0x84, 0x61, 0xb5, 0x73, 0xfa, 0x85, 0x6e, 0x51,
0xd3, 0x22, 0xda, 0xd4, 0xca, 0x5d, 0x9a, 0x72, 0xda, 0x3c, 0x36, 0x0d, 0x62, 0x75, 0xfd, 0x25,
0xbb, 0x10, 0x18, 0x07, 0x4b, 0xea, 0xa9, 0x18, 0xca, 0x51, 0x24, 0x47, 0x65, 0x48, 0xd1, 0x13,
0x19, 0x80, 0x14, 0x3d, 0x41, 0xdf, 0x85, 0x0c, 0x9b, 0x24, 0x9f, 0x7c, 0x79, 0xc6, 0x11, 0x28,
0xed, 0xda, 0xa7, 0x0e, 0xc1, 0x5c, 0x53, 0x55, 0x83, 0x74, 0x08, 0xd0, 0x7d, 0xd2, 0xab, 0x7a,
0x03, 0x96, 0x27, 0xe0, 0x3b, 0xb4, 0x7e, 0xc9, 0xf0, 0xfa, 0xa9, 0xcb, 0x50, 0x8a, 0x60, 0xb5,
0x7a, 0x11, 0x56, 0x67, 0x41, 0xaf, 0xda, 0x0f, 0xe4, 0x11, 0x08, 0x45, 0xb7, 0xa1, 0x10, 0x60,
0xaf, 0x48, 0xc7, 0xe9, 0x58, 0xf9, 0xca, 0x38, 0x50, 0x65, 0x79, 0xc8, 0xb6, 0x35, 0xdf, 0x0f,
0x29, 0x3e, 0xf0, 0xbc, 0xee, 0x38, 0x2d, 0xdd, 0xeb, 0xab, 0x9f, 0x42, 0x35, 0x0e, 0x57, 0x27,
0xa6, 0x91, 0x09, 0xb6, 0xe1, 0x45, 0xc8, 0x1d, 0xd9, 0xee, 0x50, 0xa7, 0xdc, 0x59, 0x09, 0xcb,
0x1e, 0xdb, 0x9e, 0x02, 0x63, 0xd3, 0x5c, 0x2c, 0x3a, 0xaa, 0x06, 0x97, 0x62, 0xb1, 0x95, 0x99,
0x98, 0x96, 0x41, 0x44, 0x3c, 0x4b, 0x58, 0x74, 0xc6, 0x8e, 0xc4, 0x60, 0x45, 0x87, 0xfd, 0xac,
0xc7, 0xe7, 0xca, 0xfd, 0x2b, 0x58, 0xf6, 0xd4, 0xdf, 0x15, 0xa0, 0x80, 0x89, 0xe7, 0x30, 0x4c,
0x40, 0x75, 0x50, 0xc8, 0x49, 0x97, 0x38, 0xd4, 0x87, 0xd1, 0xd9, 0xac, 0x41, 0x68, 0x37, 0x7d,
0x4d, 0x76, 0x64, 0x07, 0x66, 0xe8, 0x96, 0x64, 0x65, 0xf1, 0x04, 0x4b, 0x9a, 0x87, 0x69, 0xd9,
0x1d, 0x9f, 0x96, 0xa5, 0x63, 0x4f, 0x69, 0x61, 0x35, 0xc1, 0xcb, 0x6e, 0x49, 0x5e, 0x96, 0x99,
0xf3, 0x63, 0x11, 0x62, 0xd6, 0x88, 0x10, 0xb3, 0xec, 0x9c, 0x69, 0xc6, 0x30, 0xb3, 0x3b, 0x3e,
0x33, 0xcb, 0xcd, 0x19, 0xf1, 0x04, 0x35, 0xbb, 0x1f, 0xa5, 0x66, 0x82, 0x56, 0x5d, 0x8b, 0xb5,
0x8e, 0xe5, 0x66, 0x3f, 0x08, 0x71, 0xb3, 0x42, 0x2c, 0x31, 0x12, 0x4e, 0x66, 0x90, 0xb3, 0x46,
0x84, 0x9c, 0x29, 0x73, 0x62, 0x10, 0xc3, 0xce, 0x3e, 0x08, 0xb3, 0x33, 0x88, 0x25, 0x78, 0x72,
0xbd, 0x67, 0xd1, 0xb3, 0xf7, 0x02, 0x7a, 0x56, 0x8c, 0xe5, 0x97, 0x72, 0x0e, 0x93, 0xfc, 0x6c,
0x7f, 0x8a, 0x9f, 0x09, 0x3e, 0xf5, 0x46, 0xac, 0x8b, 0x39, 0x04, 0x6d, 0x7f, 0x8a, 0xa0, 0x95,
0xe6, 0x38, 0x9c, 0xc3, 0xd0, 0x7e, 0x31, 0x9b, 0xa1, 0xc5, 0x73, 0x28, 0x39, 0xcc, 0xc5, 0x28,
0x9a, 0x16, 0x43, 0xd1, 0x96, 0xb9, 0xfb, 0xb7, 0x62, 0xdd, 0x9f, 0x9f, 0xa3, 0xdd, 0x60, 0x27,
0xe4, 0x44, 0xce, 0x33, 0x94, 0x21, 0xae, 0x6b, 0xbb, 0x92, 0x6d, 0x89, 0x8e, 0xfa, 0x26, 0x3b,
0xb3, 0xc7, 0xf9, 0x7d, 0x06, 0x9f, 0xe3, 0x68, 0x1e, 0xca, 0x69, 0xf5, 0x0f, 0xc9, 0xb1, 0x2d,
0x3f, 0xe6, 0xc2, 0xe7, 0xbd, 0x22, 0xcf, 0xfb, 0x10, 0xcb, 0x4b, 0x45, 0x59, 0xde, 0x3a, 0x14,
0x19, 0x4a, 0x4f, 0x10, 0x38, 0xdd, 0x09, 0x08, 0xdc, 0x4d, 0x58, 0xe1, 0xc7, 0xb0, 0xe0, 0x82,
0x12, 0x9a, 0x33, 0xfc, 0x84, 0x59, 0x66, 0x1f, 0xc4, 0xe6, 0x14, 0x18, 0xfd, 0x0e, 0x5c, 0x08,
0xe9, 0x06, 0xe8, 0x2f, 0xd8, 0x4c, 0x25, 0xd0, 0xde, 0x96, 0xc7, 0xc0, 0x9f, 0x92, 0xe3, 0x08,
0x8d, 0x99, 0xdf, 0x2c, 0x92, 0x96, 0xfc, 0x1f, 0x91, 0xb4, 0xd4, 0x7f, 0x4d, 0xd2, 0xc2, 0xa7,
0x59, 0x3a, 0x7a, 0x9a, 0xfd, 0x2b, 0x39, 0x5e, 0x93, 0x80, 0x72, 0x75, 0x6d, 0x83, 0xc8, 0xf3,
0x85, 0xb7, 0x51, 0x05, 0xd2, 0x03, 0xbb, 0x27, 0x4f, 0x11, 0xd6, 0x64, 0x5a, 0x01, 0x08, 0x2b,
0x12, 0x63, 0x83, 0xa3, 0x29, 0xcb, 0x23, 0x2c, 0x8f, 0xa6, 0x0a, 0xa4, 0x9f, 0x10, 0x01, 0x99,
0x4b, 0x98, 0x35, 0x99, 0x1e, 0xdf, 0x64, 0x1c, 0x08, 0x97, 0xb0, 0xe8, 0xa0, 0xbb, 0xa0, 0xf0,
0x32, 0x84, 0x66, 0x3b, 0x9e, 0x44, 0xb7, 0xd7, 0xc2, 0x73, 0x15, 0xd5, 0x86, 0x8d, 0x03, 0xa6,
0xb3, 0xef, 0x78, 0xb8, 0xe0, 0xc8, 0x56, 0xe8, 0xd4, 0x55, 0x22, 0xe4, 0xef, 0x32, 0x28, 0x6c,
0xf4, 0x9e, 0xa3, 0x77, 0x09, 0x87, 0x2a, 0x05, 0x8f, 0x05, 0xea, 0x63, 0x40, 0xd3, 0x80, 0x8b,
0x5a, 0x90, 0x23, 0xc7, 0xc4, 0xa2, 0x6c, 0xd9, 0x58, 0xb8, 0x2f, 0xce, 0x60, 0x56, 0xc4, 0xa2,
0xf5, 0x2a, 0x0b, 0xf2, 0x3f, 0xbf, 0x59, 0xaf, 0x08, 0xed, 0xb7, 0xed, 0xa1, 0x49, 0xc9, 0xd0,
0xa1, 0xa7, 0x58, 0xda, 0xab, 0x7f, 0x4b, 0x31, 0x9a, 0x13, 0x01, 0xe3, 0x99, 0xb1, 0xf5, 0xb7,
0x7c, 0x2a, 0x44, 0x71, 0x17, 0x8b, 0xf7, 0x1a, 0x40, 0x4f, 0xf7, 0xb4, 0xa7, 0xba, 0x45, 0x89,
0x21, 0x83, 0x1e, 0x92, 0xa0, 0x1a, 0x14, 0x58, 0x6f, 0xe4, 0x11, 0x43, 0xb2, 0xed, 0xa0, 0x1f,
0x9a, 0x67, 0xfe, 0xdb, 0xcd, 0x33, 0x1a, 0xe5, 0xc2, 0x44, 0x94, 0x43, 0x14, 0x44, 0x09, 0x53,
0x10, 0x36, 0x36, 0xc7, 0x35, 0x6d, 0xd7, 0xa4, 0xa7, 0x7c, 0x69, 0xd2, 0x38, 0xe8, 0xb3, 0xcb,
0xdb, 0x90, 0x0c, 0x1d, 0xdb, 0x1e, 0x68, 0x02, 0x6e, 0x8a, 0xdc, 0x74, 0x49, 0x0a, 0x9b, 0x1c,
0x75, 0x7e, 0x9d, 0x1a, 0xe7, 0xdf, 0x98, 0x6a, 0xfe, 0xdf, 0x05, 0x58, 0xfd, 0x0d, 0xbf, 0x80,
0x46, 0x8f, 0x5b, 0x74, 0x08, 0x2b, 0x41, 0xfa, 0x6b, 0x23, 0x0e, 0x0b, 0xfe, 0x86, 0x5e, 0x14,
0x3f, 0x2a, 0xc7, 0x51, 0xb1, 0x87, 0x3e, 0x86, 0x57, 0x27, 0xb0, 0x2d, 0x70, 0x9d, 0x5a, 0x14,
0xe2, 0x5e, 0x89, 0x42, 0x9c, 0xef, 0x7a, 0x1c, 0xac, 0xf4, 0xb7, 0xcc, 0xba, 0x1d, 0x76, 0xa7,
0x09, 0xb3, 0x87, 0x99, 0xcb, 0x7f, 0x0d, 0x4a, 0x2e, 0xa1, 0xec, 0x9e, 0x1d, 0xb9, 0x35, 0x2e,
0x09, 0xa1, 0xbc, 0x8b, 0x1e, 0xc0, 0x2b, 0x33, 0x59, 0x04, 0xfa, 0x1e, 0x28, 0x63, 0x02, 0x92,
0x8c, 0xb9, 0x80, 0x05, 0x97, 0x8a, 0xb1, 0xae, 0xfa, 0xc7, 0xe4, 0xd8, 0x65, 0xf4, 0x9a, 0xd2,
0x84, 0x9c, 0x4b, 0xbc, 0xd1, 0x40, 0x5c, 0x1c, 0xca, 0x5b, 0xef, 0x2c, 0xc6, 0x3f, 0x98, 0x74,
0x34, 0xa0, 0x58, 0x1a, 0xab, 0x8f, 0x21, 0x27, 0x24, 0xa8, 0x08, 0xf9, 0x87, 0x7b, 0x0f, 0xf6,
0xf6, 0x3f, 0xda, 0xab, 0x24, 0x10, 0x40, 0x6e, 0xbb, 0xd1, 0x68, 0x1e, 0xb4, 0x2b, 0x49, 0xa4,
0x40, 0x76, 0xbb, 0xbe, 0x8f, 0xdb, 0x95, 0x14, 0x13, 0xe3, 0xe6, 0x87, 0xcd, 0x46, 0xbb, 0x92,
0x46, 0x2b, 0x50, 0x12, 0x6d, 0xed, 0xfe, 0x3e, 0xfe, 0xc9, 0x76, 0xbb, 0x92, 0x09, 0x89, 0x0e,
0x9b, 0x7b, 0xf7, 0x9a, 0xb8, 0x92, 0x55, 0xdf, 0x65, 0x37, 0x93, 0x18, 0xc6, 0x32, 0xbe, 0x83,
0x24, 0x43, 0x77, 0x10, 0xf5, 0xb7, 0x29, 0xa8, 0xc5, 0xd3, 0x10, 0xf4, 0xe1, 0xc4, 0xc4, 0xb7,
0xce, 0xc1, 0x61, 0x26, 0x66, 0x8f, 0xae, 0x43, 0xd9, 0x25, 0x47, 0x84, 0x76, 0xfb, 0x82, 0x16,
0x89, 0x23, 0xb3, 0x84, 0x4b, 0x52, 0xca, 0x8d, 0x3c, 0xa1, 0xf6, 0x19, 0xe9, 0x52, 0x4d, 0x60,
0x91, 0xd8, 0x74, 0x0a, 0x53, 0x63, 0xd2, 0x43, 0x21, 0x54, 0x3f, 0x3d, 0x57, 0x2c, 0x15, 0xc8,
0xe2, 0x66, 0x1b, 0x7f, 0x5c, 0x49, 0x23, 0x04, 0x65, 0xde, 0xd4, 0x0e, 0xf7, 0xb6, 0x0f, 0x0e,
0x5b, 0xfb, 0x2c, 0x96, 0x17, 0x60, 0xd9, 0x8f, 0xa5, 0x2f, 0xcc, 0xaa, 0x9f, 0x40, 0x39, 0x7a,
0xf7, 0x67, 0x21, 0x74, 0xed, 0x91, 0x65, 0xf0, 0x60, 0x64, 0xb1, 0xe8, 0xa0, 0xdb, 0x90, 0x3d,
0xb6, 0x45, 0x9a, 0xcd, 0xde, 0x6b, 0x8f, 0x6c, 0x4a, 0x42, 0xb5, 0x03, 0xa1, 0xad, 0x7e, 0x01,
0x59, 0x9e, 0x35, 0x2c, 0x03, 0xf8, 0x2d, 0x5e, 0x92, 0x2a, 0xd6, 0x46, 0x9f, 0x00, 0xe8, 0x94,
0xba, 0x66, 0x67, 0x34, 0x76, 0xbc, 0x3e, 0x3b, 0xeb, 0xb6, 0x7d, 0xbd, 0xfa, 0x65, 0x99, 0x7e,
0xab, 0x63, 0xd3, 0x50, 0x0a, 0x86, 0x1c, 0xaa, 0x7b, 0x50, 0x8e, 0xda, 0xfa, 0x34, 0x40, 0x8c,
0x21, 0x4a, 0x03, 0x04, 0xab, 0x93, 0x34, 0x20, 0x20, 0x11, 0x69, 0x51, 0xb1, 0xe1, 0x1d, 0xf5,
0x59, 0x12, 0x0a, 0xed, 0x13, 0xb9, 0x1e, 0x31, 0xc5, 0x82, 0xb1, 0x69, 0x2a, 0x7c, 0x35, 0x16,
0xd5, 0x87, 0x74, 0x50, 0xd3, 0xf8, 0x20, 0xd8, 0x71, 0x99, 0x45, 0x6f, 0x40, 0x7e, 0x71, 0x47,
0x66, 0xd9, 0xfb, 0xa0, 0x04, 0x98, 0xc9, 0xd8, 0xa9, 0x6e, 0x18, 0x2e, 0xf1, 0x3c, 0xb9, 0xef,
0xfd, 0x2e, 0xaf, 0x3d, 0xd9, 0x4f, 0xe5, 0xe5, 0x3b, 0x8d, 0x45, 0x47, 0x35, 0x60, 0x79, 0x02,
0x70, 0xd1, 0xfb, 0x90, 0x77, 0x46, 0x1d, 0xcd, 0x0f, 0xcf, 0xc4, 0x5b, 0x83, 0xcf, 0x7b, 0x46,
0x9d, 0x81, 0xd9, 0x7d, 0x40, 0x4e, 0xfd, 0xc1, 0x38, 0xa3, 0xce, 0x03, 0x11, 0x45, 0xf1, 0x2b,
0xa9, 0xf0, 0xaf, 0x1c, 0x43, 0xc1, 0xdf, 0x14, 0xe8, 0x87, 0xa0, 0x04, 0x58, 0x1e, 0x94, 0x24,
0x63, 0x0f, 0x01, 0xe9, 0x7e, 0x6c, 0xc2, 0x48, 0xb4, 0x67, 0xf6, 0x2c, 0x62, 0x68, 0x63, 0x7e,
0xcc, 0x7f, 0xad, 0x80, 0x97, 0xc5, 0x87, 0x5d, 0x9f, 0x1c, 0xab, 0xff, 0x4e, 0x42, 0xc1, 0x2f,
0x3d, 0xa1, 0x77, 0x43, 0xfb, 0xae, 0x3c, 0xe3, 0xa2, 0xee, 0x2b, 0x8e, 0xcb, 0x47, 0xd1, 0xb1,
0xa6, 0xce, 0x3f, 0xd6, 0xb8, 0x3a, 0xa0, 0x5f, 0x91, 0xcd, 0x9c, 0xbb, 0x22, 0xfb, 0x36, 0x20,
0x6a, 0x53, 0x7d, 0xa0, 0x1d, 0xdb, 0xd4, 0xb4, 0x7a, 0x9a, 0x08, 0xb6, 0xe0, 0x02, 0x15, 0xfe,
0xe5, 0x11, 0xff, 0x70, 0xc0, 0xe3, 0xfe, 0xcb, 0x24, 0x14, 0x02, 0x50, 0x3f, 0x6f, 0x35, 0xe8,
0x22, 0xe4, 0x24, 0x6e, 0x89, 0x72, 0x90, 0xec, 0x05, 0x85, 0xc9, 0x4c, 0xa8, 0x30, 0x59, 0x83,
0xc2, 0x90, 0x50, 0x9d, 0x9f, 0x6c, 0xe2, 0x8a, 0x12, 0xf4, 0x6f, 0xbe, 0x07, 0xc5, 0x50, 0x61,
0x8e, 0x65, 0xde, 0x5e, 0xf3, 0xa3, 0x4a, 0xa2, 0x96, 0x7f, 0xf6, 0xe5, 0xd5, 0xf4, 0x1e, 0x79,
0xca, 0xf6, 0x2c, 0x6e, 0x36, 0x5a, 0xcd, 0xc6, 0x83, 0x4a, 0xb2, 0x56, 0x7c, 0xf6, 0xe5, 0xd5,
0x3c, 0x26, 0xbc, 0x48, 0x70, 0xb3, 0x05, 0x4b, 0xe1, 0x55, 0x89, 0x42, 0x1f, 0x82, 0xf2, 0xbd,
0x87, 0x07, 0xbb, 0x3b, 0x8d, 0xed, 0x76, 0x53, 0x7b, 0xb4, 0xdf, 0x6e, 0x56, 0x92, 0xe8, 0x55,
0xb8, 0xb0, 0xbb, 0xf3, 0xe3, 0x56, 0x5b, 0x6b, 0xec, 0xee, 0x34, 0xf7, 0xda, 0xda, 0x76, 0xbb,
0xbd, 0xdd, 0x78, 0x50, 0x49, 0x6d, 0xfd, 0x5e, 0x81, 0xe5, 0xed, 0x7a, 0x63, 0x87, 0xc1, 0xb6,
0xd9, 0xd5, 0xf9, 0xfd, 0xb1, 0x01, 0x19, 0x7e, 0x43, 0x3c, 0xf3, 0xd9, 0xae, 0x76, 0x76, 0xf9,
0x08, 0xdd, 0x87, 0x2c, 0xbf, 0x3c, 0xa2, 0xb3, 0xdf, 0xf1, 0x6a, 0x73, 0xea, 0x49, 0x6c, 0x30,
0x3c, 0x3d, 0xce, 0x7c, 0xd8, 0xab, 0x9d, 0x5d, 0x5e, 0x42, 0x18, 0x94, 0x31, 0xf9, 0x9c, 0xff,
0xd0, 0x55, 0x5b, 0x00, 0x6c, 0xd0, 0x2e, 0xe4, 0xfd, 0xfb, 0xc2, 0xbc, 0xa7, 0xb7, 0xda, 0xdc,
0xfa, 0x0f, 0x0b, 0x97, 0xb8, 0xd7, 0x9d, 0xfd, 0x8e, 0x58, 0x9b, 0x53, 0xcc, 0x42, 0x3b, 0x90,
0x93, 0x84, 0x6a, 0xce, 0x73, 0x5a, 0x6d, 0x5e, 0x3d, 0x87, 0x05, 0x6d, 0x7c, 0x63, 0x9e, 0xff,
0x3a, 0x5a, 0x5b, 0xa0, 0x4e, 0x87, 0x1e, 0x02, 0x84, 0x6e, 0x71, 0x0b, 0x3c, 0x7b, 0xd6, 0x16,
0xa9, 0xbf, 0xa1, 0x7d, 0x28, 0x04, 0xa4, 0x7a, 0xee, 0x23, 0x64, 0x6d, 0x7e, 0x21, 0x0c, 0x3d,
0x86, 0x52, 0x94, 0x4c, 0x2e, 0xf6, 0xb4, 0x58, 0x5b, 0xb0, 0xc2, 0xc5, 0xfc, 0x47, 0x99, 0xe5,
0x62, 0x4f, 0x8d, 0xb5, 0x05, 0x0b, 0x5e, 0xe8, 0x33, 0x58, 0x99, 0x66, 0x7e, 0x8b, 0xbf, 0x3c,
0xd6, 0xce, 0x51, 0x02, 0x43, 0x43, 0x40, 0x33, 0x18, 0xe3, 0x39, 0x1e, 0x22, 0x6b, 0xe7, 0xa9,
0x88, 0xd5, 0x9b, 0x5f, 0xbd, 0x58, 0x4b, 0x7e, 0xfd, 0x62, 0x2d, 0xf9, 0x8f, 0x17, 0x6b, 0xc9,
0xe7, 0x2f, 0xd7, 0x12, 0x5f, 0xbf, 0x5c, 0x4b, 0xfc, 0xf5, 0xe5, 0x5a, 0xe2, 0x67, 0x6f, 0xf5,
0x4c, 0xda, 0x1f, 0x75, 0x36, 0xba, 0xf6, 0x70, 0x33, 0xfc, 0x0f, 0x87, 0x59, 0xff, 0xba, 0xe8,
0xe4, 0xf8, 0xa1, 0x72, 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0xa5, 0x39, 0xcc, 0x95,
0x21, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -5229,6 +5241,13 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.MempoolError) > 0 {
i -= len(m.MempoolError)
copy(dAtA[i:], m.MempoolError)
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
i--
dAtA[i] = 0x5a
}
if m.Priority != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
i--
@@ -6842,6 +6861,10 @@ func (m *ResponseCheckTx) Size() (n int) {
if m.Priority != 0 {
n += 1 + sovTypes(uint64(m.Priority))
}
l = len(m.MempoolError)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
@@ -11107,6 +11130,38 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
break
}
}
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MempoolError = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])

View File

@@ -0,0 +1,251 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
// ReIndexEventCmd allows re-indexing events within a given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or when you want to replace the backend.
The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); the
default end-height is 0, meaning the tool will re-index up to the latest block height (inclusive). Users can omit
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start the re-index from")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height to end the re-index at")
}
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}
eventSinks := []indexer.EventSink{}
for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, _, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}
if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}
if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}
return eventSinks, nil
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)
for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}
_ = batch.Add(&tr)
}
}
for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, endHeight, startHeight)
}
return nil
}


@@ -0,0 +1,171 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}
for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}


@@ -15,6 +15,7 @@ func main() {
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.ReIndexEventCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,


@@ -29,8 +29,8 @@ const (
ModeValidator = "validator"
ModeSeed = "seed"
BlockchainV0 = "v0"
BlockchainV2 = "v2"
BlockSyncV0 = "v0"
BlockSyncV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
@@ -76,7 +76,7 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
BlockSync *BlockSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
@@ -91,7 +91,7 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
FastSync: DefaultFastSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
@@ -114,7 +114,7 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
FastSync: TestFastSyncConfig(),
BlockSync: TestBlockSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
@@ -151,7 +151,7 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.FastSync.ValidateBasic(); err != nil {
if err := cfg.BlockSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [fastsync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
@@ -197,6 +197,7 @@ type BaseConfig struct { //nolint: maligned
// If this node is many blocks behind the tip of the chain, FastSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits
// TODO: This should be moved to the blocksync config
FastSyncMode bool `mapstructure:"fast-sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
@@ -446,6 +447,7 @@ type RPCConfig struct {
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCListenAddress string `mapstructure:"grpc-laddr"`
// Maximum number of simultaneous connections.
@@ -453,6 +455,7 @@ type RPCConfig struct {
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -782,25 +785,47 @@ type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool for at least TTLNumBlocks blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool for at least TTLNumBlocks blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
@@ -811,10 +836,12 @@ func DefaultMempoolConfig() *MempoolConfig {
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
}
}
@@ -840,6 +867,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}
return nil
}
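Taken together, ttl-duration and ttl-num-blocks act as an OR of two eviction conditions. Below is a minimal sketch of that rule, assuming hypothetical insertedAt/insertedHeight values tracked per transaction; the real mempool's internals differ.

```go
package config

import "time"

// ttlExpired illustrates the eviction rule described in the TTL comments
// above: a transaction is removed when either non-zero TTL is exceeded.
// insertedAt and insertedHeight are hypothetical inputs; the real mempool
// tracks its own equivalents internally.
func ttlExpired(cfg *MempoolConfig, insertedAt time.Time, insertedHeight, curHeight int64) bool {
	if cfg.TTLDuration > 0 && time.Since(insertedAt) > cfg.TTLDuration {
		return true // exceeded ttl-duration
	}
	if cfg.TTLNumBlocks > 0 && curHeight-insertedHeight >= cfg.TTLNumBlocks {
		return true // existed for at least ttl-num-blocks blocks
	}
	return false
}
```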
@@ -878,7 +912,7 @@ func DefaultStateSyncConfig() *StateSyncConfig {
}
}
// TestFastSyncConfig returns a default configuration for the state sync service
// TestStateSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
@@ -934,34 +968,33 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
}
//-----------------------------------------------------------------------------
// FastSyncConfig
// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
type BlockSyncConfig struct {
Version string `mapstructure:"version"`
}
// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
return &FastSyncConfig{
Version: BlockchainV0,
// DefaultBlockSyncConfig returns a default configuration for the block sync service
func DefaultBlockSyncConfig() *BlockSyncConfig {
return &BlockSyncConfig{
Version: BlockSyncV0,
}
}
// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
return DefaultFastSyncConfig()
// TestBlockSyncConfig returns a default configuration for the block sync.
func TestBlockSyncConfig() *BlockSyncConfig {
return DefaultBlockSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *FastSyncConfig) ValidateBasic() error {
func (cfg *BlockSyncConfig) ValidateBasic() error {
switch cfg.Version {
case BlockchainV0:
return nil
case BlockchainV2:
case BlockSyncV0:
return nil
case BlockSyncV2:
return errors.New("blocksync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
}
}


@@ -125,13 +125,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
require.NoError(t, cfg.ValidateBasic())
}
func TestFastSyncConfigValidateBasic(t *testing.T) {
cfg := TestFastSyncConfig()
func TestBlockSyncConfigValidateBasic(t *testing.T) {
cfg := TestBlockSyncConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with version
cfg.Version = "v2"
assert.NoError(t, cfg.ValidateBasic())
assert.Error(t, cfg.ValidateBasic())
cfg.Version = "invalid"
assert.Error(t, cfg.ValidateBasic())


@@ -200,6 +200,7 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# Maximum number of simultaneous connections.
@@ -209,6 +210,7 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -397,6 +399,22 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -434,14 +452,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Fast Sync Configuration Connections ###
### Block Sync Configuration Connections ###
#######################################################
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "{{ .FastSync.Version }}"
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"
#######################################################
### Consensus Configuration Options ###


@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)


@@ -97,8 +97,7 @@ design for tendermint was originally tracked in
[#828](https://github.com/tendermint/tendermint/issues/828).
#### Eager StateSync
Warp Sync as implemented in Parity
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
Warp Sync as implemented in OpenEthereum to rapidly
download both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
@@ -234,5 +233,3 @@ Proposed
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed


@@ -119,7 +119,7 @@ network usage.
---
Check out the formal specification
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).
## Status


@@ -18,7 +18,7 @@ graceful here, but that's for another day.
It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish


@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
the user.
For a detailed overview of how each of these three attacks can be conducted please refer to the
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
## Full Node Verification


@@ -36,7 +36,7 @@ proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "anonymous"
# If this node is many blocks behind the tip of the chain, FastSync
# If this node is many blocks behind the tip of the chain, BlockSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = true
@@ -275,9 +275,13 @@ dial-timeout = "3s"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "v1"
recheck = true
broadcast = true
wal-dir = ""
# Maximum number of transactions in the mempool
size = 5000
@@ -304,6 +308,22 @@ max-tx-bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -334,12 +354,12 @@ discovery-time = "15s"
temp-dir = ""
#######################################################
### Fast Sync Configuration Connections ###
### BlockSync Configuration Connections ###
#######################################################
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
@@ -421,7 +441,6 @@ max-open-connections = 3
# Instrumentation namespace
namespace = "tendermint"
```
## Empty blocks VS no empty blocks


@@ -32,7 +32,7 @@ tendermint start --log-level "info"
Here is the list of modules you may encounter in Tendermint's log and a
little overview what they do.
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
client with respect to the application and maintains 3 connections:
mempool, consensus and query. The code used by Tendermint Core can
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
@@ -45,12 +45,12 @@ little overview what they do.
from a crash.
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
You can subscribe to them by calling `subscribe` RPC method. Refer
to [RPC docs](./rpc.md) for additional information.
to [RPC docs](../tendermint-core/rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whenever
they are coming from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
more details, please check out the
[README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
[README](https://github.com/tendermint/spec/tree/master/spec/p2p).
- `rpc-server` RPC server. For implementation details, please read the
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which


@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure `log-level` config variable. Some of the
modules can be found [here](../nodes/logging#list-of-modules). If
modules can be found [here](logging.md#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
@@ -114,7 +114,7 @@ just the votes seen at the current height.
If, after consulting with the logs and above endpoints, you still have no idea
what's happening, consider using `tendermint debug kill` sub-command. This
command will scrap all the available info and kill the process. See
[Debugging](../tools/debugging.md) for the exact format.
[Debugging](../tools/debugging/README.md) for the exact format.
You can inspect the resulting archive yourself or create an issue on
[Github](https://github.com/tendermint/tendermint). Before opening an issue
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).
`tendermint debug dump` sub-command can be used to periodically dump useful
information into an archive. See [Debugging](../tools/debugging.md) for more
information into an archive. See [Debugging](../tools/debugging/README.md) for more
information.
## What happens when my app dies
@@ -315,7 +315,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
because proposers should wait to hear for more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://cosmos.network/intro/hub)) while for enterprise
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
applications, setting it to true is not a problem.
- `consensus.peer-gossip-sleep-duration`


@@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint.
- [Subscribing to events](./subscription.md)
- [Block Structure](./block-structure.md)
- [RPC](./rpc.md)
- [Fast Sync](./fast-sync.md)
- [Block Sync](./block-sync.md)
- [State Sync](./state-sync.md)
- [Mempool](./mempool.md)
- [Light Client](./light-client.md)


@@ -2,7 +2,8 @@
order: 10
---
# Fast Sync
# Block Sync
*Formerly known as Fast Sync*
In a proof of work blockchain, syncing with the chain is the same
process as staying up-to-date with the consensus: download blocks, and
@@ -14,7 +15,7 @@ scratch can take a very long time. It's much faster to just download
blocks and check the merkle tree of validators than to run the real-time
consensus gossip protocol.
## Using Fast Sync
## Using Block Sync
To support faster syncing, Tendermint offers a `fast-sync` mode, which
is enabled by default, and can be toggled in the `config.toml` or via
@@ -22,26 +23,36 @@ is enabled by default, and can be toggled in the `config.toml` or via
In this mode, the Tendermint daemon will sync hundreds of times faster
than if it used the real-time consensus process. Once caught up, the
daemon will switch out of fast sync and into the normal consensus mode.
daemon will switch out of Block Sync and into the normal consensus mode.
After running for some time, the node is considered `caught up` if it
has at least one peer and its height is at least as high as the max
reported peer height. See [the IsCaughtUp
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta.
Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta.
If you would like to use a different version you can do so by changing the version in the `config.toml`:
```toml
#######################################################
### Fast Sync Configuration Connections ###
### Block Sync Configuration Connections ###
#######################################################
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# Block Sync version to use:
# 1) "v0" (default) - the legacy Block Sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
```
If we're lagging sufficiently, we should go back to fast syncing, but
If we're lagging sufficiently, we should go back to block syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
## The Block Sync event
When the Tendermint core launches, it may switch to `block-sync` mode to catch up to the
current best height of the network. The core emits a block-sync status event exposing the
current status and the sync height. Once it has caught up to the network's best height, it
switches to consensus and emits another event exposing the block-sync `complete` status and
the state `height`.
The user can query these events by subscribing to `EventQueryBlockSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
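For illustration, a client could watch these status events over the RPC websocket roughly as follows. This is a sketch: the node address is an assumption, and the two-argument `rpchttp.New` constructor follows the v0.34 client API, which may differ on master. The same pattern applies to `EventQueryStateSyncStatus`.

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Assumes a local node on the default RPC listen address; the
	// constructor signature follows the v0.34 client and may differ here.
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	if err := c.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = c.Stop() }()

	// Subscribe to block-sync status events and print each update.
	events, err := c.Subscribe(context.Background(), "demo", types.EventQueryBlockSyncStatus.String())
	if err != nil {
		panic(err)
	}
	for ev := range events {
		if status, ok := ev.Data.(types.EventDataBlockSyncStatus); ok {
			fmt.Printf("block sync complete=%v height=%d\n", status.Complete, status.Height)
		}
	}
}
```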


@@ -4,8 +4,15 @@ order: 11
# State Sync
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
With block sync a node is downloading all of the data of an application from genesis and verifying it.
With state sync your node will download data related to the head or near the head of the chain and verify the data.
This leads to drastically shorter times for joining a network.
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
## Events
When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
The user can query the events by subscribing to `EventQueryStateSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.


@@ -36,7 +36,7 @@ more information on query syntax and other options.
You can also use tags, given you had included them into DeliverTx
response, to query transaction results. See [Indexing
transactions](./indexing-transactions.md) for details.
transactions](../app-dev/indexing-transactions.md) for details.
## ValidatorSetUpdates


@@ -552,8 +552,7 @@ To make a Tendermint network that can tolerate one of the validators
failing, you need at least four validator nodes (e.g., 2/3).
Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/app-development.md) for more details.
explicitly programmed by the application developer.
### Local Network

go.mod

@@ -15,7 +15,7 @@ require (
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.41.1
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.2.0
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0

go.sum

@@ -393,8 +393,8 @@ github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4Mgqvf
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=

View File

@@ -1,17 +0,0 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
- v0 was the very first implementation. it's battle tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
Check out ADR-40 for the formal model and requirements.
# Termination criteria
1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
*/
package blockchain

internal/blocksync/doc.go (new file)

@@ -0,0 +1,36 @@
/*
Package blocksync implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism was
formerly known as fast-sync.
In order for a full node to successfully participate in consensus, it must have
the latest view of state. The blocksync protocol is a mechanism in which peers
may exchange and gossip entire blocks with one another, in a request/response
model, until they've successfully synced to the latest head block. Once
successfully synced, the full node can switch to an active role in consensus and
no longer needs to run the blocksync process.
Note, the blocksync reactor Service gossips entire blocks and relevant data such
that each receiving peer may construct the entire view of the blocksync state.
There are currently two versions of the blocksync reactor Service:
- v0: The initial implementation that is battle-tested, but whose test coverage
is lacking and is not formally verifiable.
- v2: The latest implementation that has much higher test coverage and is formally
verified. However, the current implementation of v2 is not as battle-tested and
is known to have various bugs that could make it unreliable in production
environments.
The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond
to block requests from peers. For every block request from a peer, the reactor
will execute respondToPeer, which will fetch the block from the node's block store
and respond to the peer. For every block response, the node will add the block
to its pool via AddBlock.
Internally, v0 runs a poolRoutine that constantly checks for what blocks it needs
and requests them. The poolRoutine is also responsible for taking blocks from the
pool, saving and executing each block.
*/
package blocksync
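As a toy model of the flow this doc comment describes (request missing heights, add responses to the pool, verify/save/execute, then hand off to consensus), here is a deliberately simplified sketch; none of these types mirror the real reactor:

```go
package blocksyncsketch

type block struct{ height int64 }

// pool tracks the next height to fetch and the max reported peer height.
type pool struct {
	next, max int64
}

func (p *pool) caughtUp() bool { return p.next > p.max }

// poolRoutine repeatedly requests the next missing height and, once the
// block arrives (BlockRequest -> BlockResponse -> AddBlock in the real
// reactor), verifies, saves, and executes it before advancing.
func poolRoutine(p *pool, request func(height int64) block, apply func(block)) {
	for !p.caughtUp() {
		b := request(p.next) // ask a peer for the block at this height
		apply(b)             // verify commit, save to store, execute against state
		p.next++
	}
	// once caught up, the node switches from blocksync to consensus
}
```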


@@ -1,7 +1,7 @@
package blockchain
package blocksync
import (
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/types"
)


@@ -65,7 +65,7 @@ type BlockRequest struct {
PeerID types.NodeID
}
// BlockPool keeps track of the fast sync peers, block requests and block responses.
// BlockPool keeps track of the block sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time


@@ -6,13 +6,13 @@ import (
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blockchain"
bc "github.com/tendermint/tendermint/internal/blocksync"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
@@ -60,7 +60,7 @@ const (
)
type consensusReactor interface {
// For when we switch from blockchain reactor and fast sync to the consensus
// For when we switch from blockchain reactor and block sync to the consensus
// machine.
SwitchToConsensus(state sm.State, skipWAL bool)
}
@@ -85,7 +85,7 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
fastSync *tmSync.AtomicBool
blockSync *tmSync.AtomicBool
blockchainCh *p2p.Channel
// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
@@ -121,7 +121,7 @@ func NewReactor(
consReactor consensusReactor,
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
fastSync bool,
blockSync bool,
metrics *cons.Metrics,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
@@ -142,7 +142,7 @@ func NewReactor(
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: tmSync.NewBool(fastSync),
blockSync: tmSync.NewBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
@@ -162,10 +162,10 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If fastSync is enabled, we also start the pool and the pool processing
// If blockSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.fastSync.IsSet() {
if r.blockSync.IsSet() {
if err := r.pool.Start(); err != nil {
return err
}
@@ -183,7 +183,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.fastSync.IsSet() {
if r.blockSync.IsSet() {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
@@ -371,10 +371,10 @@ func (r *Reactor) processPeerUpdates() {
}
}
// SwitchToFastSync is called by the state sync reactor when switching to fast
// SwitchToBlockSync is called by the state sync reactor when switching to block
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync.Set()
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
r.blockSync.Set()
r.initialState = state
r.pool.height = state.LastBlockHeight + 1
@@ -496,7 +496,7 @@ FOR_LOOP:
r.Logger.Error("failed to stop pool", "err", err)
}
r.fastSync.UnSet()
r.blockSync.UnSet()
if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
@@ -591,7 +591,7 @@ FOR_LOOP:
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"fast sync rate",
"block sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
@@ -614,14 +614,14 @@ func (r *Reactor) GetMaxPeerBlockHeight() int64 {
}
func (r *Reactor) GetTotalSyncedTime() time.Duration {
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *Reactor) GetRemainingSyncTime() time.Duration {
if !r.fastSync.IsSet() {
if !r.blockSync.IsSet() {
return time.Duration(0)
}


@@ -15,7 +15,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
@@ -36,7 +36,7 @@ type reactorTestSuite struct {
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
fastSync bool
blockSync bool
}
func setup(
@@ -61,7 +61,7 @@ func setup(
blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
fastSync: true,
blockSync: true,
}
chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)}
@@ -163,7 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
nil,
rts.blockchainChannels[nodeID],
rts.peerUpdates[nodeID],
rts.fastSync,
rts.blockSync,
cons.NopMetrics())
require.NoError(t, err)


@@ -4,7 +4,7 @@ import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
)


@@ -5,7 +5,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -42,7 +42,7 @@ const (
)
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// for when we switch from blockchain reactor and block sync to
// the consensus machine
SwitchToConsensus(state state.State, skipWAL bool)
}


@@ -7,14 +7,14 @@ import (
proto "github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/internal/blockchain"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
bc "github.com/tendermint/tendermint/internal/blocksync"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -31,12 +31,12 @@ type blockStore interface {
Height() int64
}
// BlockchainReactor handles fast sync protocol.
// BlockchainReactor handles block sync protocol.
type BlockchainReactor struct {
p2p.BaseReactor
fastSync *sync.AtomicBool // enable fast sync on start when it's been Set
stateSynced bool // set to true when SwitchToFastSync is called by state sync
blockSync *sync.AtomicBool // enable block sync on start when it's been Set
stateSynced bool // set to true when SwitchToBlockSync is called by state sync
scheduler *Routine
processor *Routine
logger log.Logger
@@ -44,7 +44,7 @@ type BlockchainReactor struct {
mtx tmsync.RWMutex
maxPeerHeight int64
syncHeight int64
events chan Event // non-nil during a fast sync
events chan Event // non-nil during a block sync
reporter behavior.Reporter
io iIO
@@ -61,7 +61,7 @@ type blockApplier interface {
// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
@@ -78,7 +78,7 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
fastSync: sync.NewBool(fastSync),
blockSync: sync.NewBool(blockSync),
syncStartHeight: initHeight,
syncStartTime: time.Time{},
lastSyncRate: 0,
@@ -90,10 +90,10 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
fastSync bool,
blockSync bool,
metrics *cons.Metrics) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, fastSync, metrics)
return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
}
// SetSwitch implements Reactor interface.
@@ -137,22 +137,22 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.fastSync.IsSet() {
if r.blockSync.IsSet() {
err := r.startSync(nil)
if err != nil {
return fmt.Errorf("failed to start fast sync: %w", err)
return fmt.Errorf("failed to start block sync: %w", err)
}
}
return nil
}
// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor is updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.events != nil {
return errors.New("fast sync already in progress")
return errors.New("block sync already in progress")
}
r.events = make(chan Event, chBufferSize)
go r.scheduler.start()
@@ -167,7 +167,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error {
return nil
}
// endSync ends a fast sync
// endSync ends a block sync
func (r *BlockchainReactor) endSync() {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -179,8 +179,8 @@ func (r *BlockchainReactor) endSync() {
r.processor.stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
r.stateSynced = true
state = state.Copy()
@@ -434,7 +434,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
} else {
r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
}
r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
r.logger.Info("block sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
lastHundred = time.Now()
}
@@ -442,12 +442,12 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
case pcBlockVerificationFailure:
r.scheduler.send(event)
case pcFinished:
r.logger.Info("Fast sync complete, switching to consensus")
r.logger.Info("block sync complete, switching to consensus")
if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
r.logger.Error("Failed to switch to consensus reactor")
}
r.endSync()
r.fastSync.UnSet()
r.blockSync.UnSet()
return
case noOpEvent:
default:
@@ -617,14 +617,14 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
}
func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
if !r.fastSync.IsSet() {
if !r.blockSync.IsSet() {
return time.Duration(0)
}


@@ -15,7 +15,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
@@ -23,7 +23,7 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"


@@ -163,7 +163,7 @@ type scheduler struct {
height int64
// lastAdvance tracks the last time a block execution happened.
// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
// This covers the cases where there are no peers or all peers have a lower height.
lastAdvance time.Time
syncTimeout time.Duration


@@ -54,8 +54,8 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge
// Whether or not a node is fast syncing. 1 if yes, 0 if no.
FastSyncing metrics.Gauge
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge
@@ -169,11 +169,11 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "fast_syncing",
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
@@ -214,7 +214,7 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
FastSyncing: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
}


@@ -0,0 +1,28 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
)
// ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type
type ConsSyncReactor struct {
mock.Mock
}
// SetBlockSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetBlockSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}
// SetStateSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}
// SwitchToConsensus provides a mock function with given fields: _a0, _a1
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
_m.Called(_a0, _a1)
}


@@ -0,0 +1,71 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
time "time"
)
// BlockSyncReactor is an autogenerated mock type for the BlockSyncReactor type
type BlockSyncReactor struct {
mock.Mock
}
// GetMaxPeerBlockHeight provides a mock function with given fields:
func (_m *BlockSyncReactor) GetMaxPeerBlockHeight() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// GetRemainingSyncTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetRemainingSyncTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}
return r0
}
// GetTotalSyncedTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetTotalSyncedTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}
return r0
}
// SwitchToBlockSync provides a mock function with given fields: _a0
func (_m *BlockSyncReactor) SwitchToBlockSync(_a0 state.State) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(state.State) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}


@@ -96,22 +96,30 @@ const (
type ReactorOption func(*Reactor)
// Temporary interface for switching to fast sync, we should get rid of v0.
// NOTE: Temporary interface for switching to block sync; we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type FastSyncReactor interface {
SwitchToFastSync(sm.State) error
type BlockSyncReactor interface {
SwitchToBlockSync(sm.State) error
GetMaxPeerBlockHeight() int64
// GetTotalSyncedTime returns the time duration since the fastsync starting.
// GetTotalSyncedTime returns the time duration since blocksync started.
GetTotalSyncedTime() time.Duration
// GetRemainingSyncTime returns the estimated time until the node is fully synced;
// if will return 0 if the fastsync does not perform or the number of block synced is
// it will return 0 if blocksync is not running or the number of blocks synced is
// too small (less than 100).
GetRemainingSyncTime() time.Duration
}
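For illustration only, a caller holding this interface might surface sync progress as below; logSyncProgress is a hypothetical helper, not part of this changeset, and assumes the package's libs/log import:

```go
// logSyncProgress is a hypothetical example of consuming BlockSyncReactor;
// it reads the three progress accessors and logs them.
func logSyncProgress(r BlockSyncReactor, logger log.Logger) {
	logger.Info(
		"block sync progress",
		"max_peer_height", r.GetMaxPeerBlockHeight(),
		"synced_for", r.GetTotalSyncedTime(),
		"remaining", r.GetRemainingSyncTime(),
	)
}
```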
//go:generate mockery --case underscore --name ConsSyncReactor
// ConsSyncReactor defines an interface used for testing abilities of node.startStateSync.
type ConsSyncReactor interface {
SwitchToConsensus(sm.State, bool)
SetStateSyncingMetrics(float64)
SetBlockSyncingMetrics(float64)
}
// Reactor defines a reactor for the consensus service.
type Reactor struct {
service.BaseService
@@ -257,7 +265,7 @@ func (r *Reactor) SetEventBus(b *types.EventBus) {
r.state.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (r *Reactor) WaitSync() bool {
r.mtx.RLock()
defer r.mtx.RUnlock()
@@ -270,8 +278,8 @@ func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(r *Reactor) { r.Metrics = metrics }
}
// SwitchToConsensus switches from fast-sync mode to consensus mode. It resets
// the state, turns off fast-sync, and starts the consensus state-machine.
// SwitchToConsensus switches from block-sync mode to consensus mode. It resets
// the state, turns off block-sync, and starts the consensus state-machine.
func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.Logger.Info("switching to consensus")
@@ -288,7 +296,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.waitSync = false
r.mtx.Unlock()
r.Metrics.FastSyncing.Set(0)
r.Metrics.BlockSyncing.Set(0)
r.Metrics.StateSyncing.Set(0)
if skipWAL {
@@ -304,6 +312,11 @@ conS:
conR:
%+v`, err, r.state, r))
}
d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the blocksync complete event", "err", err)
}
}
// String returns a string representation of the Reactor.
@@ -956,7 +969,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
go r.gossipVotesRoutine(ps)
go r.queryMaj23Routine(ps)
// Send our state to the peer. If we're fast-syncing, broadcast a
// Send our state to the peer. If we're block-syncing, broadcast a
// RoundStepMessage later upon SwitchToConsensus().
if !r.waitSync {
go r.sendNewRoundStepMessage(ps.peerID)
@@ -1206,7 +1219,7 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// NOTE: We process these messages even when we're block syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
@@ -1424,3 +1437,11 @@ func (r *Reactor) peerStatsRoutine() {
func (r *Reactor) GetConsensusState() *State {
return r.state
}
func (r *Reactor) SetStateSyncingMetrics(v float64) {
r.Metrics.StateSyncing.Set(v)
}
func (r *Reactor) SetBlockSyncingMetrics(v float64) {
r.Metrics.BlockSyncing.Set(v)
}


@@ -25,6 +25,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -42,6 +43,7 @@ type reactorTestSuite struct {
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
blocksyncSubs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
@@ -58,10 +60,11 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
t.Helper()
rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
}
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -69,6 +72,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size)
rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size)
_, cancel := context.WithCancel(context.Background())
i := 0
for nodeID, node := range rts.network.Nodes {
state := states[i]
@@ -89,9 +94,13 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
require.NoError(t, err)
fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size)
require.NoError(t, err)
rts.states[nodeID] = state
rts.subs[nodeID] = blocksSub
rts.reactors[nodeID] = reactor
rts.blocksyncSubs[nodeID] = fsSub
// simulate handling initChain in the handshake
if state.state.LastBlockHeight == 0 {
@@ -117,6 +126,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
}
leaktest.Check(t)
cancel()
})
return rts
@@ -253,6 +263,15 @@ func waitForBlockWithUpdatedValsAndValidateIt(
wg.Wait()
}
func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
t.Helper()
status, ok := msg.Data().(types.EventDataBlockSyncStatus)
require.True(t, ok)
require.Equal(t, complete, status.Complete)
require.Equal(t, height, status.Height)
}
func TestReactorBasic(t *testing.T) {
config := configSetup(t)
@@ -275,8 +294,21 @@ func TestReactorBasic(t *testing.T) {
// wait till everyone makes the first new block
go func(s types.Subscription) {
defer wg.Done()
<-s.Out()
}(sub)
}
wg.Wait()
for _, sub := range rts.blocksyncSubs {
wg.Add(1)
// wait till everyone makes the consensus switch
go func(s types.Subscription) {
defer wg.Done()
msg := <-s.Out()
ensureBlockSyncStatus(t, msg, true, 0)
}(sub)
}

View File

@@ -489,14 +489,12 @@ func (evpool *Pool) batchExpiredPendingEvidence(batch dbm.Batch) (int64, time.Ti
func (evpool *Pool) removeEvidenceFromList(
blockEvidenceMap map[string]struct{}) {
for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
// Remove from clist
ev := e.Value.(types.Evidence)
if _, ok := blockEvidenceMap[evMapKey(ev)]; ok {
evpool.evidenceList.Remove(e)
e.DetachPrev()
}
}
}

View File

@@ -196,26 +196,21 @@ func (e *CElement) SetPrev(newPrev *CElement) {
e.mtx.Unlock()
}
// SetRemoved marks the given element as removed from the list and wakes up
// anyone waiting on it in either direction.
func (e *CElement) SetRemoved() {
e.mtx.Lock()
defer e.mtx.Unlock()
e.removed = true
// This wakes up anyone waiting in either direction.
if e.prev == nil {
e.prevWg.Done()
close(e.prevWaitCh)
}
if e.next == nil {
e.nextWg.Done()
close(e.nextWaitCh)
}
}
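These closed wait channels are what let consumers tail the list without polling; a minimal sketch of that pattern, mirroring the mempool broadcast routine later in this changeset (handle and the loop shape are illustrative):
// Sketch: tailing a CList. NextWaitChan() is closed either when a next
// element is appended or when the element is removed via SetRemoved.
for cur := l.Front(); ; {
	if cur == nil {
		<-l.WaitChan() // wait for the list to become non-empty
		cur = l.Front()
		continue
	}
	handle(cur.Value)    // handle is a stand-in for real work
	<-cur.NextWaitChan() // unblocks on append or removal
	cur = cur.Next()     // may be nil; the loop re-checks
}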
//--------------------------------------------------------------------------------
@@ -353,26 +348,24 @@ func (l *CList) PushBack(v interface{}) *CElement {
return e
}
// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks.
// NOTE: As per the contract of CList, removed elements cannot be added back.
func (l *CList) Remove(e *CElement) interface{} {
l.mtx.Lock()
defer l.mtx.Unlock()
prev := e.Prev()
next := e.Next()
if l.head == nil || l.tail == nil {
panic("Remove(e) on empty CList")
}
if prev == nil && l.head != e {
panic("Remove(e) with false head")
}
if next == nil && l.tail != e {
panic("Remove(e) with false tail")
}
@@ -397,53 +390,13 @@ func (l *CList) Remove(e *CElement) interface{} {
next.SetPrev(prev)
}
// Set .Done() on e, otherwise waiters will wait forever.
e.SetRemoved()
return e.Value
}
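To make the restored CONTRACT concrete, a sketch of the delete-while-iterating pattern that this changeset's callers (the evidence pool and the mempool's Flush) follow; shouldRemove is an illustrative predicate:
// Remove matching elements while iterating; detach per the CONTRACT above.
for e := l.Front(); e != nil; e = e.Next() {
	if shouldRemove(e.Value) {
		l.Remove(e)
		e.DetachPrev() // avoid memory leaks, as the contract requires
	}
}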
// Clear removes all the elements in the CList
func (l *CList) Clear() {
l.mtx.Lock()
defer l.mtx.Unlock()
if l.head == nil || l.tail == nil {
return
}
for e := l.head; e != nil; e = l.head {
prevE := e.Prev()
nextE := e.Next()
if prevE == nil && e != l.head {
panic("CList.Clear failed due to nil prev element")
}
if nextE == nil && e != l.tail {
panic("CList.Clear failed due to nil next element")
}
if l.len == 1 {
l.wg = waitGroup1()
l.waitCh = make(chan struct{})
}
l.len--
if prevE == nil {
l.head = nextE
} else {
prevE.SetNext(nextE)
}
if nextE == nil {
l.tail = prevE
} else {
nextE.SetPrev(prevE)
}
e.Detach()
}
}
func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPanicOnMaxLength(t *testing.T) {
@@ -252,16 +253,9 @@ func TestScanRightDeleteRandom(t *testing.T) {
// time.Sleep(time.Second * 1)
// And remove all the elements.
for el := l.Front(); el != nil; el = el.Next() {
l.Remove(el)
}
if l.Len() != 0 {
t.Fatal("Failed to remove all elements from CList")
}
@@ -341,3 +335,50 @@ FOR_LOOP2:
t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
}
}
func TestRemoved(t *testing.T) {
l := New()
el1 := l.PushBack(1)
el2 := l.PushBack(2)
l.Remove(el1)
require.True(t, el1.Removed())
require.False(t, el2.Removed())
}
func TestNextWaitChan(t *testing.T) {
l := New()
el1 := l.PushBack(1)
t.Run("tail element should not have a closed nextWaitChan", func(t *testing.T) {
select {
case <-el1.NextWaitChan():
t.Fatal("nextWaitChan should not have been closed")
default:
}
})
el2 := l.PushBack(2)
t.Run("adding element should close tail nextWaitChan", func(t *testing.T) {
select {
case <-el1.NextWaitChan():
require.NotNil(t, el1.Next())
default:
t.Fatal("nextWaitChan should have been closed")
}
select {
case <-el2.NextWaitChan():
t.Fatal("nextWaitChan should not have been closed")
default:
}
})
t.Run("removing element should close its nextWaitChan", func(t *testing.T) {
l.Remove(el2)
select {
case <-el2.NextWaitChan():
require.Nil(t, el2.Next())
default:
t.Fatal("nextWaitChan should have been closed")
}
})
}

View File

@@ -0,0 +1,41 @@
package progressbar
import "fmt"
// Bar is a simple progress bar that indicates the current status and progress of a task.
// ref: https://www.pixelstech.net/article/1596946473-A-simple-example-on-implementing-progress-bar-in-GoLang
type Bar struct {
percent int64 // progress percentage
cur int64 // current progress
start int64 // the initial value for progress
total int64 // total value for progress
rate string // the actual progress bar to be printed
graph string // the fill value for progress bar
}
func (bar *Bar) NewOption(start, total int64) {
bar.cur = start
bar.start = start
bar.total = total
bar.graph = "█"
bar.percent = bar.getPercent()
}
func (bar *Bar) getPercent() int64 {
return int64(float32(bar.cur-bar.start) / float32(bar.total-bar.start) * 100)
}
func (bar *Bar) Play(cur int64) {
bar.cur = cur
last := bar.percent
bar.percent = bar.getPercent()
if bar.percent != last && bar.percent%2 == 0 {
bar.rate += bar.graph
}
fmt.Printf("\r[%-50s]%3d%% %8d/%d", bar.rate, bar.percent, bar.cur, bar.total)
}
func (bar *Bar) Finish() {
fmt.Println()
}
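A short usage sketch for Bar. With start=0 and total=200, Play(50) computes getPercent() = (50-0)/(200-0)*100 = 25, and one graph rune is appended for every two percentage points (50 runes at 100%, matching the %-50s format width). Note that getPercent divides by total-start, so callers should ensure total > start:
// Illustrative usage of the progress bar above.
var bar Bar
bar.NewOption(0, 200)
for i := int64(0); i <= 200; i++ {
	bar.Play(i) // redraws in place via the leading \r
}
bar.Finish() // terminates the line with a newline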

View File

@@ -0,0 +1,41 @@
package progressbar
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestProgressBar(t *testing.T) {
zero := int64(0)
hundred := int64(100)
var bar Bar
bar.NewOption(zero, hundred)
require.Equal(t, zero, bar.start)
require.Equal(t, zero, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, zero, bar.percent)
require.Equal(t, "█", bar.graph)
require.Equal(t, "", bar.rate)
defer bar.Finish()
for i := zero; i <= hundred; i++ {
time.Sleep(1 * time.Millisecond)
bar.Play(i)
}
require.Equal(t, zero, bar.start)
require.Equal(t, hundred, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, hundred, bar.percent)
var rate string
for i := zero; i < hundred/2; i++ {
rate += "█"
}
require.Equal(t, rate, bar.rate)
}

View File

@@ -164,7 +164,10 @@ func (mem *CListMempool) Flush() {
_ = atomic.SwapInt64(&mem.txsBytes, 0)
mem.cache.Reset()
for e := mem.txs.Front(); e != nil; e = e.Next() {
mem.txs.Remove(e)
e.DetachPrev()
}
mem.txsMap.Range(func(key, _ interface{}) bool {
mem.txsMap.Delete(key)
@@ -335,6 +338,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(mempool.TxKey(tx))
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

View File

@@ -74,6 +74,14 @@ type TxMempool struct {
// thread-safe priority queue.
priorityIndex *TxPriorityQueue
// heightIndex defines a height-based, ascending-order transaction index,
// i.e. older transactions are first.
heightIndex *WrappedTxList
// timestampIndex defines a timestamp-based, ascending-order transaction
// index, i.e. older transactions are first.
timestampIndex *WrappedTxList
// A read/write lock is used to safeguard updates, insertions and deletions
// from the mempool. A read-lock is implicitly acquired when executing CheckTx,
// however, a caller must explicitly grab a write-lock via Lock when updating
@@ -101,6 +109,12 @@ func NewTxMempool(
txStore: NewTxStore(),
gossipIndex: clist.New(),
priorityIndex: NewTxPriorityQueue(),
heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
}),
timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp)
}),
}
if cfg.CacheSize > 0 {
@@ -174,8 +188,8 @@ func (txmp *TxMempool) WaitForNextTx() <-chan struct{} {
// NextGossipTx returns the next valid transaction to gossip. A caller must wait
// for WaitForNextTx to signal a transaction is available to gossip first. It is
// thread-safe.
func (txmp *TxMempool) NextGossipTx() *clist.CElement {
return txmp.gossipIndex.Front()
}
// EnableTxsAvailable enables the mempool to trigger events when transactions
@@ -279,6 +293,7 @@ func (txmp *TxMempool) CheckTx(
tx: tx,
hash: txHash,
timestamp: time.Now().UTC(),
height: txmp.height,
}
txmp.initTxCallback(wtx, res, txInfo)
@@ -300,12 +315,11 @@ func (txmp *TxMempool) Flush() {
txmp.mtx.RLock()
defer txmp.mtx.RUnlock()
txmp.heightIndex.Reset()
txmp.timestampIndex.Reset()
for _, wtx := range txmp.txStore.GetAllTxs() {
txmp.removeTx(wtx, false)
}
atomic.SwapInt64(&txmp.sizeBytes, 0)
@@ -443,6 +457,8 @@ func (txmp *TxMempool) Update(
}
}
txmp.purgeExpiredTxs(blockHeight)
// If there are any uncommitted transactions left in the mempool, we either
// initiate re-CheckTx per remaining transaction or notify that remaining
// transactions are left.
@@ -488,105 +504,110 @@ func (txmp *TxMempool) Update(
// - An explicit lock is NOT required.
func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
if !ok {
return
}
var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
}
if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK {
// ignore bad transactions
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", txInfo.SenderNodeID,
"code", checkTxRes.CheckTx.Code,
"post_check_err", err,
)
txmp.metrics.FailedTxs.Add(1)
if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
if err != nil {
checkTxRes.CheckTx.MempoolError = err.Error()
}
return
}
sender := checkTxRes.CheckTx.Sender
priority := checkTxRes.CheckTx.Priority
if len(sender) > 0 {
if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
txmp.logger.Error(
"rejected incoming good transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"sender", sender,
)
txmp.metrics.RejectedTxs.Add(1)
return
}
}
if err := txmp.canAddTx(wtx); err != nil {
evictTxs := txmp.priorityIndex.GetEvictableTxs(
priority,
int64(wtx.Size()),
txmp.SizeBytes(),
txmp.config.MaxTxsBytes,
)
if len(evictTxs) == 0 {
// No room for the new incoming transaction so we just remove it from
// the cache.
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected incoming good transaction; mempool full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
txmp.metrics.RejectedTxs.Add(1)
return
}
// evict an existing transaction(s)
//
// NOTE:
// - The transaction, toEvict, can be removed while a concurrent
// reCheckTx callback is being executed for the same transaction.
for _, toEvict := range evictTxs {
txmp.removeTx(toEvict, true)
txmp.logger.Debug(
"evicted existing good transaction; mempool full",
"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
"old_priority", toEvict.priority,
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", wtx.priority,
)
txmp.metrics.EvictedTxs.Add(1)
}
}
wtx.gasWanted = checkTxRes.CheckTx.GasWanted
wtx.priority = priority
wtx.sender = sender
wtx.peers = map[uint16]struct{}{
txInfo.SenderID: {},
}
txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))
txmp.insertTx(wtx)
txmp.logger.Debug(
"inserted good transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
)
txmp.notifyTxsAvailable()
}
// defaultTxCallback performs the default CheckTx application callback. This is
@@ -721,6 +742,8 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
txmp.txStore.SetTx(wtx)
txmp.priorityIndex.PushTx(wtx)
txmp.heightIndex.Insert(wtx)
txmp.timestampIndex.Insert(wtx)
// Insert the transaction into the gossip index and mark the reference to the
// linked-list element, which will be needed at a later point when the
@@ -738,10 +761,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
txmp.txStore.RemoveTx(wtx)
txmp.priorityIndex.RemoveTx(wtx)
txmp.heightIndex.Remove(wtx)
txmp.timestampIndex.Remove(wtx)
// Remove the transaction from the gossip index and cleanup the linked-list
// element so it can be garbage collected.
txmp.gossipIndex.Remove(wtx.gossipEl)
wtx.gossipEl.DetachPrev()
atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))
@@ -750,6 +776,56 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
}
}
// purgeExpiredTxs removes all transactions that have exceeded their respective
// height and/or time based TTLs from their respective indexes. Every expired
// transaction will be removed from the mempool entirely, except for the cache.
//
// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
// the caller has a write-lock on the mempool and so we can safely iterate over
// the height and time based indexes.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
now := time.Now()
expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx)
if txmp.config.TTLNumBlocks > 0 {
purgeIdx := -1
for i, wtx := range txmp.heightIndex.txs {
if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}
if purgeIdx >= 0 {
txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
}
}
if txmp.config.TTLDuration > 0 {
purgeIdx := -1
for i, wtx := range txmp.timestampIndex.txs {
if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}
if purgeIdx >= 0 {
txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
}
}
for _, wtx := range expiredTxs {
txmp.removeTx(wtx, false)
}
}
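For reference, a hedged sketch of the two knobs this purge honors; the field names match the config references above, the values are illustrative, and either TTL alone is sufficient to expire a transaction:
// Enable both TTLs; a tx is purged when either bound is exceeded at Update.
cfg.Mempool.TTLNumBlocks = 10             // older than 10 blocks
cfg.Mempool.TTLDuration = 5 * time.Minute // or older than 5 minutes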
func (txmp *TxMempool) notifyTxsAvailable() {
if txmp.Size() == 0 {
panic("attempt to notify txs available but mempool is empty!")

View File

@@ -3,11 +3,13 @@ package v1
import (
"bytes"
"context"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -70,13 +72,13 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
}
}
func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()
app := &application{kvstore.NewApplication()}
cc := proxy.NewLocalClientCreator(app)
cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg.Mempool.CacheSize = cacheSize
appConnMem, err := cc.NewABCIClient()
@@ -88,7 +90,7 @@ func setup(t testing.TB, cacheSize int) *TxMempool {
require.NoError(t, appConnMem.Stop())
})
return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@@ -102,14 +104,10 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx
_, err := rng.Read(prefix)
require.NoError(t, err)
// sender := make([]byte, 10)
// _, err = rng.Read(sender)
// require.NoError(t, err)
priority := int64(rng.Intn(9999-1000) + 1000)
txs[i] = testTx{
tx: []byte(fmt.Sprintf("sender-%d=%X=%d", i, prefix, priority)),
tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
priority: priority,
}
require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo))
@@ -176,7 +174,7 @@ func TestTxMempool_Size(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -193,14 +191,14 @@ func TestTxMempool_Size(t *testing.T) {
txmp.Unlock()
require.Equal(t, len(rawTxs)/2, txmp.Size())
require.Equal(t, int64(2850), txmp.SizeBytes())
}
func TestTxMempool_Flush(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -225,7 +223,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -252,30 +250,30 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 50)
// reap by transaction bytes only
reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.GreaterOrEqual(t, len(reapedTxs), 16)
// Reap by both transaction bytes and gas, where the size yields 31 reaped
// transactions and the gas limit reaps 25 transactions.
reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 25)
}
func TestTxMempool_ReapMaxTxs(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -302,21 +300,21 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
reapedTxs := txmp.ReapMaxTxs(-1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs))
// reap a single transaction
reapedTxs = txmp.ReapMaxTxs(1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 1)
// reap half of the transactions
reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs)/2)
}
@@ -324,11 +322,17 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
txmp := setup(t, 0)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes+1)
_, err := rng.Read(tx)
require.NoError(t, err)
require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
tx = make([]byte, txmp.config.MaxTxBytes-1)
_, err = rng.Read(tx)
require.NoError(t, err)
require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
}
func TestTxMempool_CheckTxSamePeer(t *testing.T) {
@@ -431,3 +435,93 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
require.Zero(t, txmp.Size())
require.Zero(t, txmp.SizeBytes())
}
func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
txmp := setup(t, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, 100, txmp.heightIndex.Size())
// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
txmp.Unlock()
require.Equal(t, 95, txmp.Size())
require.Equal(t, 95, txmp.heightIndex.Size())
// check more txs at height 101
_ = checkTxs(t, txmp, 50, 1)
require.Equal(t, 145, txmp.Size())
require.Equal(t, 145, txmp.heightIndex.Size())
// Reap 5 txs at a height that would expire all the transactions from before
// the previous Update (height 100).
//
// NOTE: When we reap txs below, we do not know if we're picking txs from the
// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
// cannot guarantee that all 95 txs are remaining that should be expired and
// removed. However, we do know that at most 95 txs can be expired and
// removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
txmp.Unlock()
require.GreaterOrEqual(t, txmp.Size(), 45)
require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}
func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
cases := []struct {
name string
err error
}{
{
name: "error",
err: errors.New("test error"),
},
{
name: "no error",
err: nil,
},
}
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
txmp := setup(t, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
require.NoError(t, err)
callback := func(res *abci.Response) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
require.True(t, ok)
expectedErrString := ""
if testCase.err != nil {
expectedErrString = testCase.err.Error()
}
require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
}
require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0}))
})
}
}

View File

@@ -9,6 +9,7 @@ import (
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/p2p"
@@ -306,7 +307,7 @@ func (r *Reactor) processPeerUpdates() {
func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
peerMempoolID := r.ids.GetForPeer(peerID)
var nextGossipTx *clist.CElement
// remove the peer ID from the map of routines and mark the waitgroup as done
defer func() {
@@ -333,10 +334,10 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
// This happens because the CElement we were looking at got garbage
// collected (removed). That is, .NextWait() returned nil. Go ahead and
// start from the beginning.
if nextGossipTx == nil {
select {
case <-r.mempool.WaitForNextTx(): // wait until a tx is available
if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
continue
}
@@ -352,6 +353,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
}
memTx := nextGossipTx.Value.(*WrappedTx)
if r.peerMgr != nil {
height := r.peerMgr.GetHeight(peerID)
if height > 0 && height < memTx.height-1 {
@@ -380,16 +383,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
select {
case <-nextGossipTx.NextWaitChan():
nextGossipTx = nextGossipTx.Next()
case <-closer.Done():
// The peer is marked for removal via a PeerUpdate as the doneCh was

View File

@@ -1,6 +1,7 @@
package v1
import (
"sort"
"time"
"github.com/tendermint/tendermint/internal/libs/clist"
@@ -198,3 +199,84 @@ func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID ui
wtx.peers[peerID] = struct{}{}
return wtx, false
}
// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
// used to build generic transaction indexes in the mempool. It accepts a
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
// references and is used during Insert to determine the sorted order. If
// less returns true, a <= b.
type WrappedTxList struct {
mtx tmsync.RWMutex
txs []*WrappedTx
less func(*WrappedTx, *WrappedTx) bool
}
func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
return &WrappedTxList{
txs: make([]*WrappedTx, 0),
less: less,
}
}
// Size returns the number of WrappedTx objects in the list.
func (wtl *WrappedTxList) Size() int {
wtl.mtx.RLock()
defer wtl.mtx.RUnlock()
return len(wtl.txs)
}
// Reset resets the list of transactions to an empty list.
func (wtl *WrappedTxList) Reset() {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()
wtl.txs = make([]*WrappedTx, 0)
}
// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()
i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})
if i == len(wtl.txs) {
// insert at the end
wtl.txs = append(wtl.txs, wtx)
return
}
// Make space for the inserted element by shifting values at the insertion
// index up one index.
//
// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
wtl.txs[i] = wtx
}
// Remove attempts to remove a WrappedTx from the sorted list.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()
i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})
// Since the list is sorted, we evaluate all elements starting at i. Note, if
// the element does not exist, we may potentially evaluate the entire remainder
// of the list. However, a caller should not be expected to call Remove with a
// non-existing element.
for i < len(wtl.txs) {
if wtl.txs[i] == wtx {
wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
return
}
i++
}
}
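A brief usage sketch tying Insert and Remove together; with the height comparator the mempool uses above, insertion keeps heights ascending (reading list.txs directly here is for illustration only and bypasses the mutex):
// Sketch: a height-ordered WrappedTxList.
list := NewWrappedTxList(func(a, b *WrappedTx) bool {
	return a.height >= b.height
})
list.Insert(&WrappedTx{height: 7})
list.Insert(&WrappedTx{height: 3})
list.Insert(&WrappedTx{height: 5})
// heights are now [3, 5, 7]; list.Size() == 3
list.Remove(list.txs[1]) // removes the height-5 entry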

View File

@@ -2,6 +2,8 @@ package v1
import (
"fmt"
"math/rand"
"sort"
"testing"
"time"
@@ -132,3 +134,97 @@ func TestTxStore_Size(t *testing.T) {
require.Equal(t, numTxs, txStore.Size())
}
func TestWrappedTxList_Reset(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})
require.Zero(t, list.Size())
for i := 0; i < 100; i++ {
list.Insert(&WrappedTx{height: int64(i)})
}
require.Equal(t, 100, list.Size())
list.Reset()
require.Zero(t, list.Size())
}
func TestWrappedTxList_Insert(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
var expected []int
for i := 0; i < 100; i++ {
height := rng.Int63n(10000)
expected = append(expected, int(height))
list.Insert(&WrappedTx{height: height})
if i%10 == 0 {
list.Insert(&WrappedTx{height: height})
expected = append(expected, int(height))
}
}
got := make([]int, list.Size())
for i, wtx := range list.txs {
got[i] = int(wtx.height)
}
sort.Ints(expected)
require.Equal(t, expected, got)
}
func TestWrappedTxList_Remove(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
var txs []*WrappedTx
for i := 0; i < 100; i++ {
height := rng.Int63n(10000)
tx := &WrappedTx{height: height}
txs = append(txs, tx)
list.Insert(tx)
if i%10 == 0 {
tx = &WrappedTx{height: height}
list.Insert(tx)
txs = append(txs, tx)
}
}
// remove a tx that does not exist
list.Remove(&WrappedTx{height: 20000})
// remove a tx that exists (by height) but not referenced
list.Remove(&WrappedTx{height: txs[0].height})
// remove a few existing txs
for i := 0; i < 25; i++ {
j := rng.Intn(len(txs))
list.Remove(txs[j])
txs = append(txs[:j], txs[j+1:]...)
}
expected := make([]int, len(txs))
for i, tx := range txs {
expected[i] = int(tx.height)
}
got := make([]int, list.Size())
for i, wtx := range list.txs {
got[i] = int(wtx.height)
}
sort.Ints(expected)
require.Equal(t, expected, got)
}

View File

@@ -421,7 +421,6 @@ func (c *MConnection) CanSend(chID byte) bool {
// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
defer c._recover()
protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)
FOR_LOOP:

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks

View File

@@ -257,7 +257,11 @@ func (s *pqScheduler) process() {
s.metrics.PeerSendBytesTotal.With(
"chID", chIDStr,
"peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
select {
case s.dequeueCh <- pqEnv.envelope:
case <-s.closer.Done():
return
}
}
case <-s.closer.Done():

View File

@@ -0,0 +1,39 @@
package p2p
import (
"testing"
"time"
"github.com/tendermint/tendermint/libs/log"
)
func TestCloseWhileDequeueFull(t *testing.T) {
enqueueLength := 5
chDescs := []ChannelDescriptor{
{ID: 0x01, Priority: 1, MaxSendBytes: 4},
}
pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120)
for i := 0; i < enqueueLength; i++ {
pqueue.enqueue() <- Envelope{
channelID: 0x01,
Message: &testMessage{Value: "foo"}, // 5 bytes
}
}
go pqueue.process()
// sleep to allow context switch for process() to run
time.Sleep(10 * time.Millisecond)
doneCh := make(chan struct{})
go func() {
pqueue.close()
close(doneCh)
}()
select {
case <-doneCh:
case <-time.After(2 * time.Second):
t.Fatal("pqueue failed to close")
}
}

View File

@@ -1023,6 +1023,15 @@ func (r *Router) NodeInfo() types.NodeInfo {
// OnStart implements service.Service.
func (r *Router) OnStart() error {
netAddr, _ := r.nodeInfo.NetAddress()
r.Logger.Info(
"starting router",
"node_id", r.nodeInfo.NodeID,
"channels", r.nodeInfo.Channels,
"listen_addr", r.nodeInfo.ListenAddr,
"net_addr", netAddr,
)
go r.dialPeers()
go r.evictPeers()

View File

@@ -151,9 +151,6 @@ func TestMConnTransport_Listen(t *testing.T) {
[]*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}},
p2p.MConnTransportOptions{},
)
t.Cleanup(func() {
_ = transport.Close()
})
// Transport should not listen on any endpoints yet.
require.Empty(t, transport.Endpoints())
@@ -166,19 +163,6 @@ func TestMConnTransport_Listen(t *testing.T) {
}
require.NoError(t, err)
// Start a goroutine to just accept any connections.
go func() {
for {
conn, err := transport.Accept()
if err != nil {
return
}
defer func() {
_ = conn.Close()
}()
}
}()
// Check the endpoint.
endpoints := transport.Endpoints()
require.Len(t, endpoints, 1)
@@ -195,14 +179,40 @@ func TestMConnTransport_Listen(t *testing.T) {
require.NotZero(t, endpoint.Port)
require.Empty(t, endpoint.Path)
dialedChan := make(chan struct{})
var peerConn p2p.Connection
go func() {
// Dialing the endpoint should work.
var err error
peerConn, err = transport.Dial(ctx, endpoint)
require.NoError(t, err)
close(dialedChan)
}()
conn, err := transport.Accept()
require.NoError(t, err)
_ = conn.Close()
<-dialedChan
// closing the connection should not error
require.NoError(t, peerConn.Close())
// try to read from the connection should error
_, _, err = peerConn.ReceiveMessage()
require.Error(t, err)
// Trying to listen again should error.
err = transport.Listen(tc.endpoint)
require.Error(t, err)
// close the transport
_ = transport.Close()
// Dialing the closed endpoint should error
_, err = transport.Dial(ctx, endpoint)
require.Error(t, err)
})
}
}

View File

@@ -403,6 +403,9 @@ func TestConnection_SendReceive(t *testing.T) {
_, _, err = ab.ReceiveMessage()
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ab.TrySendMessage(chID, []byte("closed try"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ab.SendMessage(chID, []byte("closed"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
@@ -410,6 +413,9 @@ func TestConnection_SendReceive(t *testing.T) {
_, _, err = ba.ReceiveMessage()
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ba.TrySendMessage(chID, []byte("closed try"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ba.SendMessage(chID, []byte("closed"))
require.Error(t, err)
require.Equal(t, io.EOF, err)

View File

@@ -23,9 +23,10 @@ type blockQueue struct {
verifyHeight int64
// termination conditions
initialHeight int64
stopHeight int64
stopTime time.Time
terminal *types.LightBlock
// track failed heights so we know which blocks to try to fetch again
failed *maxIntHeap
@@ -45,21 +46,22 @@ type blockQueue struct {
}
func newBlockQueue(
startHeight, stopHeight, initialHeight int64,
stopTime time.Time,
maxRetries int,
) *blockQueue {
return &blockQueue{
stopHeight: stopHeight,
initialHeight: initialHeight,
stopTime: stopTime,
fetchHeight: startHeight,
verifyHeight: startHeight,
pending: make(map[int64]lightBlockResponse),
failed: &maxIntHeap{},
retries: 0,
maxRetries: maxRetries,
waiters: make([]chan int64, 0),
doneCh: make(chan struct{}),
}
}
@@ -93,9 +95,10 @@ func (q *blockQueue) add(l lightBlockResponse) {
q.pending[l.block.Height] = l
}
// Lastly, if the incoming block is past the stop time and stop height or
// is equal to the initial height then we mark it as the terminal block.
if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) ||
l.block.Height == q.initialHeight {
q.terminal = l.block
}
}
@@ -115,7 +118,7 @@ func (q *blockQueue) nextHeight() <-chan int64 {
return ch
}
if q.terminal == nil && q.fetchHeight >= q.initialHeight {
// return and decrement the fetch height
ch <- q.fetchHeight
q.fetchHeight--

View File

@@ -25,7 +25,7 @@ func TestBlockQueueBasic(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
wg := &sync.WaitGroup{}
// asynchronously fetch blocks and add it to the queue
@@ -72,7 +72,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 200)
wg := &sync.WaitGroup{}
failureRate := 4
@@ -121,7 +121,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
func TestBlockQueueBlocks(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2)
expectedHeight := startHeight
retryHeight := stopHeight + 2
@@ -168,7 +168,7 @@ loop:
func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
defer queue.close()
loop:
@@ -194,7 +194,7 @@ func TestBlockQueueStopTime(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
wg := &sync.WaitGroup{}
baseTime := stopTime.Add(-50 * time.Second)
@@ -233,6 +233,46 @@ func TestBlockQueueStopTime(t *testing.T) {
}
}
func TestBlockQueueInitialHeight(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
const initialHeight int64 = 120
queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, 1)
wg := &sync.WaitGroup{}
// asynchronously fetch blocks and add it to the queue
for i := 0; i <= numWorkers; i++ {
wg.Add(1)
go func() {
for {
select {
case height := <-queue.nextHeight():
require.GreaterOrEqual(t, height, initialHeight)
queue.add(mockLBResp(t, peerID, height, endTime))
case <-queue.done():
wg.Done()
return
}
}
}()
}
loop:
for {
select {
case <-queue.done():
wg.Wait()
require.NoError(t, queue.error())
break loop
case resp := <-queue.verifyNext():
require.GreaterOrEqual(t, resp.block.Height, initialHeight)
queue.success(resp.block.Height)
}
}
}
func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse {
return lightBlockResponse{
block: mockLB(t, height, time, factory.MakeBlockID()),

View File

@@ -49,13 +49,12 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
// in a list, tracks the call and waits for the reactor to pass along the response
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) {
d.mtx.Lock()
// check to see that the dispatcher is connected to at least one peer
if d.availablePeers.Len() == 0 && len(d.calls) == 0 {
d.mtx.Unlock()
return nil, "", errNoConnectedPeers
}
d.mtx.Unlock()
// fetch the next peer id in the list and request a light block from that
// peer

View File

@@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -17,6 +18,7 @@ import (
)
func TestDispatcherBasic(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
closeCh := make(chan struct{})
@@ -49,7 +51,83 @@ func TestDispatcherBasic(t *testing.T) {
wg.Wait()
}
func TestDispatcherReturnsNoBlock(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
d := newDispatcher(ch, 1*time.Second)
peerFromSet := createPeerSet(1)[0]
d.addPeer(peerFromSet)
doneCh := make(chan struct{})
go func() {
<-ch
require.NoError(t, d.respond(nil, peerFromSet))
close(doneCh)
}()
lb, peerResult, err := d.LightBlock(context.Background(), 1)
<-doneCh
require.Nil(t, lb)
require.Nil(t, err)
require.Equal(t, peerFromSet, peerResult)
}
func TestDispatcherErrorsWhenNoPeers(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
d := newDispatcher(ch, 1*time.Second)
lb, peerResult, err := d.LightBlock(context.Background(), 1)
require.Nil(t, lb)
require.Empty(t, peerResult)
require.Equal(t, errNoConnectedPeers, err)
}
func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) {
t.Cleanup(leaktest.Check(t))
dispatcherRequestCh := make(chan p2p.Envelope, 100)
d := newDispatcher(dispatcherRequestCh, 1*time.Second)
peerFromSet := createPeerSet(1)[0]
d.addPeer(peerFromSet)
ctx := context.Background()
wrapped, cancelFunc := context.WithCancel(ctx)
doneCh := make(chan struct{})
go func() {
lb, peerResult, err := d.LightBlock(wrapped, 1)
require.Nil(t, lb)
require.Equal(t, peerFromSet, peerResult)
require.Nil(t, err)
// calls to dispatcher.LightBlock write into the dispatcher's requestCh.
// we read from the requestCh here to unblock the requestCh for future
// calls.
<-dispatcherRequestCh
close(doneCh)
}()
cancelFunc()
<-doneCh
go func() {
<-dispatcherRequestCh
lb := &types.LightBlock{}
asProto, err := lb.ToProto()
require.Nil(t, err)
err = d.respond(asProto, peerFromSet)
require.Nil(t, err)
}()
lb, peerResult, err := d.LightBlock(context.Background(), 1)
require.NotNil(t, lb)
require.Equal(t, peerFromSet, peerResult)
require.Nil(t, err)
}
func TestDispatcherProviders(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
chainID := "state-sync-test"
@@ -78,6 +156,7 @@ func TestDispatcherProviders(t *testing.T) {
}
func TestPeerListBasic(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
assert.Zero(t, peerList.Len())
numPeers := 10
@@ -108,7 +187,52 @@ func TestPeerListBasic(t *testing.T) {
}
func TestPeerListBlocksWhenEmpty(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
require.Zero(t, peerList.Len())
doneCh := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
peerList.Pop(ctx)
close(doneCh)
}()
select {
case <-doneCh:
t.Error("empty peer list should not have returned result")
case <-time.After(100 * time.Millisecond):
}
}
func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
require.Zero(t, peerList.Len())
doneCh := make(chan struct{})
ctx := context.Background()
wrapped, cancel := context.WithCancel(ctx)
go func() {
peerList.Pop(wrapped)
close(doneCh)
}()
select {
case <-doneCh:
t.Error("empty peer list should not have returned result")
case <-time.After(100 * time.Millisecond):
}
cancel()
select {
case <-doneCh:
case <-time.After(100 * time.Millisecond):
t.Error("peer list should have returned after context canceled")
}
}
func TestPeerListConcurrent(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
numPeers := 10

View File

@@ -0,0 +1,50 @@
package statesync
import (
"context"
"time"
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
)
// MockSyncReactor is an autogenerated mock type for the SyncReactor type.
// Because Sync() takes a StateProvider from this package, the mock lives in
// package statesync instead of a separate mocks package.
type MockSyncReactor struct {
mock.Mock
}
// Backfill provides a mock function with given fields: _a0
func (_m *MockSyncReactor) Backfill(_a0 state.State) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(state.State) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// Sync provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockSyncReactor) Sync(_a0 context.Context, _a1 StateProvider, _a2 time.Duration) (state.State, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 state.State
if rf, ok := ret.Get(0).(func(context.Context, StateProvider, time.Duration) state.State); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Get(0).(state.State)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, StateProvider, time.Duration) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
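A hedged sketch of wiring this mock into a test with the standard testify expectations API (the matchers and return values are illustrative):
// Hypothetical test usage of MockSyncReactor.
r := new(MockSyncReactor)
r.On("Sync", mock.Anything, mock.Anything, mock.Anything).Return(state.State{}, nil)
r.On("Backfill", mock.Anything).Return(nil)
// ... exercise node.startStateSync (or similar) against r ...
r.AssertExpectations(t)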

View File

@@ -95,13 +95,19 @@ const (
// lightBlockResponseTimeout is how long the dispatcher waits for a peer to
// return a light block
lightBlockResponseTimeout = 30 * time.Second
// maxLightBlockRequestRetries is the amount of retries acceptable before
// the backfill process aborts
maxLightBlockRequestRetries = 20
)
// SyncReactor defines an interface used for testing the abilities of node.startStateSync.
type SyncReactor interface {
Sync(context.Context, StateProvider, time.Duration) (sm.State, error)
Backfill(sm.State) error
}
// Reactor handles state sync, both restoring snapshots for the local node and
// serving snapshots for other nodes.
type Reactor struct {
@@ -222,6 +228,11 @@ func (r *Reactor) Sync(
return sm.State{}, errors.New("a state sync is already in progress")
}
if stateProvider == nil {
r.mtx.Unlock()
return sm.State{}, errors.New("the stateProvider should not be nil when doing the state sync")
}
r.syncer = newSyncer(
r.cfg,
r.Logger,
@@ -281,7 +292,9 @@ func (r *Reactor) Backfill(state sm.State) error {
return r.backfill(
context.Background(),
state.ChainID,
state.LastBlockHeight,
stopHeight,
state.InitialHeight,
state.LastBlockID,
stopTime,
)
@@ -290,7 +303,7 @@ func (r *Reactor) Backfill(state sm.State) error {
func (r *Reactor) backfill(
ctx context.Context,
chainID string,
startHeight, stopHeight, initialHeight int64,
trustedBlockID types.BlockID,
stopTime time.Time,
) error {
@@ -303,7 +316,7 @@ func (r *Reactor) backfill(
lastChangeHeight int64 = startHeight
)
queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, maxLightBlockRequestRetries)
// fetch light blocks across four workers. The aim with deploying concurrent
// workers is to equate the network messaging time with the verification
@@ -458,10 +471,11 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error {
}
for _, snapshot := range snapshots {
logger.Info(
"advertising snapshot",
"height", snapshot.Height,
"format", snapshot.Format,
"peer", envelope.From,
)
r.snapshotCh.Out <- p2p.Envelope{
To: envelope.From,

View File

@@ -463,6 +463,7 @@ func TestReactor_Backfill(t *testing.T) {
factory.DefaultTestChainID,
startHeight,
stopHeight,
1,
factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
stopTime,
)

View File

@@ -80,12 +80,12 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
// snapshot height is verified using the light client, and the expected app hash
// is set for the snapshot.
func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) {
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
appHash, err := p.stateProvider.AppHash(ctx, snapshot.Height)
if err != nil {
return false, fmt.Errorf("failed to get app hash: %w", err)
}
snapshot.trustedAppHash = appHash
key := snapshot.Key()

View File

@@ -94,7 +94,7 @@ func NewLightClientStateProviderFromDispatcher(
trustOptions light.TrustOptions,
logger log.Logger,
) (StateProvider, error) {
providers := dispatcher.Providers(chainID, 30*time.Second)
if len(providers) < 2 {
return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers))
}

View File

@@ -277,7 +277,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu
go s.fetchChunks(fetchCtx, snapshot, chunks)
}
pctx, pcancel := context.WithTimeout(ctx, 30*time.Second)
defer pcancel()
// Optimistically build new state, so we don't discover any light client failures at the end.

View File

@@ -33,11 +33,8 @@ func GetFreePort() (int, error) {
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}

View File

@@ -478,7 +478,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now
}
if !bytes.Equal(l.Hash(), newHeader.Hash()) {
return fmt.Errorf("light block header %X does not match newHeader %X", l.Hash(), newHeader.Hash())
return fmt.Errorf("header from primary %X does not match newHeader %X", l.Hash(), newHeader.Hash())
}
return c.verifyLightBlock(ctx, l, now)
@@ -586,7 +586,7 @@ func (c *Client) verifySequential(
}
// If some intermediate header is invalid, remove the primary and try again.
c.logger.Error("primary sent invalid header -> removing", "err", err, "primary", c.primary)
c.logger.Info("primary sent invalid header -> removing", "err", err, "primary", c.primary)
replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, true)
if removeErr != nil {
@@ -630,6 +630,10 @@ func (c *Client) verifySequential(
// requested from source is kept such that when a verification is made, and the
// light client tries again to verify the new light block in the middle, the light
// client does not need to ask for all the same light blocks again.
//
// If this function errors, it should always wrap the error in an
// `ErrVerificationFailed` struct so that the calling function can determine
// where it failed and handle it accordingly.
func (c *Client) verifySkipping(
ctx context.Context,
source provider.Provider,
@@ -690,20 +694,10 @@ func (c *Client) verifySkipping(
// schedule what the next height we need to fetch is
pivotHeight := c.schedule(verifiedBlock.Height, blockCache[depth].Height)
interimBlock, providerErr := source.LightBlock(ctx, pivotHeight)
if providerErr != nil {
return nil, ErrVerificationFailed{From: verifiedBlock.Height, To: pivotHeight, Reason: providerErr}
}
blockCache = append(blockCache, interimBlock)
depth++
@@ -729,33 +723,8 @@ func (c *Client) verifySkippingAgainstPrimary(
now time.Time) error {
trace, err := c.verifySkipping(ctx, c.primary, trustedBlock, newLightBlock, now)
switch errors.Unwrap(err).(type) {
case ErrInvalidHeader:
// If the target header is invalid, return immediately.
invalidHeaderHeight := err.(ErrVerificationFailed).To
if invalidHeaderHeight == newLightBlock.Height {
c.logger.Debug("target header is invalid", "err", err)
return err
}
// If some intermediate header is invalid, remove the primary and try again.
c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary)
replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, true)
if removeErr != nil {
c.logger.Error("failed to replace primary. Returning original error", "err", removeErr)
return err
}
if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) {
c.logger.Debug("replaced primary but new primary has a different block to the initial one. Returning original error")
return err
}
// attempt to verify the header again
return c.verifySkippingAgainstPrimary(ctx, trustedBlock, replacementBlock, now)
case nil:
// Compare header with the witnesses to ensure it's not a fork.
if err == nil {
// Success! Now compare the header with the witnesses to ensure it's not a fork.
// The more witnesses we have, the greater the chance of noticing one.
//
// CORRECTNESS ASSUMPTION: there's at least 1 correct full node
@@ -763,11 +732,62 @@ func (c *Client) verifySkippingAgainstPrimary(
if cmpErr := c.detectDivergence(ctx, trace, now); cmpErr != nil {
return cmpErr
}
default:
}
var e = &ErrVerificationFailed{}
// all errors from verify skipping should be `ErrVerificationFailed`
// if it's not we just return the error directly
if !errors.As(err, e) {
return err
}
return nil
replace := true
switch e.Reason.(type) {
// Verification returned an invalid header
case ErrInvalidHeader:
// If it was the target header, return immediately.
if e.To == newLightBlock.Height {
c.logger.Debug("target header is invalid", "err", err)
return err
}
// If some intermediate header is invalid, remove the primary and try
// again.
// An intermediate header expired. We can no longer validate it, as there is
// no longer any way to punish invalid blocks as evidence of misbehavior
case ErrOldHeaderExpired:
return err
// This happens if there was a problem in finding the next block or a
// context was canceled.
default:
if errors.Is(e.Reason, context.Canceled) || errors.Is(e.Reason, context.DeadlineExceeded) {
return e.Reason
}
if !c.providerShouldBeRemoved(e.Reason) {
replace = false
}
}
// if we've reached here we're attempting to retry verification with a
// different provider
c.logger.Info("primary returned error", "err", e, "primary", c.primary, "replace", replace)
replacementBlock, removeErr := c.findNewPrimary(ctx, newLightBlock.Height, replace)
if removeErr != nil {
c.logger.Error("failed to replace primary. Returning original error", "err", removeErr)
return e.Reason
}
if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) {
c.logger.Debug("replaced primary but new primary has a different block to the initial one. Returning original error")
return e.Reason
}
// attempt to verify the header again from the trusted block
return c.verifySkippingAgainstPrimary(ctx, trustedBlock, replacementBlock, now)
}
// LastTrustedHeight returns a last trusted height. -1 and nil are returned if
@@ -811,6 +831,15 @@ func (c *Client) Witnesses() []provider.Provider {
return c.witnesses
}
// AddProvider adds a provider to the light client's set
//
// NOTE: The light client does not check for uniqueness
func (c *Client) AddProvider(p provider.Provider) {
c.providerMutex.Lock()
defer c.providerMutex.Unlock()
c.witnesses = append(c.witnesses, p)
}
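A minimal usage sketch for the new method, following the non-uniqueness note above (c is an existing client and p a provider.Provider; the dedup guard is the caller's responsibility and purely illustrative):

// The client does not deduplicate providers, so callers that care
// should check the witness list before adding.
known := false
for _, w := range c.Witnesses() {
	if w == p {
		known = true
		break
	}
}
if !known {
	c.AddProvider(p)
}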
// Cleanup removes all the data (headers and validator sets) stored. Note: the
// client must be stopped at this point.
func (c *Client) Cleanup() error {
@@ -865,7 +894,7 @@ func (c *Client) backwards(
"newHash", interimHeader.Hash())
if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil {
// verification has failed
c.logger.Error("backwards verification failed, replacing primary...", "err", err, "primary", c.primary)
c.logger.Info("backwards verification failed, replacing primary...", "err", err, "primary", c.primary)
// the client tries to see if it can get a witness to continue with the request
newPrimarysBlock, replaceErr := c.findNewPrimary(ctx, newHeader.Height, true)
@@ -915,14 +944,14 @@ func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*type
case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
// we find a new witness to replace the primary
c.logger.Debug("error from light block request from primary, replacing...",
c.logger.Info("error from light block request from primary, replacing...",
"error", err, "height", height, "primary", c.primary)
return c.findNewPrimary(ctx, height, false)
default:
// The light client has most likely received either provider.ErrUnreliableProvider or provider.ErrBadLightBlock
// These errors mean that the light client should drop the primary and try with another provider instead
c.logger.Error("error from light block request from primary, removing...",
c.logger.Info("error from light block request from primary, removing...",
"error", err, "height", height, "primary", c.primary)
return c.findNewPrimary(ctx, height, true)
}
@@ -1022,7 +1051,7 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool)
// process benign errors by logging them only
case provider.ErrNoResponse, provider.ErrLightBlockNotFound, provider.ErrHeightTooHigh:
lastError = response.err
c.logger.Debug("error on light block request from witness",
c.logger.Info("error on light block request from witness",
"error", response.err, "primary", c.witnesses[response.witnessIndex])
continue
@@ -1066,13 +1095,13 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S
case nil:
continue
case errConflictingHeaders:
c.logger.Error(fmt.Sprintf(`witness #%d has a different header. Please check primary is correct
and remove witness. Otherwise, use a different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex])
c.logger.Error(`witness has a different header. Please check primary is
correct and remove witness. Otherwise, use a different primary`,
"Witness", c.witnesses[e.WitnessIndex], "ExpHeader", h.Hash(), "GotHeader", e.Block.Hash())
return err
case errBadWitness:
// If witness sent us an invalid header, then remove it
c.logger.Info("witness sent an invalid light block or didn't respond, removing...",
"witness", c.witnesses[e.WitnessIndex],
c.logger.Info("witness returned an error, removing...",
"err", err)
witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
default:
@@ -1082,7 +1111,7 @@ and remove witness. Otherwise, use a different primary`, e.WitnessIndex), "witne
}
// the witness either didn't respond or didn't have the block. We ignore it.
c.logger.Debug("unable to compare first header with witness",
c.logger.Debug("unable to compare first header with witness, ignoring",
"err", err)
}
@@ -1091,3 +1120,11 @@ and remove witness. Otherwise, use a different primary`, e.WitnessIndex), "witne
// remove all witnesses that misbehaved
return c.removeWitnesses(witnessesToRemove)
}
// providerShouldBeRemoved analyzes the nature of the error and decides whether the provider
// should be removed from the light client's set
func (c *Client) providerShouldBeRemoved(err error) bool {
return errors.As(err, &provider.ErrUnreliableProvider{}) ||
errors.As(err, &provider.ErrBadLightBlock{}) ||
errors.Is(err, provider.ErrConnectionClosed)
}
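Taken together, the changes above give callers a single error surface: failures out of verifySkipping arrive wrapped in ErrVerificationFailed, whose From, To and Reason fields locate the failure, and providerShouldBeRemoved decides the provider's fate. A minimal sketch of inspecting the wrapper with errors.As, using only the fields shown above (the logging is illustrative):

var e = &ErrVerificationFailed{}
if errors.As(err, e) {
	// e.From and e.To bound the height range that failed to verify;
	// e.Reason carries the underlying provider or verification error.
	logger.Info("verification failed", "from", e.From, "to", e.To, "reason", e.Reason)
	if c.providerShouldBeRemoved(e.Reason) {
		// drop the provider and retry with a replacement
	}
}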

View File

@@ -10,8 +10,8 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
mockp "github.com/tendermint/tendermint/light/provider/mock"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
// NOTE: block is produced every minute. Make sure the verification time
@@ -21,12 +21,50 @@ import (
// or -benchtime 100x.
//
// Remember that none of these benchmarks account for network latency.
var (
benchmarkFullNode = mockp.New(genMockNode(chainID, 1000, 100, 1, bTime))
genesisBlock, _ = benchmarkFullNode.LightBlock(context.Background(), 1)
)
var ()
type providerBenchmarkImpl struct {
currentHeight int64
blocks map[int64]*types.LightBlock
}
func newProviderBenchmarkImpl(headers map[int64]*types.SignedHeader,
vals map[int64]*types.ValidatorSet) provider.Provider {
impl := providerBenchmarkImpl{
blocks: make(map[int64]*types.LightBlock, len(headers)),
}
for height, header := range headers {
if height > impl.currentHeight {
impl.currentHeight = height
}
impl.blocks[height] = &types.LightBlock{
SignedHeader: header,
ValidatorSet: vals[height],
}
}
return &impl
}
func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
if height == 0 {
return impl.blocks[impl.currentHeight], nil
}
lb, ok := impl.blocks[height]
if !ok {
return nil, provider.ErrLightBlockNotFound
}
return lb, nil
}
func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error {
panic("not implemented")
}
func BenchmarkSequence(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1)
c, err := light.NewClient(
context.Background(),
chainID,
@@ -55,6 +93,10 @@ func BenchmarkSequence(b *testing.B) {
}
func BenchmarkBisection(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1)
c, err := light.NewClient(
context.Background(),
chainID,
@@ -82,7 +124,10 @@ func BenchmarkBisection(b *testing.B) {
}
func BenchmarkBackwards(b *testing.B) {
headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime)
benchmarkFullNode := newProviderBenchmarkImpl(headers, vals)
trustedBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 0)
c, err := light.NewClient(
context.Background(),
chainID,

View File

@@ -2,11 +2,14 @@ package light_test
import (
"context"
"errors"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@@ -15,7 +18,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
mockp "github.com/tendermint/tendermint/light/provider/mock"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
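These tests lean on a mockNodeFromHeadersAndVals helper that this diff view does not show. A plausible minimal sketch, assuming the mockery-generated provider_mocks.Provider and the testify mock package imported above:

func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader,
	vals map[int64]*types.ValidatorSet) *provider_mocks.Provider {
	mockNode := &provider_mocks.Provider{}
	for height, header := range headers {
		lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[height]}
		// each known height answers with its light block
		mockNode.On("LightBlock", mock.Anything, height).Return(lb, nil)
	}
	return mockNode
}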
@@ -56,14 +59,9 @@ var (
// last header (3/3 signed)
3: h3,
}
l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals}
fullNode = mockp.New(
chainID,
headerSet,
valSet,
)
deadNode = mockp.NewDeadMock(chainID)
largeFullNode = mockp.New(genMockNode(chainID, 10, 3, 0, bTime))
l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals}
l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals}
l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals}
)
func TestValidateTrustOptions(t *testing.T) {
@@ -112,11 +110,6 @@ func TestValidateTrustOptions(t *testing.T) {
}
func TestMock(t *testing.T) {
l, _ := fullNode.LightBlock(ctx, 3)
assert.Equal(t, int64(3), l.Height)
}
func TestClient_SequentialVerification(t *testing.T) {
newKeys := genPrivKeys(4)
newVals := newKeys.ToValidators(10, 1)
@@ -215,28 +208,22 @@ func TestClient_SequentialVerification(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
),
[]provider.Provider{mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
)},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
light.SequentialVerification(),
light.Logger(log.TestingLogger()),
)
if tc.initErr {
if testCase.initErr {
require.Error(t, err)
return
}
@@ -244,11 +231,12 @@ func TestClient_SequentialVerification(t *testing.T) {
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour))
if tc.verifyErr {
if testCase.verifyErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
mockNode.AssertExpectations(t)
})
}
}
@@ -342,20 +330,14 @@ func TestClient_SkippingVerification(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals)
mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
),
[]provider.Provider{mockp.New(
chainID,
tc.otherHeaders,
tc.vals,
)},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
light.Logger(log.TestingLogger()),
@@ -381,8 +363,23 @@ func TestClient_SkippingVerification(t *testing.T) {
// start from a large light block to make sure that the pivot height doesn't select a height outside
// the appropriate range
func TestClientLargeBisectionVerification(t *testing.T) {
veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime))
trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5)
numBlocks := int64(300)
mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, numBlocks, 101, 2, bTime)
lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]}
mockNode := &provider_mocks.Provider{}
mockNode.On("LightBlock", mock.Anything, numBlocks).
Return(lastBlock, nil)
mockNode.On("LightBlock", mock.Anything, int64(200)).
Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil)
mockNode.On("LightBlock", mock.Anything, int64(256)).
Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil)
mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200))
require.NoError(t, err)
c, err := light.NewClient(
ctx,
@@ -392,20 +389,25 @@ func TestClientLargeBisectionVerification(t *testing.T) {
Height: trustedLightBlock.Height,
Hash: trustedLightBlock.Hash(),
},
veryLargeFullNode,
[]provider.Provider{veryLargeFullNode},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
h, err := c.Update(ctx, bTime.Add(100*time.Minute))
h, err := c.Update(ctx, bTime.Add(300*time.Minute))
assert.NoError(t, err)
h2, err := veryLargeFullNode.LightBlock(ctx, 100)
height, err := c.LastTrustedHeight()
require.NoError(t, err)
require.Equal(t, numBlocks, height)
h2, err := mockNode.LightBlock(ctx, numBlocks)
require.NoError(t, err)
assert.Equal(t, h, h2)
mockNode.AssertExpectations(t)
}
func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet)
c, err := light.NewClient(
ctx,
chainID,
@@ -414,8 +416,8 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
Height: 1,
Hash: h1.Hash(),
},
fullNode,
[]provider.Provider{fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
@@ -431,15 +433,18 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
// verify using bisection the light block between the two trusted light blocks
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
assert.NoError(t, err)
mockFullNode.AssertExpectations(t)
}
func TestClient_Cleanup(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -454,12 +459,14 @@ func TestClient_Cleanup(t *testing.T) {
l, err := c.TrustedLightBlock(1)
assert.Error(t, err)
assert.Nil(t, l)
mockFullNode.AssertExpectations(t)
}
// trustedHeader.Height == options.Height
func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
{
t.Run("hashes should match", func(t *testing.T) {
mockNode := &provider_mocks.Provider{}
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@@ -468,8 +475,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{fullNode},
mockNode,
[]provider.Provider{mockNode},
trustedStore,
light.Logger(log.TestingLogger()),
)
@@ -480,10 +487,11 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
assert.NotNil(t, l)
assert.Equal(t, l.Hash(), h1.Hash())
assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes())
}
mockNode.AssertExpectations(t)
})
// 2. options.Hash != trustedHeader.Hash
{
t.Run("hashes should not match", func(t *testing.T) {
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@@ -491,15 +499,7 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
// header1 != h1
header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys))
primary := mockp.New(
chainID,
map[int64]*types.SignedHeader{
// trusted header
1: header1,
},
valSet,
)
mockNode := &provider_mocks.Provider{}
c, err := light.NewClient(
ctx,
@@ -509,8 +509,8 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
Height: 1,
Hash: header1.Hash(),
},
primary,
[]provider.Provider{primary},
mockNode,
[]provider.Provider{mockNode},
trustedStore,
light.Logger(log.TestingLogger()),
)
@@ -523,16 +523,21 @@ func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) {
assert.Equal(t, l.Hash(), l1.Hash())
assert.NoError(t, l.ValidateBasic(chainID))
}
}
mockNode.AssertExpectations(t)
})
}
func TestClient_Update(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -545,15 +550,19 @@ func TestClient_Update(t *testing.T) {
assert.EqualValues(t, 3, l.Height)
assert.NoError(t, l.ValidateBasic(chainID))
}
mockFullNode.AssertExpectations(t)
}
func TestClient_Concurrency(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil)
mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -586,15 +595,56 @@ func TestClient_Concurrency(t *testing.T) {
}
wg.Wait()
mockFullNode.AssertExpectations(t)
}
func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
func TestClient_AddProviders(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{
1: h1,
2: h2,
}, valSet)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
deadNode,
[]provider.Provider{fullNode, fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
closeCh := make(chan struct{})
go func() {
// run verification concurrently to make sure it doesn't deadlock
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour))
require.NoError(t, err)
close(closeCh)
}()
// NOTE: the light client doesn't check uniqueness of providers
c.AddProvider(mockFullNode)
require.Len(t, c.Witnesses(), 2)
select {
case <-closeCh:
case <-time.After(5 * time.Second):
t.Fatal("concurent light block verification failed to finish in 5s")
}
mockFullNode.AssertExpectations(t)
}
func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
mockFullNode := &provider_mocks.Provider{}
mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil)
mockDeadNode := &provider_mocks.Provider{}
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockDeadNode,
[]provider.Provider{mockFullNode, mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -604,16 +654,25 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
require.NoError(t, err)
// the primary should no longer be the deadNode
assert.NotEqual(t, c.Primary(), deadNode)
assert.NotEqual(t, c.Primary(), mockDeadNode)
// we should still have the dead node as a witness because it
// hasn't repeatedly been unresponsive yet
assert.Equal(t, 2, len(c.Witnesses()))
mockDeadNode.AssertExpectations(t)
mockFullNode.AssertExpectations(t)
}
func TestClient_BackwardsVerification(t *testing.T) {
{
trustHeader, _ := largeFullNode.LightBlock(ctx, 6)
headers, vals, _ := genLightBlocksWithKeys(chainID, 9, 3, 0, bTime)
delete(headers, 1)
delete(headers, 2)
delete(vals, 1)
delete(vals, 2)
mockLargeFullNode := mockNodeFromHeadersAndVals(headers, vals)
trustHeader, _ := mockLargeFullNode.LightBlock(ctx, 6)
c, err := light.NewClient(
ctx,
chainID,
@@ -622,8 +681,8 @@ func TestClient_BackwardsVerification(t *testing.T) {
Height: trustHeader.Height,
Hash: trustHeader.Hash(),
},
largeFullNode,
[]provider.Provider{largeFullNode},
mockLargeFullNode,
[]provider.Provider{mockLargeFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -661,41 +720,36 @@ func TestClient_BackwardsVerification(t *testing.T) {
// so expect error
_, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute))
assert.Error(t, err)
mockLargeFullNode.AssertExpectations(t)
}
{
testCases := []struct {
provider provider.Provider
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
}{
{
// 7) provides incorrect height
mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
3: h3,
},
valSet,
),
headers: map[int64]*types.SignedHeader{
2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)),
3: h3,
},
vals: valSet,
},
{
// 8) provides incorrect hash
mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)),
3: h3,
},
valSet,
),
headers: map[int64]*types.SignedHeader{
2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)),
3: h3,
},
vals: valSet,
},
}
for idx, tc := range testCases {
mockNode := mockNodeFromHeadersAndVals(tc.headers, tc.vals)
c, err := light.NewClient(
ctx,
chainID,
@@ -704,8 +758,8 @@ func TestClient_BackwardsVerification(t *testing.T) {
Height: 3,
Hash: h3.Hash(),
},
tc.provider,
[]provider.Provider{tc.provider},
mockNode,
[]provider.Provider{mockNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -713,6 +767,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second))
assert.Error(t, err, idx)
mockNode.AssertExpectations(t)
}
}
}
@@ -722,60 +777,62 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) {
db := dbs.New(dbm.NewMemDB())
err := db.SaveLightBlock(l1)
require.NoError(t, err)
mockNode := &provider_mocks.Provider{}
c, err := light.NewClientFromTrustedStore(
chainID,
trustPeriod,
deadNode,
[]provider.Provider{deadNode},
mockNode,
[]provider.Provider{mockNode},
db,
)
require.NoError(t, err)
// 2) Check light block exists (deadNode is being used to ensure we're not getting
// it from primary)
// 2) Check light block exists
h, err := c.TrustedLightBlock(1)
assert.NoError(t, err)
assert.EqualValues(t, l1.Height, h.Height)
mockNode.AssertExpectations(t)
}
func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
// different header hash than the primary, plus less than 1/3 signed (no fork)
badProvider1 := mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
len(keys), len(keys), types.BlockID{Hash: h1.Hash()}),
},
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
},
)
// header is empty
badProvider2 := mockp.New(
chainID,
map[int64]*types.SignedHeader{
1: h1,
2: h2,
},
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
},
)
headers1 := map[int64]*types.SignedHeader{
1: h1,
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
len(keys), len(keys), types.BlockID{Hash: h1.Hash()}),
}
vals1 := map[int64]*types.ValidatorSet{
1: vals,
2: vals,
}
mockBadNode1 := mockNodeFromHeadersAndVals(headers1, vals1)
mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
lb1, _ := badProvider1.LightBlock(ctx, 2)
// header is empty
headers2 := map[int64]*types.SignedHeader{
1: h1,
2: h2,
}
vals2 := map[int64]*types.ValidatorSet{
1: vals,
2: vals,
}
mockBadNode2 := mockNodeFromHeadersAndVals(headers2, vals2)
mockBadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet)
lb1, _ := mockBadNode1.LightBlock(ctx, 2)
require.NotEqual(t, lb1.Hash(), l1.Hash())
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{badProvider1, badProvider2},
mockFullNode,
[]provider.Provider{mockBadNode1, mockBadNode2},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -797,12 +854,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
}
// witness does not have a light block -> left in the list
assert.EqualValues(t, 1, len(c.Witnesses()))
mockBadNode1.AssertExpectations(t)
mockBadNode2.AssertExpectations(t)
}
func TestClient_TrustedValidatorSet(t *testing.T) {
differentVals, _ := factory.RandValidatorSet(10, 100)
badValSetNode := mockp.New(
chainID,
mockBadValSetNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,
// 3/3 signed, but validator set at height 2 below is invalid -> witness
@@ -810,21 +868,27 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
0, len(keys), types.BlockID{Hash: h1.Hash()}),
3: h3,
},
map[int64]*types.ValidatorSet{
1: vals,
2: differentVals,
3: differentVals,
})
mockFullNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,
2: h2,
},
)
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
})
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{badValSetNode, fullNode},
mockFullNode,
[]provider.Provider{mockBadValSetNode, mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
@@ -834,15 +898,29 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second))
assert.NoError(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockBadValSetNode.AssertExpectations(t)
mockFullNode.AssertExpectations(t)
}
func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
mockFullNode := mockNodeFromHeadersAndVals(
map[int64]*types.SignedHeader{
1: h1,
3: h3,
0: h3,
},
map[int64]*types.ValidatorSet{
1: vals,
3: vals,
0: vals,
})
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
fullNode,
[]provider.Provider{fullNode},
mockFullNode,
[]provider.Provider{mockFullNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.PruningSize(1),
@@ -857,6 +935,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
_, err = c.TrustedLightBlock(1)
assert.Error(t, err)
mockFullNode.AssertExpectations(t)
}
func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
@@ -868,64 +947,140 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
testCases := []struct {
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
err bool
errorToThrow error
errorHeight int64
err bool
}{
{
headerSet,
valSet,
false,
},
{
headerSet,
map[int64]*types.ValidatorSet{
1: vals,
2: vals,
3: nil,
},
true,
},
{
map[int64]*types.SignedHeader{
headers: map[int64]*types.SignedHeader{
1: h1,
2: h2,
3: nil,
3: h3,
},
valSet,
true,
vals: map[int64]*types.ValidatorSet{
1: vals,
3: vals,
},
err: false,
},
{
headerSet,
map[int64]*types.ValidatorSet{
headers: map[int64]*types.SignedHeader{
1: h1,
},
vals: map[int64]*types.ValidatorSet{
1: vals,
},
errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")},
errorHeight: 3,
err: true,
},
{
headers: map[int64]*types.SignedHeader{
1: h1,
},
errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")},
errorHeight: 3,
vals: valSet,
err: true,
},
{
headers: map[int64]*types.SignedHeader{
1: h1,
3: h3,
},
vals: map[int64]*types.ValidatorSet{
1: vals,
2: vals,
3: emptyValSet,
},
true,
err: true,
},
}
for _, tc := range testCases {
badNode := mockp.New(
chainID,
tc.headers,
tc.vals,
)
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
badNode,
[]provider.Provider{badNode, badNode},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
for i, tc := range testCases {
testCase := tc
t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) {
mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals)
if testCase.errorToThrow != nil {
mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow)
}
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour))
if tc.err {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockBadNode,
[]provider.Provider{mockBadNode, mockBadNode},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour))
if testCase.err {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
mockBadNode.AssertExpectations(t)
})
}
}
func TestClientHandlesContexts(t *testing.T) {
mockNode := &provider_mocks.Provider{}
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == nil }),
int64(1)).Return(l1, nil)
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.DeadlineExceeded }),
mock.Anything).Return(nil, context.DeadlineExceeded)
mockNode.On("LightBlock",
mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.Canceled }),
mock.Anything).Return(nil, context.Canceled)
// instantiate the light client with a timeout
ctxTimeOut, cancel := context.WithTimeout(ctx, 1*time.Nanosecond)
defer cancel()
_, err := light.NewClient(
ctxTimeOut,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
dbs.New(dbm.NewMemDB()),
)
require.Error(t, ctxTimeOut.Err())
require.Error(t, err)
require.True(t, errors.Is(err, context.DeadlineExceeded))
// instantiate the client for real
c, err := light.NewClient(
ctx,
chainID,
trustOptions,
mockNode,
[]provider.Provider{mockNode, mockNode},
dbs.New(dbm.NewMemDB()),
)
require.NoError(t, err)
// verify a block with a timeout
ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 1*time.Nanosecond)
defer cancel()
_, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute))
require.Error(t, ctxTimeOutBlock.Err())
require.Error(t, err)
require.True(t, errors.Is(err, context.DeadlineExceeded))
// verify a block with a cancel
ctxCancel, cancel := context.WithCancel(ctx)
cancel()
_, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute))
require.Error(t, ctxCancel.Err())
require.Error(t, err)
require.True(t, errors.Is(err, context.Canceled))
mockNode.AssertExpectations(t)
}

View File

@@ -78,7 +78,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
"witness", c.witnesses[e.WitnessIndex], "err", err)
witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
default:
c.logger.Debug("error in light block request to witness", "err", err)
if errors.Is(e, context.Canceled) || errors.Is(e, context.DeadlineExceeded) {
return e
}
c.logger.Info("error in light block request to witness", "err", err)
}
}
@@ -108,6 +111,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
witness provider.Provider, witnessIndex int) {
lightBlock, err := witness.LightBlock(ctx, h.Height)
// fmt.Println(lightBlock.SignedHeader)
switch err {
// no error means we move on to checking the hash of the two headers
case nil:
@@ -115,7 +119,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
// the witness hasn't been helpful in comparing headers, we mark the response and continue
// comparing with the rest of the witnesses
case provider.ErrNoResponse, provider.ErrLightBlockNotFound:
case provider.ErrNoResponse, provider.ErrLightBlockNotFound, context.DeadlineExceeded, context.Canceled:
errc <- err
return
@@ -128,7 +132,11 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
var isTargetHeight bool
isTargetHeight, lightBlock, err = c.getTargetBlockOrLatest(ctx, h.Height, witness)
if err != nil {
errc <- err
if c.providerShouldBeRemoved(err) {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
} else {
errc <- err
}
return
}
@@ -152,7 +160,11 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro
time.Sleep(2*c.maxClockDrift + c.maxBlockLag)
isTargetHeight, lightBlock, err = c.getTargetBlockOrLatest(ctx, h.Height, witness)
if err != nil {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
if c.providerShouldBeRemoved(err) {
errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex}
} else {
errc <- err
}
return
}
if isTargetHeight {

View File

@@ -1,10 +1,12 @@
package light_test
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@@ -12,7 +14,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
mockp "github.com/tendermint/tendermint/light/provider/mock"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
dbs "github.com/tendermint/tendermint/light/store/db"
"github.com/tendermint/tendermint/types"
)
@@ -20,15 +22,15 @@ import (
func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
// primary performs a lunatic attack
var (
latestHeight = int64(10)
latestHeight = int64(3)
valSize = 5
divergenceHeight = int64(6)
divergenceHeight = int64(2)
primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight)
primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight)
)
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime)
forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain)
forgedVals := forgedKeys.ToValidators(2, 0)
@@ -42,7 +44,38 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
nil, forgedVals, forgedVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(forgedKeys))
primaryValidators[height] = forgedVals
}
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
// never called, so delete these heights to make the mockery assertions pass
delete(witnessHeaders, 2)
delete(primaryHeaders, 2)
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstPrimary := &types.LightClientAttackEvidence{
// after the divergence height the valset doesn't change so we expect the evidence to be for the latest height
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[latestHeight],
ValidatorSet: primaryValidators[latestHeight],
},
CommonHeight: 1,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil)
mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstWitness := &types.LightClientAttackEvidence{
// when forming evidence against witness we learn that the canonical chain continued to change validator sets
// hence the conflicting block is at divergenceHeight+1
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[divergenceHeight+1],
ValidatorSet: witnessValidators[divergenceHeight+1],
},
CommonHeight: divergenceHeight - 1,
}
return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash())
})).Return(nil)
c, err := light.NewClient(
ctx,
@@ -52,121 +85,134 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
primary,
[]provider.Provider{witness},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, latestHeight, bTime.Add(1*time.Hour))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
// Check evidence was sent to both full nodes.
evAgainstPrimary := &types.LightClientAttackEvidence{
// after the divergence height the valset doesn't change so we expect the evidence to be for height 10
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[10],
ValidatorSet: primaryValidators[10],
},
CommonHeight: 4,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
evAgainstWitness := &types.LightClientAttackEvidence{
// when forming evidence against witness we learn that the canonical chain continued to change validator sets
// hence the conflicting block is at 7
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[7],
ValidatorSet: witnessValidators[7],
},
CommonHeight: 4,
}
assert.True(t, primary.HasEvidence(evAgainstWitness))
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
func TestLightClientAttackEvidence_Equivocation(t *testing.T) {
verificationOptions := map[string]light.Option{
"sequential": light.SequentialVerification(),
"skipping": light.SkippingVerification(light.DefaultTrustLevel),
cases := []struct {
name string
lightOption light.Option
unusedWitnessBlockHeights []int64
unusedPrimaryBlockHeights []int64
latestHeight int64
divergenceHeight int64
}{
{
name: "sequential",
lightOption: light.SequentialVerification(),
unusedWitnessBlockHeights: []int64{4, 6},
latestHeight: int64(5),
divergenceHeight: int64(3),
},
{
name: "skipping",
lightOption: light.SkippingVerification(light.DefaultTrustLevel),
unusedWitnessBlockHeights: []int64{2, 4, 6},
unusedPrimaryBlockHeights: []int64{2, 4, 6},
latestHeight: int64(5),
divergenceHeight: int64(3),
},
}
for s, verificationOption := range verificationOptions {
t.Log("==> verification", s)
// primary performs an equivocation attack
var (
latestHeight = int64(10)
valSize = 5
divergenceHeight = int64(6)
primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight)
primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight)
)
// validators don't change in this network (however we still use a map just for convenience)
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
for height := int64(1); height <= latestHeight; height++ {
if height < divergenceHeight {
primaryHeaders[height] = witnessHeaders[height]
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
// primary performs an equivocation attack
var (
valSize = 5
primaryHeaders = make(map[int64]*types.SignedHeader, testCase.latestHeight)
// validators don't change in this network (however we still use a map just for convenience)
primaryValidators = make(map[int64]*types.ValidatorSet, testCase.latestHeight)
)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID,
testCase.latestHeight+1, valSize, 2, bTime)
for height := int64(1); height <= testCase.latestHeight; height++ {
if height < testCase.divergenceHeight {
primaryHeaders[height] = witnessHeaders[height]
primaryValidators[height] = witnessValidators[height]
continue
}
// we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for
// a different block (which we do by adding txs)
primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height,
bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")},
witnessValidators[height], witnessValidators[height+1], hash("app_hash"),
hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1)
primaryValidators[height] = witnessValidators[height]
continue
}
// we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for
// a different block (which we do by adding txs)
primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height,
bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")},
witnessValidators[height], witnessValidators[height+1], hash("app_hash"),
hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1)
primaryValidators[height] = witnessValidators[height]
}
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 4 * time.Hour,
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
verificationOption,
)
require.NoError(t, err)
for _, height := range testCase.unusedWitnessBlockHeights {
delete(witnessHeaders, height)
}
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
for _, height := range testCase.unusedPrimaryBlockHeights {
delete(primaryHeaders, height)
}
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
// Check evidence was sent to both full nodes.
// Common height should be set to the height of the divergent header in the instance
// of an equivocation attack and the validator sets are the same as what the witness has
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[testCase.divergenceHeight],
ValidatorSet: primaryValidators[testCase.divergenceHeight],
},
CommonHeight: testCase.divergenceHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil)
mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
evAgainstWitness := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[testCase.divergenceHeight],
ValidatorSet: witnessValidators[testCase.divergenceHeight],
},
CommonHeight: testCase.divergenceHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash())
})).Return(nil)
// Check evidence was sent to both full nodes.
// Common height should be set to the height of the divergent header in the instance
// of an equivocation attack and the validator sets are the same as what the witness has
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[divergenceHeight],
ValidatorSet: primaryValidators[divergenceHeight],
},
CommonHeight: divergenceHeight,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 4 * time.Hour,
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
testCase.lightOption,
)
require.NoError(t, err)
evAgainstWitness := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: witnessHeaders[divergenceHeight],
ValidatorSet: witnessValidators[divergenceHeight],
},
CommonHeight: divergenceHeight,
}
assert.True(t, primary.HasEvidence(evAgainstWitness))
// Check verification returns an error.
_, err = c.VerifyLightBlockAtHeight(ctx, testCase.latestHeight, bTime.Add(300*time.Second))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
})
}
}
@@ -182,7 +228,10 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
primaryValidators = make(map[int64]*types.ValidatorSet, forgedHeight)
)
witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime)
witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime)
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(witnessHeaders, unusedHeader)
}
// primary has the exact same headers except it forges one extra header in the future using keys from 2/5ths of
// the validators
@@ -190,6 +239,9 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
primaryHeaders[h] = witnessHeaders[h]
primaryValidators[h] = witnessValidators[h]
}
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(primaryHeaders, unusedHeader)
}
forgedKeys := chainKeys[latestHeight].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain)
primaryValidators[forgedHeight] = forgedKeys.ToValidators(2, 0)
primaryHeaders[forgedHeight] = forgedKeys.GenSignedHeader(
@@ -204,15 +256,36 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
hash("results_hash"),
0, len(forgedKeys),
)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators)
lastBlock, _ := mockPrimary.LightBlock(ctx, forgedHeight)
mockPrimary.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
mockPrimary.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound)
witness := mockp.New(chainID, witnessHeaders, witnessValidators)
primary := mockp.New(chainID, primaryHeaders, primaryValidators)
/*
for _, unusedHeader := range []int64{3, 5, 6, 8} {
delete(witnessHeaders, unusedHeader)
}
*/
mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
lastBlock, _ = mockWitness.LightBlock(ctx, latestHeight)
mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil).Once()
mockWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh)
laggingWitness := witness.Copy("laggingWitness")
mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool {
// Check evidence was sent to the witness against the full node
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[forgedHeight],
ValidatorSet: primaryValidators[forgedHeight],
},
CommonHeight: latestHeight,
}
return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash())
})).Return(nil).Twice()
// In order to perform the attack, the primary needs at least one accomplice as a witness to also
// send the forged block
accomplice := primary
accomplice := mockPrimary
c, err := light.NewClient(
ctx,
@@ -222,8 +295,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
primary,
[]provider.Provider{witness, accomplice},
mockPrimary,
[]provider.Provider{mockWitness, accomplice},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
@@ -251,7 +324,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
}
go func() {
time.Sleep(2 * time.Second)
witness.AddLightBlock(newLb)
mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(newLb, nil)
}()
// Now assert that verification returns an error. We craft the light clients time to be a little ahead of the chain
@@ -261,26 +334,19 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
// Check evidence was sent to the witness against the full node
evAgainstPrimary := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: primaryHeaders[forgedHeight],
ValidatorSet: primaryValidators[forgedHeight],
},
CommonHeight: latestHeight,
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
// We attempt the same call but now the supporting witness has a block which should
// immediately conflict in time with the primary
_, err = c.VerifyLightBlockAtHeight(ctx, forgedHeight, bTime.Add(time.Duration(forgedHeight)*time.Minute))
if assert.Error(t, err) {
assert.Equal(t, light.ErrLightClientAttack, err)
}
assert.True(t, witness.HasEvidence(evAgainstPrimary))
// Lastly we test the unfortunate case where the light client's supporting witness doesn't update
// in enough time
mockLaggingWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators)
mockLaggingWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh)
lastBlock, _ = mockLaggingWitness.LightBlock(ctx, latestHeight)
mockLaggingWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil)
c, err = light.NewClient(
ctx,
chainID,
@@ -289,8 +355,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
Height: 1,
Hash: primaryHeaders[1].Hash(),
},
primary,
[]provider.Provider{laggingWitness, accomplice},
mockPrimary,
[]provider.Provider{mockLaggingWitness, accomplice},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxClockDrift(1*time.Second),
@@ -300,17 +366,20 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) {
_, err = c.Update(ctx, bTime.Add(time.Duration(forgedHeight)*time.Minute))
assert.NoError(t, err)
mockPrimary.AssertExpectations(t)
mockWitness.AssertExpectations(t)
}
// 1. Different nodes therefore a divergent header is produced.
// => light client returns an error upon creation because primary and witness
// have a different view.
func TestClientDivergentTraces1(t *testing.T) {
primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
firstBlock, err := primary.LightBlock(ctx, 1)
headers, vals, _ := genLightBlocksWithKeys(chainID, 1, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(headers, vals)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
require.NoError(t, err)
witness := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
headers, vals, _ = genLightBlocksWithKeys(chainID, 1, 5, 2, bTime)
mockWitness := mockNodeFromHeadersAndVals(headers, vals)
_, err = light.NewClient(
ctx,
@@ -320,20 +389,25 @@ func TestClientDivergentTraces1(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
primary,
[]provider.Provider{witness},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.Error(t, err)
assert.Contains(t, err.Error(), "does not match primary")
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
// 2. Two out of three nodes don't respond but the third has a header that matches
// => verification should be successful and all the witnesses should remain
func TestClientDivergentTraces2(t *testing.T) {
primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime))
firstBlock, err := primary.LightBlock(ctx, 1)
headers, vals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimaryNode := mockNodeFromHeadersAndVals(headers, vals)
mockDeadNode := &provider_mocks.Provider{}
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)
firstBlock, err := mockPrimaryNode.LightBlock(ctx, 1)
require.NoError(t, err)
c, err := light.NewClient(
ctx,
@@ -343,31 +417,35 @@ func TestClientDivergentTraces2(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
primary,
[]provider.Provider{deadNode, deadNode, primary},
mockPrimaryNode,
[]provider.Provider{mockDeadNode, mockDeadNode, mockPrimaryNode},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
assert.NoError(t, err)
assert.Equal(t, 3, len(c.Witnesses()))
mockDeadNode.AssertExpectations(t)
mockPrimaryNode.AssertExpectations(t)
}
// 3. witness has the same first header, but different second header
// => creation should succeed, but the verification should fail
//nolint: dupl
func TestClientDivergentTraces3(t *testing.T) {
_, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime)
primary := mockp.New(chainID, primaryHeaders, primaryVals)
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
firstBlock, err := primary.LightBlock(ctx, 1)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
require.NoError(t, err)
_, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime)
mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockHeaders[1] = primaryHeaders[1]
mockVals[1] = primaryVals[1]
witness := mockp.New(chainID, mockHeaders, mockVals)
mockWitness := mockNodeFromHeadersAndVals(mockHeaders, mockVals)
c, err := light.NewClient(
ctx,
@@ -377,33 +455,35 @@ func TestClientDivergentTraces3(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
primary,
[]provider.Provider{witness},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
assert.Error(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
// 4. Witness has a divergent header but can not produce a valid trace to back it up.
// It should be ignored
//nolint: dupl
func TestClientDivergentTraces4(t *testing.T) {
_, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime)
primary := mockp.New(chainID, primaryHeaders, primaryVals)
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
firstBlock, err := primary.LightBlock(ctx, 1)
firstBlock, err := mockPrimary.LightBlock(ctx, 1)
require.NoError(t, err)
_, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime)
witness := primary.Copy("witness")
witness.AddLightBlock(&types.LightBlock{
SignedHeader: mockHeaders[10],
ValidatorSet: mockVals[10],
})
witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
primaryHeaders[2] = witnessHeaders[2]
primaryVals[2] = witnessVals[2]
mockWitness := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)
c, err := light.NewClient(
ctx,
@@ -413,14 +493,16 @@ func TestClientDivergentTraces4(t *testing.T) {
Hash: firstBlock.Hash(),
Period: 4 * time.Hour,
},
primary,
[]provider.Provider{witness},
mockPrimary,
[]provider.Provider{mockWitness},
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour))
assert.Error(t, err)
assert.Equal(t, 1, len(c.Witnesses()))
mockWitness.AssertExpectations(t)
mockPrimary.AssertExpectations(t)
}
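The divergent-trace tests above all follow the same mockery lifecycle: stub the provider with On/Return, drive the light client, then confirm with AssertExpectations that every stubbed call actually happened. A minimal self-contained sketch of that lifecycle, assuming the mockery-generated provider_mocks.Provider used throughout this diff (the test name and height are illustrative only):

package light_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/light/provider"
	provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
)

func TestDeadNodeLifecycle(t *testing.T) {
	deadNode := &provider_mocks.Provider{}
	// stub: any context, any height, always fail with ErrNoResponse
	deadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)

	// exercise the stub the way the light client would
	_, err := deadNode.LightBlock(context.Background(), 1)
	require.Equal(t, provider.ErrNoResponse, err)

	// fails the test if any expectation registered above was never hit
	deadNode.AssertExpectations(t)
}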


@@ -2,7 +2,6 @@ package light_test
import (
"context"
"fmt"
"io/ioutil"
stdlog "log"
"os"
@@ -19,88 +18,12 @@ import (
rpctest "github.com/tendermint/tendermint/rpc/test"
)
// Automatically getting new headers and verifying them.
func ExampleClient_Update() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig("ExampleClient_Update")
// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
if err != nil {
stdlog.Fatal(err)
}
defer func() { _ = closer(ctx) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
}
defer os.RemoveAll(dbDir)
chainID := conf.ChainID()
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
if err != nil {
stdlog.Fatal(err)
}
block, err := primary.LightBlock(ctx, 2)
if err != nil {
stdlog.Fatal(err)
}
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
if err != nil {
stdlog.Fatal(err)
}
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
if err != nil {
stdlog.Fatal(err)
}
defer func() {
if err := c.Cleanup(); err != nil {
stdlog.Fatal(err)
}
}()
time.Sleep(2 * time.Second)
h, err := c.Update(ctx, time.Now())
if err != nil {
stdlog.Fatal(err)
}
if h != nil && h.Height > 2 {
fmt.Println("successful update")
} else {
fmt.Println("update failed")
}
// Output: successful update
}
// Manually getting light blocks and verifying them.
func ExampleClient_VerifyLightBlockAtHeight() {
func ExampleClient() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight")
logger := log.TestingLogger()
// Start a test application
app := kvstore.NewApplication()
@@ -111,9 +34,6 @@ func ExampleClient_VerifyLightBlockAtHeight() {
}
defer func() { _ = closer(ctx) }()
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
dbDir, err := ioutil.TempDir("", "light-client-example")
if err != nil {
stdlog.Fatal(err)
@@ -127,6 +47,9 @@ func ExampleClient_VerifyLightBlockAtHeight() {
stdlog.Fatal(err)
}
// give Tendermint time to generate some blocks
time.Sleep(5 * time.Second)
block, err := primary.LightBlock(ctx, 2)
if err != nil {
stdlog.Fatal(err)
@@ -147,7 +70,7 @@ func ExampleClient_VerifyLightBlockAtHeight() {
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
light.Logger(logger),
)
if err != nil {
stdlog.Fatal(err)
@@ -158,16 +81,26 @@ func ExampleClient_VerifyLightBlockAtHeight() {
}
}()
// wait for a few more blocks to be produced
time.Sleep(2 * time.Second)
// verify the block at height 3
_, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now())
if err != nil {
stdlog.Fatal(err)
}
h, err := c.TrustedLightBlock(3)
// retrieve light block at height 3
_, err = c.TrustedLightBlock(3)
if err != nil {
stdlog.Fatal(err)
}
fmt.Println("got header", h.Height)
// Output: got header 3
// update to the latest height
lb, err := c.Update(ctx, time.Now())
if err != nil {
stdlog.Fatal(err)
}
logger.Info("verified light block", "light-block", lb)
}
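One consequence of dropping the trailing // Output: comments: go test compiles Example functions that lack an output comment but no longer executes them, which is presumably why runnable versions of these flows were added as regular tests in light/light_test.go below. The convention, for reference (a generic illustration, not code from this diff):

// an Example is only run, and its stdout checked, when it ends
// with an // Output: comment matching the expected output
func Example_hello() {
	fmt.Println("hello")
	// Output: hello
}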


@@ -3,10 +3,12 @@ package light_test
import (
"time"
"github.com/stretchr/testify/mock"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/tmhash"
tmtime "github.com/tendermint/tendermint/libs/time"
provider_mocks "github.com/tendermint/tendermint/light/provider/mocks"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
@@ -169,12 +171,12 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys {
return newKeys.Extend(delta)
}
// Generates the header and validator set to create a full entire mock node with blocks to height (
// blockSize) and with variation in validator sets. BlockIntervals are in per minute.
// genLightBlocksWithKeys generates the headers and validator sets needed to
// create blocks up to the given height (numBlocks). Blocks are generated at
// one-minute intervals.
// NOTE: expected to be used with a large validator set, ~100 validators.
func genMockNodeWithKeys(
func genLightBlocksWithKeys(
chainID string,
blockSize int64,
numBlocks int64,
valSize int,
valVariation float32,
bTime time.Time) (
@@ -183,9 +185,9 @@ func genMockNodeWithKeys(
map[int64]privKeys) {
var (
headers = make(map[int64]*types.SignedHeader, blockSize)
valset = make(map[int64]*types.ValidatorSet, blockSize+1)
keymap = make(map[int64]privKeys, blockSize+1)
headers = make(map[int64]*types.SignedHeader, numBlocks)
valset = make(map[int64]*types.ValidatorSet, numBlocks+1)
keymap = make(map[int64]privKeys, numBlocks+1)
keys = genPrivKeys(valSize)
totalVariation = valVariation
valVariationInt int
@@ -207,7 +209,7 @@ func genMockNodeWithKeys(
valset[1] = keys.ToValidators(2, 0)
keys = newKeys
for height := int64(2); height <= blockSize; height++ {
for height := int64(2); height <= numBlocks; height++ {
totalVariation += valVariation
valVariationInt = int(totalVariation)
totalVariation = -float32(valVariationInt)
@@ -226,17 +228,14 @@ func genMockNodeWithKeys(
return headers, valset, keymap
}
func genMockNode(
chainID string,
blockSize int64,
valSize int,
valVariation float32,
bTime time.Time) (
string,
map[int64]*types.SignedHeader,
map[int64]*types.ValidatorSet) {
headers, valset, _ := genMockNodeWithKeys(chainID, blockSize, valSize, valVariation, bTime)
return chainID, headers, valset
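// mockNodeFromHeadersAndVals builds a mockery Provider that serves, for every
// height i present in the maps, the light block assembled from headers[i] and
// vals[i].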
func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader,
vals map[int64]*types.ValidatorSet) *provider_mocks.Provider {
mockNode := &provider_mocks.Provider{}
for i, header := range headers {
lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]}
mockNode.On("LightBlock", mock.Anything, i).Return(lb, nil)
}
return mockNode
}
func hash(s string) []byte {

light/light_test.go (new file, 173 lines)

@@ -0,0 +1,173 @@
package light_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/light/provider"
httpp "github.com/tendermint/tendermint/light/provider/http"
dbs "github.com/tendermint/tendermint/light/store/db"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
// NOTE: these are ports of the tests from example_test.go but
// rewritten as more conventional tests.
// Automatically getting new headers and verifying them.
func TestClientIntegration_Update(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig(t.Name())
// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
require.NoError(t, err)
defer func() {
fmt.Println("close")
require.NoError(t, closer(ctx))
}()
dbDir, err := ioutil.TempDir("", "light-client-test-update-example")
require.NoError(t, err)
defer os.RemoveAll(dbDir)
chainID := conf.ChainID()
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)
// give Tendermint time to generate some blocks
block, err := waitForBlock(ctx, primary, 2)
require.NoError(t, err)
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
require.NoError(t, err)
c, err := light.NewClient(
ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
defer func() { require.NoError(t, c.Cleanup()) }()
// ensure Tendermint is at height 3 or higher
_, err = waitForBlock(ctx, primary, 3)
require.NoError(t, err)
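// leftover wip debugging: repeatedly request the latest block (height 0)
// and print any error the provider returns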
for i := 0; i < 100; i++ {
b, pErr := primary.LightBlock(ctx, 0)
if pErr != nil {
fmt.Println("err")
fmt.Println(pErr)
fmt.Println(b)
}
}
h, err := c.Update(ctx, time.Now())
require.NoError(t, err)
require.NotNil(t, h)
require.True(t, h.Height > 2)
}
// Manually getting light blocks and verifying them.
func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf := rpctest.CreateConfig(t.Name())
// Start a test application
app := kvstore.NewApplication()
_, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
require.NoError(t, err)
defer func() { require.NoError(t, closer(ctx)) }()
dbDir, err := ioutil.TempDir("", "light-client-test-verify-example")
require.NoError(t, err)
defer os.RemoveAll(dbDir)
chainID := conf.ChainID()
primary, err := httpp.New(chainID, conf.RPC.ListenAddress)
require.NoError(t, err)
// give Tendermint time to generate some blocks
block, err := waitForBlock(ctx, primary, 2)
require.NoError(t, err)
db, err := dbm.NewGoLevelDB("light-client-db", dbDir)
require.NoError(t, err)
c, err := light.NewClient(ctx,
chainID,
light.TrustOptions{
Period: 504 * time.Hour, // 21 days
Height: 2,
Hash: block.Hash(),
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
defer func() { require.NoError(t, c.Cleanup()) }()
// ensure Tendermint is at height 3 or higher
_, err = waitForBlock(ctx, primary, 3)
require.NoError(t, err)
_, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now())
require.NoError(t, err)
h, err := c.TrustedLightBlock(3)
require.NoError(t, err)
require.EqualValues(t, 3, h.Height)
}
func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*types.LightBlock, error) {
for {
block, err := p.LightBlock(ctx, height)
switch err {
case nil:
return block, nil
// node isn't running yet, wait 1 second and repeat
case provider.ErrNoResponse, provider.ErrHeightTooHigh:
timer := time.NewTimer(1 * time.Second)
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-timer.C:
}
default:
return nil, err
}
}
}
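Because waitForBlock only returns once the block exists or the context ends, a caller that wants a hard bound on the wait can use a deadline context. A hypothetical convenience wrapper (not part of this diff), reusing the imports already in this file:

// waitForBlockWithTimeout bounds waitForBlock's polling with an overall deadline.
func waitForBlockWithTimeout(p provider.Provider, height int64, timeout time.Duration) (*types.LightBlock, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return waitForBlock(ctx, p, height)
}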


@@ -114,6 +114,7 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock,
sh, err := p.signedHeader(ctx, h)
if err != nil {
fmt.Println("nil signed header")
return nil, err
}
@@ -138,6 +139,9 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock,
SignedHeader: sh,
ValidatorSet: vs,
}
if lb.SignedHeader.Commit == nil {
fmt.Println(lb)
}
err = lb.ValidateBasic(p.chainID)
if err != nil {


@@ -1,30 +0,0 @@
package mock
import (
"context"
"fmt"
"github.com/tendermint/tendermint/light/provider"
"github.com/tendermint/tendermint/types"
)
type deadMock struct {
id string
}
// NewDeadMock creates a mock provider that always errors. id is used in case of multiple providers.
func NewDeadMock(id string) provider.Provider {
return &deadMock{id: id}
}
func (p *deadMock) String() string {
return fmt.Sprintf("DeadMock-%s", p.id)
}
func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) {
return nil, provider.ErrNoResponse
}
func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error {
return provider.ErrNoResponse
}
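With this homegrown dead mock deleted, the equivalent always-failing provider is the two-line mockery setup the tests earlier in this diff already use:

// mockery stand-in for the removed deadMock: every LightBlock call errors
deadNode := &provider_mocks.Provider{}
deadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse)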


@@ -1,118 +0,0 @@
package mock
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"github.com/tendermint/tendermint/light/provider"
"github.com/tendermint/tendermint/types"
)
type Mock struct {
id string
mtx sync.Mutex
headers map[int64]*types.SignedHeader
vals map[int64]*types.ValidatorSet
evidenceToReport map[string]types.Evidence // hash => evidence
latestHeight int64
}
var _ provider.Provider = (*Mock)(nil)
// New creates a mock provider with the given set of headers and validator
// sets.
func New(id string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock {
height := int64(0)
for h := range headers {
if h > height {
height = h
}
}
return &Mock{
id: id,
headers: headers,
vals: vals,
evidenceToReport: make(map[string]types.Evidence),
latestHeight: height,
}
}
func (p *Mock) String() string {
var headers strings.Builder
for _, h := range p.headers {
fmt.Fprintf(&headers, " %d:%X", h.Height, h.Hash())
}
var vals strings.Builder
for _, v := range p.vals {
fmt.Fprintf(&vals, " %X", v.Hash())
}
return fmt.Sprintf("Mock{id: %s, headers: %s, vals: %v}", p.id, headers.String(), vals.String())
}
func (p *Mock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) {
p.mtx.Lock()
defer p.mtx.Unlock()
var lb *types.LightBlock
if height > p.latestHeight {
return nil, provider.ErrHeightTooHigh
}
if height == 0 && len(p.headers) > 0 {
height = p.latestHeight
}
if _, ok := p.headers[height]; ok {
sh := p.headers[height]
vals := p.vals[height]
lb = &types.LightBlock{
SignedHeader: sh,
ValidatorSet: vals,
}
}
if lb == nil {
return nil, provider.ErrLightBlockNotFound
}
if lb.SignedHeader == nil || lb.ValidatorSet == nil {
return nil, provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}
}
if err := lb.ValidateBasic(lb.ChainID); err != nil {
return nil, provider.ErrBadLightBlock{Reason: err}
}
return lb, nil
}
func (p *Mock) ReportEvidence(_ context.Context, ev types.Evidence) error {
p.evidenceToReport[string(ev.Hash())] = ev
return nil
}
func (p *Mock) HasEvidence(ev types.Evidence) bool {
_, ok := p.evidenceToReport[string(ev.Hash())]
return ok
}
func (p *Mock) AddLightBlock(lb *types.LightBlock) {
p.mtx.Lock()
defer p.mtx.Unlock()
if err := lb.ValidateBasic(lb.ChainID); err != nil {
panic(fmt.Sprintf("unable to add light block, err: %v", err))
}
p.headers[lb.Height] = lb.SignedHeader
p.vals[lb.Height] = lb.ValidatorSet
if lb.Height > p.latestHeight {
p.latestHeight = lb.Height
}
}
func (p *Mock) Copy(id string) *Mock {
return New(id, p.headers, p.vals)
}
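The Copy/AddLightBlock workflow this type offered is reproduced in the new tests by editing the generated header and validator-set maps between building the two mocks, as TestClientDivergentTraces4 above does. Condensed from that test:

// build the primary first: the mockery expectations capture the original
// height-2 block at registration time, so later edits to the maps do not
// change what mockPrimary serves
primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)

// overwrite height 2 with conflicting data, then build the witness from it
witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime)
primaryHeaders[2] = witnessHeaders[2]
primaryVals[2] = witnessVals[2]
mockWitness := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals)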

Some files were not shown because too many files have changed in this diff.