Compare commits

...

153 Commits

Author SHA1 Message Date
William Banfield
3c3089f834 fixups from retrying 2022-01-07 14:16:42 -05:00
William Banfield
f629b61821 add crash description 2022-01-07 13:40:00 -05:00
William Banfield
5b51d8a270 Merge remote-tracking branch 'origin/wb/builtin-tutorial-fixup' into wb/builtin-tutorial-fixup 2022-01-07 13:21:36 -05:00
William Banfield
822ea05a8d add documentation link 2022-01-07 13:19:39 -05:00
William Banfield
47edfbe9db Apply suggestions from code review
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-01-07 13:14:41 -05:00
William Banfield
0d44fc4637 fix copy+paste mistakes 2022-01-06 19:00:19 -05:00
William Banfield
5f93bd450c check db close error 2022-01-06 18:50:02 -05:00
William Banfield
13f7af1310 link fix 2022-01-06 18:47:13 -05:00
William Banfield
425c6a0d2a language fix 2022-01-06 18:45:47 -05:00
William Banfield
bf147c2ea5 add intuition about checktx -> delivertx validity 2022-01-06 18:40:42 -05:00
William Banfield
3854f3b69c more explanation to checktx per review feedback 2022-01-06 18:35:58 -05:00
William Banfield
28cf6fcf52 remove superfluous 0 check and add bytes to import 2022-01-06 18:33:43 -05:00
William Banfield
bdc3ea22d6 finalize code description 2022-01-06 18:21:31 -05:00
William Banfield
c52de2595d wip 2022-01-06 16:17:35 -05:00
William Banfield
f78acf21a6 pr feedback 2022-01-06 15:37:53 -05:00
William Banfield
557b860e7b pr feedback 2022-01-06 15:36:47 -05:00
William Banfield
48450444ea cr feedback 2022-01-06 09:38:24 -05:00
William Banfield
17a214bcc0 Apply suggestions from code review
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
Co-authored-by: Sergio Mena <sergio@informal.systems>
2022-01-05 19:05:27 -05:00
William Banfield
7241c2cce4 Update docs/tutorials/go-built-in.md
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-01-05 18:35:53 -05:00
William Banfield
d081283101 builtin-tutorial-fixup 2021-12-22 18:27:15 -05:00
dependabot[bot]
d3abe15aae build(deps): Bump github.com/spf13/viper from 1.10.0 to 1.10.1 (#7468)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.10.0 to 1.10.1.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.10.0...v1.10.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-17 12:15:46 -05:00
dependabot[bot]
44afb4f61a build(deps): Bump github.com/rs/zerolog from 1.26.0 to 1.26.1 (#7467)
Bumps [github.com/rs/zerolog](https://github.com/rs/zerolog) from 1.26.0 to 1.26.1.
- [Release notes](https://github.com/rs/zerolog/releases)
- [Commits](https://github.com/rs/zerolog/compare/v1.26.0...v1.26.1)

---
updated-dependencies:
- dependency-name: github.com/rs/zerolog
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-17 08:25:10 -05:00
dependabot[bot]
ca857805a3 build(deps): Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 (#7457)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.2.1 to 1.3.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v1.2.1...v1.3.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-15 12:25:19 -05:00
dependabot[bot]
08bf5db85c build(deps): Bump google.golang.org/grpc from 1.42.0 to 1.43.0 (#7458)
* build(deps): Bump google.golang.org/grpc from 1.42.0 to 1.43.0

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.42.0 to 1.43.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.42.0...v1.43.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Fix deprecated usage.

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-12-15 08:07:49 -08:00
mergify[bot]
8bbec93c77 Fix broken documentation link. (#7439) (#7440)
A follow-up to #7416 and #7412.

(cherry picked from commit a872dd75b7)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-12-14 09:30:32 +01:00
dependabot[bot]
8bcec21f54 build(deps): Bump github.com/adlio/schema from 1.2.2 to 1.2.3 (#7436)
Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.2.2 to 1.2.3.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.2.2...v1.2.3)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-13 06:23:50 -08:00
dependabot[bot]
540aea1030 build(deps): Bump github.com/spf13/viper from 1.9.0 to 1.10.0 (#7435)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.9.0 to 1.10.0.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-13 08:47:22 -05:00
dependabot[bot]
22b0b67c63 build(deps): Bump github.com/adlio/schema from 1.1.15 to 1.2.2 (#7422)
* build(deps): Bump github.com/adlio/schema from 1.1.15 to 1.2.2

Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.15 to 1.2.2.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.1.15...v1.2.2)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Work around API changes in the migrator package.

A recent update inadvertently broke the API by changing the receiver types of
the methods without updating the constructor.

See: https://github.com/adlio/schema/issues/13

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-12-10 11:34:26 -08:00
mergify[bot]
1919a93708 internal/libs/protoio: optimize MarshalDelimited by plain byteslice allocations+sync.Pool (#7325) (#7426)
Noticed in profiles that invoking *VoteSignBytes always created a
bytes.Buffer, then discarded it inside protoio.MarshalDelimited.
I dug further and examined the call paths and noticed that we
unconditionally create the bytes.Buffer, even though we might
have proto messages (in the common case) that implement
MarshalTo([]byte), and invoked varintWriter. Instead by inlining
this case, we skip a bunch of allocations and CPU cycles,
which then reflects properly on all calling functions. Here
are the benchmark results:

```shell
$ benchstat before.txt after.txt
name                                        old time/op    new time/op      delta
types.VoteSignBytes-8                       705ns ± 3%     573ns ± 6%       -18.74% (p=0.000 n=18+20)
types.CommitVoteSignBytes-8                 8.15µs ± 9%    6.81µs ± 4%      -16.51% (p=0.000 n=20+19)
protoio.MarshalDelimitedWithMarshalTo-8     788ns ± 8%     772ns ± 3%       -2.01%  (p=0.050 n=20+20)
protoio.MarshalDelimitedNoMarshalTo-8       989ns ± 4%     845ns ± 2%       -14.51% (p=0.000 n=20+18)

name                                        old alloc/op   new alloc/op    delta
types.VoteSignBytes-8                       792B ± 0%      600B ± 0%       -24.24%  (p=0.000 n=20+20)
types.CommitVoteSignBytes-8                 9.52kB ± 0%    7.60kB ± 0%     -20.17%  (p=0.000 n=20+20)
protoio.MarshalDelimitedNoMarshalTo-8       808B ± 0%      440B ± 0%       -45.54%  (p=0.000 n=20+20)

name                                        old allocs/op  new allocs/op   delta
types.VoteSignBytes-8                       13.0 ± 0%      10.0 ± 0%       -23.08%  (p=0.000 n=20+20)
types.CommitVoteSignBytes-8                 140 ± 0%       110 ± 0%        -21.43%  (p=0.000 n=20+20)
protoio.MarshalDelimitedNoMarshalTo-8       10.0 ± 0%      7.0 ± 0%        -30.00%  (p=0.000 n=20+20)
```
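
For illustration, here is a hedged sketch (not the actual protoio code; the names and pooled-buffer details are assumptions) of the fast path described above: when a message supports `MarshalTo`, encode the varint length prefix and the body directly into a pooled byte slice instead of routing everything through a `bytes.Buffer`.

```go
package protoio

import (
	"encoding/binary"
	"sync"
)

// marshalToMsg mirrors the subset of proto messages that can marshal
// directly into a caller-provided slice (assumed interface).
type marshalToMsg interface {
	Size() int
	MarshalTo(dst []byte) (int, error)
}

var bufPool = sync.Pool{
	New: func() interface{} { b := make([]byte, 0, 1024); return &b },
}

// marshalDelimitedFast is a sketch of the inlined path: length-prefix the
// message and let it marshal itself into a reused buffer.
func marshalDelimitedFast(msg marshalToMsg) ([]byte, error) {
	size := msg.Size()
	bp := bufPool.Get().(*[]byte)
	buf := *bp
	if cap(buf) < binary.MaxVarintLen64+size {
		buf = make([]byte, binary.MaxVarintLen64+size)
	}
	buf = buf[:binary.MaxVarintLen64+size]

	n := binary.PutUvarint(buf, uint64(size))
	m, err := msg.MarshalTo(buf[n:])
	if err == nil {
		// Copy out, since buf goes back to the pool.
		out := make([]byte, n+m)
		copy(out, buf[:n+m])
		*bp = buf
		bufPool.Put(bp)
		return out, nil
	}
	*bp = buf
	bufPool.Put(bp)
	return nil, err
}
```

In the common case this avoids both the `bytes.Buffer` allocation and the intermediate copy into it, which is where the alloc/op savings above come from.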

Thanks to Tharsis who tasked me to help them increase TPS and who
are keen on improving Tendermint and efficiency.

(cherry picked from commit 3e92899bd9)

Co-authored-by: Emmanuel T Odeke <emmanuel@orijtech.com>
2021-12-10 10:05:26 -08:00
mergify[bot]
ed164e4272 Fix links to Terraform/Ansible documentation. (backport #7416) (#7417)
* Fix link to Terraform/Ansible documentation. (#7416)

(cherry picked from commit f79b77036f)
2021-12-09 08:21:58 -08:00
dependabot[bot]
cd7d91c389 build(deps): Bump github.com/adlio/schema from 1.1.14 to 1.1.15 (#7406)
Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.14 to 1.1.15.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.1.14...v1.1.15)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-08 09:03:24 -05:00
mergify[bot]
c6fadc1e1f cmd: cosmetic changes for errors and print statements (#7377) (#7408) 2021-12-08 12:09:47 +01:00
mergify[bot]
0b3764e3e3 ci: skip docker image builds during PRs (#7397) (#7398)
(cherry picked from commit 0b3e00a6b5)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-12-07 10:57:58 -05:00
mergify[bot]
ab7de40d1d ci: cleanup build/test targets (backport #7393) (#7395) 2021-12-07 10:39:52 -05:00
mergify[bot]
df47c41921 Update Mergify configuration. (backport #7388) (#7390)
Per https://docs.mergify.com/actions/merge/#commit-message, the
commit_message option is deprecated and will be removed in 2022.
Replace it with the template suggested here:

https://docs.mergify.com/actions/queue/

(cherry picked from commit 02d456b8b8)
2021-12-06 13:34:05 -08:00
mergify[bot]
ec2361eaf9 Update mergify configuration. (backport #7385) (#7387)
Per https://blog.mergify.com/strict-mode-deprecation/, the strict mode
has been deprecated and will be turned off on 10-Jan-2022. This updates
the config to use the new, approved thing instead of the old thing.

(cherry picked from commit 2d4844f97f)
2021-12-06 12:42:33 -08:00
Sam Kleinman
9978a3c99b ci: move test execution to makefile (#7372) (#7374)
(cherry picked from commit b3be1d7d7a)
2021-12-02 17:25:13 -05:00
mergify[bot]
b4396d79f2 rpc: implement header and header_by_hash queries (backport #7270) (#7367) 2021-12-02 14:13:48 +01:00
Sam Kleinman
a3021be8e6 e2e: limit legacyp2p and statesyncp2p (#7361) 2021-12-01 16:22:17 -05:00
mergify[bot]
657afc7322 Remove now-unused nolint:lll directives. (backport #7356) (#7358)
(cherry picked from commit 76dea94a01)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-11-30 17:01:51 -05:00
Sam Kleinman
4bc36c8ba9 e2e: app hash test cleanup (0.35 backport) (#7350) 2021-11-30 16:42:01 -05:00
mergify[bot]
260f4250b0 Fix incorrect tests using the PSQL sink. (backport #7349) (#7355)
Some of our tests were creating a psql event sink and expecting
it to report (or not report) certain kinds of errors. These tests
were ill-founded in a couple of ways:

1. Tests that required the Postgres driver were not loading it.
   This led to spurious successes on tests that wanted "some error"
   from the sink constructor, but didn't exercise the right path.

2. Tests that wanted a Postgres sink to succeed without a database.
   These tests "passed" because they weren't actually establishing a
   connection to the database, but if they had would have failed for
   the lack of one.

To fix this:
- Load the postgres driver in tests that need it.
- Verify connectivity before reporting successful creation of a PSQL event sink (see the sketch after this list).
- Remove tests that wanted a psql sink without a database, since that case
  is already tested elsewhere.
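
For reference, a minimal sketch (under assumed names, not the actual sink code) of that connectivity check: load the Postgres driver and `Ping` the database before reporting the sink as successfully created.

```go
package psql

import (
	"database/sql"

	// The Postgres driver must be imported (by the sink and by its tests)
	// for sql.Open("postgres", ...) to resolve a driver at all.
	_ "github.com/lib/pq"
)

// EventSink is a stand-in for the real event sink type.
type EventSink struct {
	db *sql.DB
}

// NewEventSink fails fast when the database cannot actually be reached,
// instead of reporting success and erroring later.
func NewEventSink(connStr string) (*EventSink, error) {
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		return nil, err
	}
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, err
	}
	return &EventSink{db: db}, nil
}
```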

(cherry picked from commit ab1788b922)
2021-11-30 13:39:07 -08:00
mergify[bot]
44c8b410f7 lint: remove lll check (#7346) (#7357)
(cherry picked from commit 502f92bb97)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-30 16:36:56 -05:00
mergify[bot]
bd5a7428b9 e2e: generate keys for more stable load (#7344) (#7353)
(cherry picked from commit babd3acb70)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-30 15:52:42 -05:00
mergify[bot]
151103042a e2e: clarify apphash reporting (#7348) (#7352)
(cherry picked from commit 24dcba9230)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-30 15:16:38 -05:00
mergify[bot]
ebe57960f7 e2e: stabilize validator update form (#7340) (#7351)
This might be a source of non-determinism in the e2e test.

(cherry picked from commit c4033f95c1)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-30 14:53:40 -05:00
William Banfield
7bf7ff6639 internal/proxy: add initial set of abci metrics backport (#7342)
* internal/proxy: add initial set of abci metrics (#7115)

This PR adds an initial set of metrics for ABCI use. The initial metrics enable the calculation of timing histograms and call counts for each of the ABCI methods. The metrics are also labeled as either 'sync' or 'async' to indicate whether the method call was performed using ABCI's `*Async` methods.

An example of these metrics is included here for reference:
```
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.0001"} 0
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.0004"} 5
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.002"} 12
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.009"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.02"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.1"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.65"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="2"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="6"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="25"} 13
tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="+Inf"} 13
tendermint_abci_connection_method_timing_sum{chain_id="ci",method="commit",type="sync"} 0.007802058000000001
tendermint_abci_connection_method_timing_count{chain_id="ci",method="commit",type="sync"} 13
```

These metrics can easily be graphed using Prometheus's `histogram_quantile(...)` function to pick out a particular quantile to graph or examine. I chose buckets that roughly estimate the expected range of times for ABCI operations: they start at 0.0001 seconds and range up to 25 seconds. The hope is that this range captures enough possible times to be useful for us and operators.
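
As a rough, standalone illustration (using the Prometheus Go client directly rather than the metrics plumbing the repository actually uses; the Go identifiers are assumptions), a timing histogram with the bucket boundaries visible in the sample output above could be declared and fed like this:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var abciMethodTiming = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "tendermint",
		Subsystem: "abci_connection",
		Name:      "method_timing",
		Help:      "Timing of ABCI method calls in seconds.",
		// Buckets match the boundaries in the sample output above.
		Buckets: []float64{0.0001, 0.0004, 0.002, 0.009, 0.02, 0.1, 0.65, 2, 6, 25},
	},
	[]string{"chain_id", "method", "type"},
)

func observe(chainID, method, callType string, start time.Time) {
	abciMethodTiming.
		WithLabelValues(chainID, method, callType).
		Observe(time.Since(start).Seconds())
}

func main() {
	prometheus.MustRegister(abciMethodTiming)

	start := time.Now()
	// ... perform an ABCI "commit" call here ...
	observe("ci", "commit", "sync", start)
}
```

From there, `histogram_quantile(...)` can be applied to the resulting `_bucket` series to graph a chosen quantile.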

* lint++

* docs: add abci timing metrics to the metrics docs (#7311)

* cherry-pick fixup
2021-11-30 10:48:07 -05:00
M. J. Fromberger
1c1ce83e2d Performance improvements for the event query API (#7338)
A manual backport of #7319 and #7336.
2021-11-30 06:23:27 -08:00
mergify[bot]
3e41def0eb docs: go tutorial fixed for 0.35.0 version (#7329) (#7330) (#7331)
(cherry picked from commit a36dd49eae)

Co-authored-by: Piotr Pędziwiatr <84311757+ppedziwiatr@users.noreply.github.com>
2021-11-27 09:27:16 -08:00
mergify[bot]
333d7f7068 Update example code in OpenAPI docs. (#7318) (#7322)
The event examples for the query filter language were not updated after the
change of key and value types from []byte to string. Also, the attributes need
to be a slice not a bare value.

(cherry picked from commit da3449599f)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-11-25 08:33:07 -08:00
M. J. Fromberger
778be06de8 pubsub: Report a non-nil error when shutting down. (#7310)
If a subscriber arrives while the pubsub service is shutting down, the existing
code will return a nil subscription without error. With unlucky timing, this
may lead to a nil indirection panic in the RPC service.

To avoid that problem, make sure that when a subscription fails for this
reason, we report a non-nil error so that the client will detect it and give up
gracefully.
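
A minimal sketch of that guard, under assumed names rather than the actual pubsub API: a Subscribe that notices shutdown and returns an explicit error instead of a nil subscription.

```go
package pubsub

import (
	"context"
	"errors"
)

// ErrServerStopped is reported to subscribers that arrive during shutdown.
var ErrServerStopped = errors.New("pubsub: server is shutting down")

type Subscription struct{ out chan interface{} }

type Server struct {
	quit chan struct{} // closed when the service begins shutting down
}

func (s *Server) Subscribe(ctx context.Context) (*Subscription, error) {
	select {
	case <-s.quit:
		// Returning (nil, nil) here is what allowed a nil indirection
		// panic further up in the RPC service; surface an error instead.
		return nil, ErrServerStopped
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
		return &Subscription{out: make(chan interface{}, 1)}, nil
	}
}
```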
2021-11-23 12:26:22 -08:00
M. J. Fromberger
a97b081df1 Partial backport of protobuf generation changes. (#7302)
This is a manual backport of the changes to how we build and run the protobuf
toolchain images in Docker. The main effect here is to point to the new image
from ghcr.io/tendermint/docker-proto-builder, but to make that work it is also
necessary to update some of the branch pointers.

This change does NOT include the changes from #7269 and #7291 to point to the
proto files in the spec repo. To do that, we will need to create a branch or
tag on the spec that has the released version, which does not exist in the spec
history as it currently stands.
2021-11-22 12:03:24 -08:00
dependabot[bot]
b506801b2a build(deps): Bump github.com/tendermint/tm-db from 0.6.4 to 0.6.6 (#7285) 2021-11-16 17:29:09 +01:00
mergify[bot]
97f888ea30 Add v0.35 to the configs for building the docs website. (#7055) (#7278)
(cherry picked from commit b3b1279d1f)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-11-14 20:46:18 -08:00
Thane Thomson
035da42a91 rpc: backport experimental buffer size control parameters from #7230 (tm v0.35.x) (#7276)
* Update error message to correspond to changes in v0.34.x
* Add buffer size and client-close config parameters

Signed-off-by: Thane Thomson <connect@thanethomson.com>
2021-11-13 13:48:45 -08:00
mergify[bot]
37e0779d6d p2p: reduce peer score for dial failures (backport #7265) (#7271)
* p2p: reduce peer score for dial failures (#7265)

When dialing fails to succeed we should reduce the score of the peer,
which puts the peer at (potentially) greater chances of being removed
from the peer manager, and reduces the chance of the peer being
gossiped by the PEX reactor.

(cherry picked from commit 27560cf7a4)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-10 11:10:06 -05:00
mergify[bot]
38f9078435 evidence: remove source of non-determinism from test (#7266) (#7268)
The evidence test produces a set of mock evidence in the evidence pool of the 'Primary' node. The test then fills the evidence pools of secondaries with half of this mock evidence. Finally, the test waits until the secondary has an evidence pool as full as the primary.

The assertions that are removed here were checking that the primary and secondaries' evidence channels were empty. However, nothing in the test actually ensures that the channels are empty. The test only waits for the secondaries to have received the complete set of evidence, and the secondaries already received half of the evidence at the beginning. It's more than possible that the secondaries can receive the complete set of evidence and not finish reading the duplicate evidence off the channels.

(cherry picked from commit 4acd117b5e)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2021-11-09 16:36:00 -05:00
mergify[bot]
052b08160a Set a cap on the length of subscription queries. (backport #7263) (#7264)
As a safety measure, don't allow a query string to be unreasonably
long. The query filter is not especially efficient, so a query that
needs more than basic detail should filter coarsely in the subscriber
and refine on the client side.

This affects Subscribe and TxSearch queries.
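
The cap itself is a simple guard; a hedged sketch follows (the constant and function names are assumptions, not the actual implementation):

```go
package rpc

import "fmt"

// maxQueryLength is an assumed upper bound on subscription query strings.
const maxQueryLength = 512

// validateQuery rejects unreasonably long filter expressions before they
// reach the (relatively expensive) query parser and matcher.
func validateQuery(query string) error {
	if len(query) > maxQueryLength {
		return fmt.Errorf("query of %d bytes exceeds maximum length %d", len(query), maxQueryLength)
	}
	return nil
}
```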

(cherry picked from commit 9dc3d7f9a2)
2021-11-09 19:53:44 +01:00
mergify[bot]
4a664931b4 consensus: add some more checks to vote counting (#7253) (#7262)
(cherry picked from commit b3b90f820c)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-11-09 10:47:38 -05:00
dependabot[bot]
cf018baa88 build(deps): Bump github.com/lib/pq from 1.10.3 to 1.10.4 (#7260)
Bumps [github.com/lib/pq](https://github.com/lib/pq) from 1.10.3 to 1.10.4.
- [Release notes](https://github.com/lib/pq/releases)
- [Commits](https://github.com/lib/pq/compare/v1.10.3...v1.10.4)

---
updated-dependencies:
- dependency-name: github.com/lib/pq
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-11-09 08:09:35 -05:00
M. J. Fromberger
643a3f56f6 backport: Add basic metrics to the indexer package. (#7250) (#7252) 2021-11-08 06:48:38 -08:00
dependabot[bot]
1c5bb6e921 build(deps): Bump google.golang.org/grpc from 1.41.0 to 1.42.0 (#7218)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.41.0 to 1.42.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.41.0...v1.42.0)

This required a patch to Unix-domain socket addresses.
Per https://github.com/grpc/grpc/blob/master/doc/naming.md, the socket path
using the unix://... address format must be absolute. This recently started
being enforced in the library. That change was not documented.

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-11-05 19:11:09 -07:00
M. J. Fromberger
97a3e44e07 Prepare CHANGELOG and config settings for v0.35.0 release. (#7228) 2021-11-04 08:22:39 -07:00
mergify[bot]
d021d068da docs: add upgrading info about node service (#7241) (#7242) 2021-11-04 15:10:15 +01:00
Sam Kleinman
d59565d050 pex: avoid starting reactor twice (#7239) 2021-11-04 07:58:26 -04:00
Sam Kleinman
003d15fa4b lint: cleanup branch lint errors (#7238) 2021-11-04 07:44:13 -04:00
dependabot[bot]
8629d31de3 build(deps): Bump github.com/rs/zerolog from 1.25.0 to 1.26.0 (#7222)
Bumps [github.com/rs/zerolog](https://github.com/rs/zerolog) from 1.25.0 to 1.26.0.
- [Release notes](https://github.com/rs/zerolog/releases)
- [Commits](https://github.com/rs/zerolog/compare/v1.25.0...v1.26.0)

---
updated-dependencies:
- dependency-name: github.com/rs/zerolog
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-11-03 12:28:52 -04:00
dependabot[bot]
113bebb314 build(deps): Bump github.com/golangci/golangci-lint (#7224)
Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.42.1 to 1.43.0.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.42.1...v1.43.0)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-11-03 11:19:06 -04:00
Sam Kleinman
5591d08b4a ci: backport lint configuration changes (#7226)
(cherry picked from commit 6f66c60397)
2021-11-03 15:41:41 +01:00
dependabot[bot]
56607d406b build(deps): Bump github.com/adlio/schema from 1.1.13 to 1.1.14 (#7217)
Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.13 to 1.1.14.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.1.13...v1.1.14)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-11-03 10:22:33 -04:00
mergify[bot]
5fca090e6a pex: allow disabled pex reactor (backport #7198) (#7201)
This ensures the implementation respects disabling the pex reactor.

(cherry picked from commit ffcd347ef6)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-11-03 09:41:11 -04:00
M. J. Fromberger
3e9ecd8197 Prepare changelog for v0.35.0-rc4. (#7181) 2021-10-29 09:57:23 -07:00
Sam Kleinman
e40a8468a4 config: backport file writing changes (#7182) 2021-10-29 06:38:52 -04:00
M. J. Fromberger
85086d7452 Fix metric cardinality left over from backport (#7180)
One of the patched uses in #7161 missed the message type field,
triggering panic failures from Prometheus.
2021-10-28 15:29:53 -07:00
mergify[bot]
8314f24d79 pubsub: Use distinct client IDs for test subscriptions. (#7178) (#7179)
Fixes #7176. Some of the benchmarks create a bunch of different subscriptions all sharing the same query. These were all using the same client ID, which violates one of the subscriber rules. Ensure each subscriber gets a unique ID.

This has been broken as long as this library has been in the repo—I tracked it back to bb9aa85d and it was already failing there, so I think this never really worked. I'm not sure these test anything useful, but at least now they run.

(cherry picked from commit 1fd7060542)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-10-28 05:59:39 -04:00
mergify[bot]
dd1471da91 p2p: add message type into the send/recv bytes metrics (backport #7155) (#7161)
* p2p: add message type into the send/recv bytes metrics (#7155)

This pull request adds a new "message_type" label to the send/recv bytes metrics calculated in the p2p code.

Below is a snippet of the updated metrics that includes the updated label:
```
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="2551a13ed720101b271a5df4816d1e4b3d3bd133"} 652
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="4b1068420ef739db63377250553562b9a978708a"} 631
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="927c50a5e508c747830ce3ba64a3f70fdda58ef2"} 631
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="2551a13ed720101b271a5df4816d1e4b3d3bd133"} 393
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="4b1068420ef739db63377250553562b9a978708a"} 357
tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="927c50a5e508c747830ce3ba64a3f70fdda58ef2"} 386
```
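
For illustration, a counter vector matching the label set in the output above might be declared as follows (a standalone sketch with the Prometheus Go client; the Go identifiers are assumptions):

```go
package p2p

import "github.com/prometheus/client_golang/prometheus"

var peerReceiveBytesTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "tendermint",
		Subsystem: "p2p",
		Name:      "peer_receive_bytes_total",
		Help:      "Number of bytes received from a given peer, labeled by message type.",
	},
	// Label names mirror the sample output above.
	[]string{"chID", "chain_id", "message_type", "peer_id"},
)

func recordReceive(chID, chainID, msgType, peerID string, n int) {
	peerReceiveBytesTotal.
		WithLabelValues(chID, chainID, msgType, peerID).
		Add(float64(n))
}
```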

(cherry picked from commit b4bc6bb4e8)
2021-10-27 07:34:24 -04:00
mergify[bot]
c6d62cc8b2 docs: fix broken links and layout (#7154) (#7163)
This PR does a few minor touch ups to the docs

(cherry picked from commit ce89292712)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-10-27 05:14:35 -04:00
mergify[bot]
ce6014ddf5 docs: add reactor sections (backport #6510) (#7151) 2021-10-22 18:29:33 +02:00
mergify[bot]
e62a75b627 state: add height assertion to rollback function (#7143) (#7148)
(cherry picked from commit a8ff617773)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-10-21 18:07:51 +02:00
mergify[bot]
dbc72e0d69 mempool: remove panic when recheck-tx was not sent to ABCI application (#7134) (#7142)
This pull request fixes a panic that exists in both mempools. The panic occurs when the ABCI client misses a response from the ABCI application. This happens when the ABCI client drops the request as a result of a full client queue. The fix here was to loop through the ordered list of recheck-tx in the callback until one matches the currently observed recheck request.
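
A hedged sketch of that matching loop (types and names are assumptions, not the mempool's actual structures): walk the ordered pending-recheck list, skipping entries whose responses were dropped, until the observed response matches.

```go
package mempool

import "bytes"

type recheckTx struct {
	tx []byte
}

// nextMatchingRecheck advances through pending rechecks in order and returns
// the entry matching respTx, discarding entries whose responses were never
// delivered instead of panicking on the mismatch.
func nextMatchingRecheck(pending []recheckTx, respTx []byte) ([]recheckTx, *recheckTx) {
	for len(pending) > 0 {
		head := pending[0]
		pending = pending[1:]
		if bytes.Equal(head.tx, respTx) {
			return pending, &head
		}
		// The ABCI client dropped the response for this transaction; skip it.
	}
	return pending, nil
}
```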

(cherry picked from commit b0130c88fb)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2021-10-19 10:21:47 -04:00
mergify[bot]
57e4e18ba3 build: Fix build-docker to include the full context. (#7114) (#7116)
Fixes #7068. The build-docker rule relies on being able to run make
build-linux, but did not pull the Makefile into the build context.
There are various ways to fix this, but this was probably the smallest.

(cherry picked from commit 6538776e6a)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-10-12 16:50:34 -07:00
mergify[bot]
b7fe214b81 Revert "abci: change client to use multi-reader mutexes (#6306)" (backport #7106) (#7110)
* Revert "abci: change client to use multi-reader mutexes (#6306)" (#7106)

This reverts commit 1c4dbe30d4.

(cherry picked from commit 34a3fcd8fc)
2021-10-12 12:03:00 -04:00
mergify[bot]
66e8eec194 light: Update links in package docs. (#7099) (#7101)
Fixes #7098. The light client documentation moved to the spec repository.

I was not able to figure out what happened to light-client-protocol.md; it was removed in #5252, but no corresponding file exists in the spec repository. Since the spec also discusses the protocol, this change simply links to the spec and removes the non-functional reference.

Alternatively we could link to the top-level [light client doc](https://docs.tendermint.com/master/tendermint-core/light-client.html) if you think that's better.

(cherry picked from commit 48295955ed)

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2021-10-11 19:49:25 -07:00
mergify[bot]
22e33aba98 e2e: light nodes should use builtin abci app (#7095) (#7097)
(cherry picked from commit befd669794)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-10-09 00:32:53 -04:00
mergify[bot]
af85f7e917 e2e: abci protocol should be consistent across networks (#7078) (#7086)
It seems weird in retrospect that we allow networks to contain
applications that use different ABCI protocols.

(cherry picked from commit f2a8f5e054)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-10-08 10:15:15 -04:00
mergify[bot]
f0cd54825f cli: allow node operator to rollback last state (backport #7033) (#7081) 2021-10-08 09:56:18 +02:00
M. J. Fromberger
98bc4f0e2b Update changelog for v0.35.0-rc3. (#7074) 2021-10-06 11:19:26 -07:00
mergify[bot]
bff85fc07b mempool,rpc: add removetx rpc method (#7047) (#7065)
Addresses one of the concerns with #7041.

Provides a mechanism (via the RPC interface) to delete a single transaction, described by its hash, from the mempool. The method returns an error if the transaction cannot be found. Once the transaction is removed it remains in the cache and cannot be resubmitted until the cache is cleared or it expires from the cache.

(cherry picked from commit 851d2e3bde)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-10-05 16:36:21 -04:00
mergify[bot]
4a952885c5 e2e: automatically prune old app snapshots (#7034) (#7063)
This PR tackles the case of using the e2e application in a long lived testnet. The application continually saves snapshots (usually every 100 blocks) which after a while bloats the size of the application. This PR prunes older snapshots so that only the most recent 10 snapshots remain.

(cherry picked from commit 5703ae2fb3)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-10-05 15:26:08 -04:00
William Banfield
42ed5d75a5 consensus: wait until peerUpdates channel is closed to close remaining peers (#7058) (#7060)
The race occurred as a result of a goroutine launched by `processPeerUpdate` racing with the `OnStop` method. The `processPeerUpdates` goroutine deletes from the map as `OnStop` is reading from it. This change updates the `OnStop` method to wait for the peer updates channel to be done before closing the peers. It also copies the map contents to a new map so that it will not conflict with the view of the map that the goroutine created in `processPeerUpdate` sees.
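
A rough sketch of that shutdown ordering (the types and field names here are assumptions): wait for the peer-update goroutine to finish, then close peers from a snapshot of the map so the two never race.

```go
package consensus

import "sync"

type peerState struct{}

func closePeer(p *peerState) {
	// stop the peer's goroutines / release its resources here
}

type Reactor struct {
	mtx             sync.RWMutex
	peers           map[string]*peerState
	peerUpdatesDone chan struct{} // closed when processPeerUpdates returns
}

func (r *Reactor) OnStop() {
	// Wait until the goroutine launched from processPeerUpdate has exited,
	// so it is no longer mutating r.peers.
	<-r.peerUpdatesDone

	// Copy the map under the lock, then close peers from the copy.
	r.mtx.RLock()
	peers := make(map[string]*peerState, len(r.peers))
	for id, p := range r.peers {
		peers[id] = p
	}
	r.mtx.RUnlock()

	for _, p := range peers {
		closePeer(p)
	}
}
```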
2021-10-05 10:49:26 -04:00
M. J. Fromberger
be684091ae Revert "Consolidate related changelog entries. (#7056)" (#7061)
This reverts commits:
  c16cd72c0a
  6ef847fdfe

We decided on another release candidate to sort out SDK merge issues.
2021-10-05 06:38:23 -07:00
M. J. Fromberger
c16cd72c0a Consolidate related changelog entries. (#7056) 2021-10-04 14:05:25 -07:00
M. J. Fromberger
6ef847fdfe Consolidate release candidate changelogs for v0.35. (#7052) 2021-10-04 12:22:32 -07:00
M. J. Fromberger
f361ce09b3 Update Go toolchains to 1.17 in Actions workflows. (#7049) 2021-10-04 15:40:50 +00:00
William Banfield
243c62cc68 statesync: improve rare p2p race condition (#7042)
This is intended to fix a test failure that occurs in the p2p state provider. The issue presents as the state provider timing out waiting for the consensus params response. 

This can occur because the statesync reactor may attempt to respond to the params request before the state provider is ready to read it. This results in the reactor hitting the `default` case seen here and never sending on the channel. The state provider will then block waiting for a response and never receive one, because the reactor opted not to send it.
2021-10-01 20:33:12 +00:00
William Banfield
177850a2c9 statesync: remove deadlock on init fail (#7029)
When statesync is stopped during shutdown, it has the possibility of deadlocking. A dump of goroutines reveals that this is related to the peerUpdates channel not returning anything on its `Done()` channel when `OnStop` is called. As this is occurring, `processPeerUpdate` is attempting to acquire the reactor lock. It appears that this lock can never be acquired. I looked for the places where the lock may remain locked accidentally and cleaned them up in hopes of eradicating the issue. Dumps of the relevant goroutines may be found below. Note that the line numbers below are relative to the code in the `v0.35.0-rc1` tag.

```
goroutine 36 [chan receive]:
github.com/tendermint/tendermint/internal/statesync.(*Reactor).OnStop(0xc00058f200)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:243 +0x117
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc00058f200, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/node.(*nodeImpl).OnStop(0xc0001ea240)
        github.com/tendermint/tendermint/node/node.go:769 +0x132
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc0001ea240, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/cmd/tendermint/commands.NewRunNodeCmd.func1.1()
        github.com/tendermint/tendermint/cmd/tendermint/commands/run_node.go:143 +0x62
github.com/tendermint/tendermint/libs/os.TrapSignal.func1(0xc000629500, 0x7fdb52f96358, 0xc0002b5030, 0xc00000daa0)
        github.com/tendermint/tendermint/libs/os/os.go:26 +0x102
created by github.com/tendermint/tendermint/libs/os.TrapSignal
        github.com/tendermint/tendermint/libs/os/os.go:22 +0xe6

goroutine 188 [semacquire]:
sync.runtime_SemacquireMutex(0xc00026b1cc, 0x0, 0x1)
        runtime/sema.go:71 +0x47
sync.(*Mutex).lockSlow(0xc00026b1c8)
        sync/mutex.go:138 +0x105
sync.(*Mutex).Lock(...)
        sync/mutex.go:81
sync.(*RWMutex).Lock(0xc00026b1c8)
        sync/rwmutex.go:111 +0x90
github.com/tendermint/tendermint/internal/statesync.(*Reactor).processPeerUpdate(0xc00026b080, 0xc000650008, 0x28, 0x124de90, 0x4)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:849 +0x1a5
github.com/tendermint/tendermint/internal/statesync.(*Reactor).processPeerUpdates(0xc00026b080)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:883 +0xab
created by github.com/tendermint/tendermint/internal/statesync.(*Reactor).OnStart
        github.com/tendermint/tendermint/internal/statesync/reactor.go:219 +0xcd
```
2021-09-30 19:19:10 +00:00
M. J. Fromberger
bdd815ebc9 Align atomic struct field for compatibility in 32-bit ABIs. (#7037)
The layout of struct fields means that interior fields may not be properly
aligned for 64-bit access.

Fixes #7000.
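
For context, this is the standard Go gotcha being described: `sync/atomic`'s 64-bit operations require 64-bit alignment, which 32-bit platforms only guarantee for the first word of a struct (or of an allocated array/slice). The struct below is illustrative, not the actual Tendermint type.

```go
package main

import "sync/atomic"

// counters keeps its atomically-accessed int64 first, so it stays 64-bit
// aligned even under 32-bit ABIs.
type counters struct {
	total int64
	flag  int32
}

// A problematic layout, for contrast: an int64 following a smaller field may
// land on a 4-byte boundary on 32-bit platforms, and atomic access panics.
//
//	type badCounters struct {
//		flag  int32
//		total int64 // possibly misaligned on 32-bit
//	}

func main() {
	var c counters
	atomic.AddInt64(&c.total, 1)
}
```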
2021-09-30 10:53:05 -07:00
M. J. Fromberger
77052370cc Update default config template to match mapstructure keys. (#7036)
Fix a couple of cases where we updated the keys in the config reader, but
forgot to update some of their uses in the default template.

Fixes #7031.
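
A toy example of the kind of mismatch this fixes (the struct, key, and template below are made up, not the real config): the template must emit the same key the `mapstructure` tag expects, or the value silently stops being read.

```go
package config

// StateSyncConfig is a made-up stand-in for a config section.
type StateSyncConfig struct {
	Fetchers int `mapstructure:"fetchers"`
}

// configTemplate must write the same key the reader expects; a stale
// "num_fetchers = ..." line here would no longer round-trip.
const configTemplate = `
[statesync]
fetchers = {{ .StateSync.Fetchers }}
`
```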
2021-09-30 17:13:32 +00:00
William Banfield
6a0d9c832a blocksync: fix shutdown deadlock issue (#7030)
When shutting down blocksync, it is observed that the process can hang completely. A dump of running goroutines reveals that this is due to goroutines not listening on the correct shutdown signal. Namely, the `poolRoutine` goroutine does not wait on `pool.Quit`. The `poolRoutine` does not receive any other shutdown signal during `OnStop` because it must stop before `r.closeCh` is closed. Currently the `poolRoutine` listens on the `closeCh`, which will not close until the `poolRoutine` stops and calls `poolWG.Done()`.

This change also puts the `requestRoutine()` in the `OnStart` method to make it more visible since it does not rely on anything that is spawned in the `poolRoutine`.

```
goroutine 183 [semacquire]:
sync.runtime_Semacquire(0xc0000d3bd8)
        runtime/sema.go:56 +0x45
sync.(*WaitGroup).Wait(0xc0000d3bd0)
        sync/waitgroup.go:130 +0x65
github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStop(0xc0000d3a00)
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:193 +0x47
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc0000d3a00, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/node.(*nodeImpl).OnStop(0xc00052c000)
        github.com/tendermint/tendermint/node/node.go:758 +0xc62
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc00052c000, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/cmd/tendermint/commands.NewRunNodeCmd.func1.1()
        github.com/tendermint/tendermint/cmd/tendermint/commands/run_node.go:143 +0x62
github.com/tendermint/tendermint/libs/os.TrapSignal.func1(0xc000df6d20, 0x7f04a68da900, 0xc0004a8930, 0xc0005a72d8)
        github.com/tendermint/tendermint/libs/os/os.go:26 +0x102
created by github.com/tendermint/tendermint/libs/os.TrapSignal
        github.com/tendermint/tendermint/libs/os/os.go:22 +0xe6


goroutine 161 [select]:
github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).poolRoutine(0xc0000d3a00, 0x0)
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:464 +0x2b3
created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:174 +0xf1

goroutine 162 [select]:
github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).processBlockSyncCh(0xc0000d3a00)
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:310 +0x151
created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:177 +0x54

goroutine 163 [select]:
github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).processPeerUpdates(0xc0000d3a00)
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:363 +0x12b
created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart
        github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:178 +0x76
```
2021-09-30 16:19:18 +00:00
Tess Rinearson
c9d92f5f19 .github: remove tessr and bez from codeowners (#7028) 2021-09-29 22:02:28 +02:00
Sam Kleinman
23fe6fd2f9 statesync: ensure test network properly configured (#7026)
This test reliably gets hung up on network configuration (which may
be a real issue), but its network setup is hand-cranked and we should
ensure that the test focuses on its core assertions and doesn't fail for
test-architecture reasons.
2021-09-29 16:38:27 +00:00
M. J. Fromberger
962caeae65 Make doc site index default to the latest release (#7023)
Fix the order of lines in docs/versions so that v0.34 is last (the current release).

Related changes:

- Update docs/DOCS_README.md to reflect the current state of how we publish the site.
- Fix the build-docs target in Makefile to not perturb the package-lock.json during the build.
- Fix the Makefile rule to not clobber package-lock.json.
2021-09-29 08:38:52 -07:00
Sam Kleinman
8758078786 consensus: avoid unbuffered channel in state test (#7025) 2021-09-29 11:19:34 -04:00
Sam Kleinman
b1dfbb8bc3 e2e: generator ensure p2p modes (#7021) 2021-09-28 17:04:37 -04:00
Sam Kleinman
c18470a5f1 e2e: use network size in load generator (#7019) 2021-09-28 16:47:35 -04:00
M. J. Fromberger
ea539dcb98 Update changelog for v0.35.0-rc2. (#7011)
Also, relinkify and update the bounty URL.
2021-09-28 07:59:47 -07:00
Sam Kleinman
e35a42fc68 e2e: use smaller transactions (#7016)
75% of the failures in the last run used the 10kb transactions. I'd
like to dial the size back and see if things improve.
2021-09-28 14:39:26 +00:00
lklimek
1bd1593f20 fix: race condition in p2p_switch and pex_reactor (#7015)
Closes https://github.com/tendermint/tendermint/issues/7014
2021-09-28 09:32:14 -04:00
Sam Kleinman
6be36613c9 e2e: reduce number of stateless nodes in test networks (#7010) 2021-09-27 17:00:05 -04:00
Sam Kleinman
9a16d930c6 statesync: add logging while waiting for peers (#7007) 2021-09-27 16:46:40 -04:00
Sam Kleinman
8023a2aeef e2e: add generator tests (#7008) 2021-09-27 15:38:03 -04:00
Sam Kleinman
6eaa3b24d6 ci: use cheaper codecov data collection (#7009) 2021-09-27 15:22:25 -04:00
Sam Kleinman
b150ea6b3e e2e: avoid seed nodes when statesyncing (#7006) 2021-09-27 14:08:08 -04:00
Sam Kleinman
b879f71e8e e2e: reduce log noise (#7004) 2021-09-27 13:27:08 -04:00
dependabot[bot]
bce7c2f73b build(deps): Bump google.golang.org/grpc from 1.40.0 to 1.41.0 (#7003) 2021-09-27 17:12:56 +02:00
Callum Waters
60a6c6fb1a e2e: allow running of single node using the e2e app (#6982) 2021-09-27 15:43:07 +02:00
Sam Kleinman
fb9eaf576a e2e: improve chances of statesyncing success (#7001)
This reduces the situations where a node gets stuck block syncing,
which seemed to happen a lot in last night's run.
2021-09-26 16:10:36 +00:00
Sam Kleinman
37ca98a544 e2e: reduce number of statesyncs in test networks (#6999) 2021-09-25 19:14:38 -04:00
Sam Kleinman
c101fa17ab e2e: add limit and sort to generator (#6998)
I observed a couple of problems with the generator in some recent tests: 

- there were a couple of hybrid test cases which did not have any
  legacy nodes (randomness and all). I changed the probability to
  produce more reliable results.

- added options to the generator to allow setting a max (to
  complement the earlier min) number of nodes for local testing.

- added an option to support reversing the sort order so "more
  complex" networks were first, as well as tweaked some of the point
  values. 

- this refactored the generator's CLI parsing to be a bit clearer.
2021-09-25 15:53:04 +00:00
M. J. Fromberger
118bfe2087 abci: Flush socket requests and responses immediately. (#6997)
The main effect of this change is to flush the socket client and server message
encoding buffers immediately once the message is fully and correctly encoded.
This allows us to remove the timer and some other special cases, without
changing the observed behaviour of the system.

-- Background

The socket protocol client and server each use a buffered writer to encode
request and response messages onto the underlying connection. This reduces the
possibility of a single message being split across multiple writes, but has the
side-effect that a request may remain buffered for some time.

The implementation worked around this by keeping a ticker that occasionally
triggers a flush, and by flushing the writer in response to an explicit request
baked into the client/server protocol (see also #6994).

These workarounds are both unnecessary: Once a message has been dequeued for
sending and fully encoded in wire format, there is no real use keeping all or
part of it buffered locally.  Moreover, using an asynchronous process to flush
the buffer makes the round-trip performance of the request unpredictable.
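
A minimal sketch of the write path after this change (assumed shape, not the actual abci socket client/server code): frame one message, write it, and flush immediately, so nothing lingers in the buffer and no flush ticker is needed.

```go
package abcisocket

import (
	"bufio"
	"encoding/binary"
	"io"
)

// newBufferedWriter wraps the connection with the same default buffer size
// cited in the benchmarks below (4096 bytes).
func newBufferedWriter(conn io.Writer) *bufio.Writer {
	return bufio.NewWriterSize(conn, 4096)
}

// writeMessage frames msg with a varint length prefix, writes it, and
// flushes right away once the message is fully encoded.
func writeMessage(w *bufio.Writer, msg []byte) error {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
	if _, err := w.Write(lenBuf[:n]); err != nil {
		return err
	}
	if _, err := w.Write(msg); err != nil {
		return err
	}
	return w.Flush()
}
```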

-- Benchmarks

Code: https://play.golang.org/p/0ChUOxJOiHt

I found no pre-existing performance benchmarks to justify the flush pattern,
but a natural question is whether this will significantly harm client/server
performance.  To test this, I implemented a simple benchmark that transfers
randomly-sized byte buffers from a no-op "client" to a no-op "server" over a
Unix-domain socket, using a buffered writer, both with and without explicit
flushes after each write.

As the following data show, flushing every time (FLUSH=true) does reduce raw
throughput, but not by a significant amount except for very small request
sizes, where the transfer time is already trivial (1.9μs).  Given that the
client is calibrated for 1MiB transactions, the overhead is not meaningful.

The percentage in each section is the speedup for flushing only when the buffer
is full, relative to flushing every block.  The benchmark uses the default
buffer size (4096 bytes), which is the same value used by the socket client and
server implementation:

  FLUSH  NBLOCKS  MAX      AVG     TOTAL       ELAPSED       TIME/BLOCK
  false  3957471  512      255     1011165416  2.00018873s   505ns
  true   1068568  512      255     273064368   2.000217051s  1.871µs
                                                             (73%)

  false  536096   4096     2048    1098066401  2.000229108s  3.731µs
  true   477911   4096     2047    978746731   2.000177825s  4.185µs
                                                             (10.8%)

  false  124595   16384    8181    1019340160  2.000235086s  16.053µs
  true   120995   16384    8179    989703064   2.000329349s  16.532µs
                                                             (2.9%)

  false  2114     1048576  525693  1111316541  2.000479928s  946.3µs
  true   2083     1048576  526379  1096449173  2.001817137s  961.025µs
                                                             (1.5%)

Note also that the FLUSH=false baseline is actually faster than the production
code, which flushes more often than is required by the buffer filling up.

Moreover, the timer slows down the overall transaction rate of the client and
server, independent of how fast the socket transfer is, so the loss on a real
workload is probably much less.
2021-09-24 15:37:25 -07:00
Sam Kleinman
71c6682b57 statesync: clean up reactor/syncer lifecylce (#6995)
I've been noticing a number of situations where the statesync reactor
blocks waiting for peers (or similar). I've moved things around to
improve outcomes in local tests.
2021-09-24 21:40:12 +00:00
Sam Kleinman
dbad701515 ci: use smart merges (#6993) 2021-09-24 14:59:10 -04:00
Sam Kleinman
5e45676875 e2e: do not inject evidence through light proxy (#6992)
In the last run, there were two problems with responses returned from
light nodes' RPC endpoints. I think exercising the light client proxy
RPC system is something that can and should be done via unit testing;
in production these errors are likely transient, and in CI they are
very likely to fail for test-environment reasons.
2021-09-24 18:27:00 +00:00
Sam Kleinman
08982c81fc e2e: skip validation of status apphash (#6991)
I believe this assertion is likely redundant given that we're checking the block apphash.
2021-09-24 17:49:06 +00:00
Sam Kleinman
b203c91799 rpc: implement BroadcastTxCommit without event subscriptions (#6984) 2021-09-24 13:01:35 -04:00
Sam Kleinman
ab8cfb9f57 e2e: tighten timing for load generation (#6990) 2021-09-24 12:28:51 -04:00
Sam Kleinman
c909f8a236 e2e: avoid non-determinism in app hash check (#6985) 2021-09-24 11:52:47 -04:00
Sam Kleinman
363b87e8ea changelog: add entry for internalizations (#6989) 2021-09-24 10:50:19 -04:00
dependabot[bot]
dd4141e76f build(deps): Bump github.com/go-kit/kit from 0.11.0 to 0.12.0 (#6988)
Bumps [github.com/go-kit/kit](https://github.com/go-kit/kit) from 0.11.0 to 0.12.0.
- [Release notes](https://github.com/go-kit/kit/releases)
- [Commits](https://github.com/go-kit/kit/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: github.com/go-kit/kit
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-09-24 10:15:30 -04:00
Sam Kleinman
5ccd668c78 e2e: load should be proportional to network (#6983) 2021-09-23 16:58:10 -04:00
Sam Kleinman
e94c418ad9 e2e: always preserve failed networks (#6981) 2021-09-23 14:52:14 -04:00
Sam Kleinman
3d410e4a6b e2e: only check validator sets after statesync (#6980) 2021-09-23 14:31:59 -04:00
Sam Kleinman
8a171b8426 e2e: improve manifest sorting algorithim (#6979) 2021-09-23 12:42:20 -04:00
Sam Kleinman
bb8ffcb95b store: move package to internal (#6978) 2021-09-23 11:46:42 -04:00
M. J. Fromberger
cf7537ea5f cleanup: Reduce and normalize import path aliasing. (#6975)
The code in the Tendermint repository makes heavy use of import aliasing.
This is made necessary by our extensive reuse of common base package names, and
by repetition of similar names across different subdirectories.

Unfortunately we have not been very consistent about which packages we alias in
various circumstances, and the aliases we use vary. In the spirit of the advice
in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports,
this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended
to help make the code more readable to developers (including myself) trying to
understand what is being imported where.

Only unexported names have been modified, and the changes were generated and
applied mechanically with gofmt -r and comby, respecting the lexical and
syntactic rules of Go.  Even so, I did not fix every inconsistency. Where the
changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
2021-09-23 07:52:07 -07:00
Marko
c9beef796d proto: regenerate code (#6977)
## Description

Replace-all seems to have been used, causing proto files to be changed without being regenerated.
2021-09-23 12:37:11 +00:00
M. J. Fromberger
41ac5b90c5 Fix script paths in go:generate directives. (#6973)
We moved some files further down in the directory structure in #6964, which
caused the relative paths to the mockery wrapper to stop working.

There does not seem to be an obvious way to get the module root as a default
environment variable, so for now I just added the extra up-slashes.
2021-09-23 01:02:24 +00:00
M. J. Fromberger
7e4cc595d3 Remove the unused rpc/client/mocks package. (#6974)
This package has not been used in the tendermint repository since 31e7cdee.

Note that this is not the same package as rpc/client/mock (N.B. singular) which
is still used in some tests.

A search of GitHub turns up only 11 uses, all of which are in clones of the
tendermint repo at old commits.
2021-09-22 17:45:11 -07:00
M. J. Fromberger
1995ef2572 rpc: Strip down the base RPC client interface. (#6971)
* rpc: Strip down the base RPC client interface.

Prior to this change, the RPC client interface required implementing the entire
Service interface, but most of the methods of Service are not needed by the
concrete clients. Dissociate the Client interface from the Service interface
(a rough sketch of the resulting shape follows at the end of this message).

- Extract only those methods of Service that are necessary to make the existing
  clients work.

- Update the clients to combine Start/OnStart and Stop/OnStop. This does not
  change what the clients do to start or stop. Only the websocket clients make
  use of this functionality anyway.

  The websocket implementation uses some plumbing from the BaseService helper.
  We should be able to excise that entirely, but the current interface
  dependencies among the clients would require a much larger change, and one
  that leaks into other (non-RPC) packages.

  As a less-invasive intermediate step, preserve the existing client behaviour
  (and tests) by extracting the necessary subset of the BaseService
  functionality to an analogous RunState helper for clients. I plan to obsolete
  that type in a future PR, but for now this makes a useful waypoint.

Related:
- Clean up client implementations.
- Update mocks.
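
For orientation, a rough sketch of the resulting shape (illustrative only; these are not the actual rpc/client definitions): a narrow client interface plus a small RunState helper in place of BaseService.

```go
package rpcclient

// Client is the narrowed interface: lifecycle methods plus the concrete
// client's RPC methods (elided), with no dependency on the full Service
// interface.
type Client interface {
	Start() error
	Stop() error
	IsRunning() bool
	// ... RPC methods ...
}

// RunState stands in for the helper described above: just enough state
// tracking for the clients that previously leaned on BaseService.
type RunState struct {
	running bool
}

func (r *RunState) Start() error    { r.running = true; return nil }
func (r *RunState) Stop() error     { r.running = false; return nil }
func (r *RunState) IsRunning() bool { return r.running }
```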
2021-09-22 14:26:35 -07:00
Sam Kleinman
d04b6c2a5e e2e: run multiple should use preserve (#6972) 2021-09-22 13:13:31 -04:00
Sam Kleinman
1c4950dbd2 state: move package to internal (#6964) 2021-09-22 13:04:25 -04:00
Sam Kleinman
638346500d ci: reduce number of groups for 0.34 e2e runs (#6968) 2021-09-22 12:48:38 -04:00
Sam Kleinman
07d10184a1 inspect: remove duplicated construction path (#6966) 2021-09-22 12:32:49 -04:00
Sam Kleinman
5a13c7075b rfc: event system (#6957) 2021-09-21 11:22:55 -04:00
Daria K
0f53a590ff readme: update discord links (#6965)
This updates the Discord invite links.
2021-09-21 10:35:40 +00:00
Marko
df2d744ea9 config/docs: update and deprecated (#6879)
## Description

- Add deprecation notes to config values in the TOML
- Update the config in the configuration doc
- Explain how to set up a node with the new network

- Add a sentence about not needing to fork Tendermint for the built-in tutorial
- Closes #6865
- Add a note to use a released version of Tendermint with the tutorials, to avoid unknown issues prior to a release.
2021-09-21 10:32:00 +00:00
JayT106
84ffaaaf37 statesync/rpc: metrics for the statesync and the rpc SyncInfo (#6795) 2021-09-21 09:22:16 +02:00
Sam Kleinman
9dfdc62eb7 proxy: move proxy package to internal (#6953) 2021-09-20 15:18:48 -04:00
dependabot[bot]
cf59b8b38e build(deps): Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#6961)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.8.1 to 1.9.0.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.8.1...v1.9.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-09-20 13:03:10 -04:00
Sam Kleinman
87b876a73b crypto/armor: remove unused package (#6963) 2021-09-20 12:30:26 -04:00
Ismail Khoffi
ad067d73b9 rfc: Fix a few typos and formatting glitches p2p roadmap (#6960) 2021-09-20 09:02:29 -04:00
dependabot[bot]
ea6eecbb91 build(deps): Bump github.com/vektra/mockery/v2 from 2.9.3 to 2.9.4 (#6956)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.9.3 to 2.9.4.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.9.3...v2.9.4)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-09-17 10:06:40 -04:00
William Banfield
bf9232e99f e2e: cleanup on all errors if preserve not specified (#6950)
If the e2e tests error, they leave all of the e2e state (containers, networks, etc.) behind.
We should clean this up when the test run shuts down, even if it exits in error.
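As a general illustration of the clean-up-even-on-error pattern (helper names are hypothetical, not the actual e2e runner code):

```go
package e2e

import "log"

// Run tears down the testnet infrastructure even when the tests themselves
// fail, so containers, networks, and other state are not left behind.
func Run() error {
	if err := setupInfra(); err != nil {
		return err
	}
	defer cleanupInfra() // runs on both success and failure

	return runTests()
}

func setupInfra() error { return nil }

func cleanupInfra() { log.Println("removing containers, networks, and other e2e state") }

func runTests() error { return nil }
```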
2021-09-17 08:35:49 +00:00
407 changed files with 10208 additions and 20003 deletions

.github/CODEOWNERS (2 changed lines)

@@ -7,4 +7,4 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair

.github/mergify.yml (29 changed lines)

@@ -1,18 +1,19 @@
pull_request_rules:
- name: Automerge to master
queue_rules:
- name: default
conditions:
- base=master
- base=v0.35.x
- label=S:automerge
pull_request_rules:
- name: Automerge to v0.35.x
conditions:
- base=v0.35.x
- label=S:automerge
actions:
merge:
queue:
method: squash
strict: true
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=master
- label=S:backport-to-v0.34.x
actions:
backport:
branches:
- v0.34.x
name: default
commit_message_template: |
{{ title }} (#{{ number }})
{{ body }}

.github/workflows/build.yml (new file, 82 lines)

@@ -0,0 +1,82 @@
name: Build
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or a release branch and every pull request
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
pull_request:
push:
branches:
- master
- release/**
jobs:
build:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: install
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"
test_abci_cli:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install_abci
if: "env.GIT_DIFF != ''"
- run: abci/tests/test_cli/test.sh
shell: bash
if: "env.GIT_DIFF != ''"
test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- name: test_apps
run: test/app/test.sh
shell: bash
if: "env.GIT_DIFF != ''"


@@ -1,133 +0,0 @@
name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
- "!test/"
branches:
- master
- release/**
jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
# cache multiple
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-00"
path: ./pkgs.txt.part.00
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-01"
path: ./pkgs.txt.part.01
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-02"
path: ./pkgs.txt.part.02
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-03"
path: ./pkgs.txt.part.03
build-linux:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"
tests:
runs-on: ubuntu-latest
needs: split-test-files
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}"
if: env.GIT_DIFF
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./${{ matrix.part }}profile.out
upload-coverage-report:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -1,8 +1,7 @@
name: Build & Push
name: Docker
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
on:
pull_request:
push:
branches:
- master
@@ -39,7 +38,7 @@ jobs:
with:
platforms: all
- name: Set up Docker Buildx
- name: Set up Docker Build
uses: docker/setup-buildx-action@v1.6.0
- name: Login to DockerHub


@@ -17,13 +17,13 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
group: ['00', '01']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.17'
- uses: actions/checkout@v2.3.4
with:
@@ -37,7 +37,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly
run: ./build/generator -g 2 -d networks/nightly
- name: Run testnets in group ${{ matrix.group }}
working-directory: test/e2e


@@ -17,13 +17,13 @@ jobs:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.17'
- uses: actions/checkout@v2.3.4
@@ -35,7 +35,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e


@@ -16,7 +16,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:


@@ -1,5 +1,5 @@
# Runs fuzzing nightly.
name: fuzz-nightly
name: Fuzz Tests
on:
workflow_dispatch: # allow running workflow manually
schedule:
@@ -15,7 +15,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.17'
- uses: actions/checkout@v2.3.4


@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
@@ -23,7 +23,7 @@ jobs:
- uses: golangci/golangci-lint-action@v2.5.2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.38
version: v1.42.1
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF


@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2.4.0
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:


@@ -18,7 +18,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.17'
- name: Build
uses: goreleaser/goreleaser-action@v2


@@ -1,106 +1,75 @@
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or a release branch and every pull request
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
name: Test
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**
jobs:
build:
name: Build
tests:
runs-on: ubuntu-latest
timeout-minutes: 5
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
Makefile
- name: Run Go Tests
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=4
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
- uses: actions/upload-artifact@v2
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
test_abci_cli:
upload-coverage-report:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
needs: tests
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/cache@v2.1.6
Makefile
- uses: actions/download-artifact@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/download-artifact@v2
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- run: abci/tests/test_cli/test.sh
shell: bash
if: env.GIT_DIFF
test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/download-artifact@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/download-artifact@v2
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- name: test_apps
run: test/app/test.sh
shell: bash
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF

.gitignore (1 changed line)

@@ -25,6 +25,7 @@ docs/_build
docs/dist
docs/node_modules/
docs/spec
docs/.vuepress/public/rpc
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out


@@ -13,18 +13,18 @@ linters:
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- golint
- revive
- gosec
- gosimple
- govet
- ineffassign
# - interfacer
- lll
# - lll
# - maligned
- misspell
- nakedret
@@ -33,7 +33,7 @@ linters:
- staticcheck
- structcheck
- stylecheck
- typecheck
# - typecheck
- unconvert
# - unparam
- unused
@@ -46,9 +46,6 @@ issues:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50
linters-settings:


@@ -1,160 +1,178 @@
# Changelog
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35
## v0.35.0
Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
November 4, 2021
### BREAKING CHANGES
- CLI/RPC/Config
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] \#6854 Remove deprecated snake case commands. (@tychoish)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is no longer required to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
- Go API
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
- [mempool] \#6396 Remove the mempool's write-ahead log (WAL), previously unused by the tendermint code. (@tychoish)
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
Special thanks to external contributors on this release: @JayT106,
@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman,
@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
### FEATURES
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters)
- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish)
- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish).
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters).
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] \#6305 v2 pex reactor with backwards compatibility. Introduces two new pex messages to
- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] \#6376 Enable sr25519 as a validator key type
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index (a sketch of an application populating these fields appears at the end of this FEATURES section).
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
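To make the prioritized-mempool entry above concrete, here is a minimal sketch of an ABCI application populating the new `ResponseCheckTx` fields; the `parseTx` helper and the fee-based priority are assumptions for illustration, not part of the changelog entry:

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

type Application struct {
	abci.BaseApplication
}

// CheckTx sets Priority so the v1 mempool can order transactions when a
// proposer reaps them, and Sender so the mempool can index by sender.
func (a *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	fee, sender, err := parseTx(req.Tx) // hypothetical decoding helper
	if err != nil {
		return abci.ResponseCheckTx{Code: 1, Log: err.Error()}
	}
	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: fee,    // higher-fee transactions are reaped first
		Sender:   sender, // used by the mempool as an index
	}
}

// parseTx stands in for application-specific transaction decoding.
func parseTx(tx []byte) (int64, string, error) {
	return int64(len(tx)), "sender-address", nil
}
```

Operators would then choose between the reactors with the `mempool.version` configuration noted above, `v1` being the default.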
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes)
- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes)
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] \#6120 use batch verification for verifying commit signatures.
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] \#6240 Add `context.Context` to privval interface.
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
### BUG FIXES
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] \#6590 Update the metrics during blocksync (@JayT106)
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060)
wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
### BREAKING CHANGES
- Go API
- [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on
deprecated fundamentals. Downstream users should maintain this
library. (@tychoish)
- [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to
`internal` to prevent consumption of these internal APIs by
external users. (@tychoish)
- [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
- [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
- [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters)
- [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public.
- [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
- CLI/RPC/Config
- [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish)
- [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Use the OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)
- Apps
- [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`.
- It is no longer required to set ldflags to set version strings
- [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app
- Data Storage
- [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters)
- [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove the mempool's write-ahead log (WAL), previously unused by the tendermint code. (@tychoish)
- [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106)
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish)
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator.
- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes)
- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes)
- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes)
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes)
- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes)
- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters)
- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commit signatures.
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface.
- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778)
- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
## v0.34.13


@@ -1,26 +1,38 @@
# Unreleased Changes
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## vX.X
Special thanks to external contributors on this release:
Month, DD, YYYY
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
Special thanks to external contributors on this release:
### BREAKING CHANGES
- CLI/RPC/Config
- [config] [\#7276](https://github.com/tendermint/tendermint/pull/7276) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
- Apps
- P2P Protocol
- [p2p] [\#7265](https://github.com/tendermint/tendermint/pull/7265) Peer manager reduces peer score for each failed dial attempt to peers that have not successfully dialed. (@tychoish)
- Go API
- Blockchain Protocol
### FEATURES
- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) (@cmwaters)
### IMPROVEMENTS
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case (see the sketch at the end of this section). (@odeke-em)
- [\#7338](https://github.com/tendermint/tendermint/pull/7338) pubsub: Performance improvements for the event query API (backport of #7319) (@creachadair)
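A generic illustration of the `sync.Pool` buffer-reuse pattern referenced in the protoio entry above; this is not the actual `MarshalDelimited` code, and it pools a scratch buffer for a plain byte payload only to keep the sketch self-contained:

```go
package protoioexample

import (
	"bytes"
	"encoding/binary"
	"sync"
)

// bufPool hands out reusable scratch buffers so the slow path does not
// allocate a fresh buffer for every message.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// marshalDelimited prepends a uvarint length to the payload, borrowing its
// scratch buffer from the pool instead of allocating one per call.
func marshalDelimited(payload []byte) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	var hdr [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(hdr[:], uint64(len(payload)))
	buf.Write(hdr[:n])
	buf.Write(payload)

	// Copy out before the buffer goes back to the pool.
	out := make([]byte, buf.Len())
	copy(out, buf.Bytes())
	return out
}
```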
### BUG FIXES
- [\#7310](https://github.com/tendermint/tendermint/issues/7310) pubsub: Report a non-nil error when shutting down (fixes #7306).


@@ -249,6 +249,7 @@ The author of the original pull request is responsible for solving the conflicts
merging the pull request.
#### Creating a backport branch
If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!
@@ -334,6 +335,14 @@ If there were no release candidates, begin by creating a backport branch, as des
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
- `git push origin v0.35.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Add the release to the documentation site generator config (see
[DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
- Start on branch `master`.
- Add a new line at the bottom of [`docs/versions`](./docs/versions) to
ensure the newest release is the default for the landing page.
- Add a new entry to `themeConfig.versions` in
[`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
release in the dropdown versions menu.
#### Minor release (point releases)


@@ -1,6 +1,5 @@
#!/usr/bin/make -f
PACKAGES=$(shell go list ./...)
BUILDDIR ?= $(CURDIR)/build
BUILD_TAGS?=tendermint
@@ -15,7 +14,9 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
BASE_BRANCH := v0.35.x
DOCKER_PROTO := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
CGO_ENABLED ?= 0
# handle nostrip
@@ -83,26 +84,28 @@ proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all
proto-gen:
@docker pull -q tendermintdev/docker-build-proto
@echo "Generating Protobuf files"
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
@echo "Generating Go packages for .proto files"
@$(DOCKER_PROTO) sh ./scripts/protocgen.sh
.PHONY: proto-gen
proto-lint:
@$(DOCKER_BUF) check lint --error-format=json
@echo "Running lint checks for .proto files"
@$(DOCKER_PROTO) buf lint --error-format=json
.PHONY: proto-lint
proto-format:
@echo "Formatting Protobuf files"
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
@echo "Formatting .proto files"
@$(DOCKER_PROTO) find ./ -not -path "./third_party/*" -name '*.proto' -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking:
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
@echo "Checking for breaking changes in .proto files"
@$(DOCKER_PROTO) buf breaking --against .git#branch=$(BASE_BRANCH)
.PHONY: proto-check-breaking
proto-check-breaking-ci:
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
@echo "Checking for breaking changes in .proto files"
@$(DOCKER_PROTO) buf breaking --against $(HTTPS_GIT)#branch=$(BASE_BRANCH)
.PHONY: proto-check-breaking-ci
###############################################################################
@@ -118,7 +121,7 @@ install_abci:
.PHONY: install_abci
###############################################################################
### Privval Server ###
###############################################################################
build_privval_server:
@@ -131,11 +134,11 @@ generate_test_cert:
# generate server cerificate
@certstrap request-cert -cn server -ip 127.0.0.1
# self-sign server cerificate with rootCA
@certstrap sign server --CA "root CA"
# generate client cerificate
@certstrap request-cert -cn client -ip 127.0.0.1
# self-sign client cerificate with rootCA
@certstrap sign client --CA "root CA"
.PHONY: generate_test_cert
###############################################################################
@@ -214,7 +217,7 @@ DESTINATION = ./index.html.md
build-docs:
@cd docs && \
while read -r branch path_prefix; do \
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
mkdir -p ~/output/$${path_prefix} ; \
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
@@ -227,13 +230,13 @@ build-docs:
build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
rm -rf DOCKER/tendermint
.PHONY: build-docker
###############################################################################
### Mocks ###
###############################################################################
mockery:
@@ -303,3 +306,23 @@ build-reproducible:
--name latest-build cosmossdk/rbuilder:latest
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
.PHONY: build-reproducible
# Implements test splitting and running. This is pulled directly from
# the github action workflows for better local reproducibility.
GO_TEST_FILES != find $(CURDIR) -name "*_test.go"
# default to four splits
NUM_SPLIT ?= 4
$(BUILDDIR):
mkdir -p $@
# the format statement filters out all packages that don't have tests.
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
go list -f "{{ if .TestGoFiles }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out


@@ -9,7 +9,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/cosmosnetwork)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
[![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
@@ -29,16 +29,15 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
Tendermint has been used in production in private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
## Security
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint).
To report a security vulnerability, see our [bug bounty program](https://hackerone.com/cosmos).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
@@ -61,8 +60,8 @@ See the [install instructions](/docs/introduction/install.md).
### Quick Start
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
## Contributing
@@ -71,7 +70,7 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
@@ -95,7 +94,7 @@ In an effort to avoid accumulating technical debt prior to 1.0.0,
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
will work with existing Tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain. However, any bump in the PATCH version should be
compatible with existing blockchain histories.

View File

@@ -98,7 +98,7 @@ are:
- `blockchain`
- `evidence`
Accordingly, the `node` package was changed to reduce access to
Accordingly, the `node` package changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:
@@ -109,6 +109,20 @@ will need to change to accommodate these changes. Most notably:
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.
To access any of the functionality previously available via the
`node.Node` type, use the `*local.Local` "RPC" client, which exposes
the full RPC interface as direct function calls. Import the
`github.com/tendermint/tendermint/rpc/client/local` package and pass
the node service as in the following:
```go
node := node.NewDefault() // construct the node object
// start and set up the node service
client := local.New(node.(local.NodeService))
// use client object to interact with the node
```
### gRPC Support
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.

View File

@@ -1,4 +1,4 @@
package abcicli
package abciclient
import (
"context"
@@ -87,7 +87,7 @@ type ReqRes struct {
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx tmsync.RWMutex
mtx tmsync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
}
@@ -137,16 +137,16 @@ func (r *ReqRes) InvokeCallback() {
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
r.mtx.RLock()
defer r.mtx.RUnlock()
r.mtx.Lock()
defer r.mtx.Unlock()
return r.cb
}
// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
defer r.mtx.Unlock()
r.done = true
r.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {

35
abci/client/creators.go Normal file
View File

@@ -0,0 +1,35 @@
package abciclient
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
)
// Creator creates new ABCI clients.
type Creator func() (Client, error)
// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
mtx := new(tmsync.Mutex)
return func() (Client, error) {
return NewLocalClient(mtx, app), nil
}
}
// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(addr, transport string, mustConnect bool) Creator {
return func() (Client, error) {
remoteApp, err := NewClient(addr, transport, mustConnect)
if err != nil {
return nil, fmt.Errorf("failed to connect to proxy: %w", err)
}
return remoteApp, nil
}
}
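
A minimal usage sketch for the new `Creator` constructors above. The kvstore example application and the address/transport values are illustrative assumptions, not part of this change:

```go
package main

import (
	"log"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	// In-process application: the Creator captures a shared mutex and the app.
	local := abciclient.NewLocalCreator(kvstore.NewApplication())

	// Out-of-process application: the Creator dials the proxy-app address when
	// invoked; mustConnect=true reports connection failures immediately.
	remote := abciclient.NewRemoteCreator("tcp://127.0.0.1:26658", "socket", true)

	for _, create := range []abciclient.Creator{local, remote} {
		client, err := create()
		if err != nil {
			log.Printf("creating ABCI client: %v", err)
			continue
		}
		if err := client.Start(); err != nil {
			log.Printf("starting ABCI client: %v", err)
		}
	}
}
```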

View File

@@ -1,4 +1,4 @@
// Package abcicli provides an ABCI implementation in Go.
// Package abciclient provides an ABCI implementation in Go.
//
// There are 3 clients available:
// 1. socket (unix or TCP)
@@ -26,4 +26,4 @@
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abcicli
package abciclient
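
As a rough sketch of how these clients are used (assuming an ABCI server is already listening at the example address), `NewClient` picks the flavor by transport string, and the `*Sync` methods block until the response arrives:

```go
package main

import (
	"context"
	"log"

	abciclient "github.com/tendermint/tendermint/abci/client"
)

func main() {
	ctx := context.Background()

	// Transport is "socket" or "grpc" (see the --abci flag later in this
	// change set); the address and mustConnect=true are illustrative.
	client, err := abciclient.NewClient("tcp://127.0.0.1:26658", "socket", true)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	if err := client.Start(); err != nil {
		log.Fatalf("starting client: %v", err)
	}

	// Sync calls wait for the response; the socket client flushes its
	// buffered requests internally.
	res, err := client.EchoSync(ctx, "hello")
	if err != nil {
		log.Fatalf("echo: %v", err)
	}
	log.Printf("echo response: %q", res.Message)
}
```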

View File

@@ -1,4 +1,4 @@
package abcicli
package abciclient
import (
"context"
@@ -8,6 +8,7 @@ import (
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -24,7 +25,7 @@ type grpcClient struct {
conn *grpc.ClientConn
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
mtx tmsync.RWMutex
mtx tmsync.Mutex
addr string
err error
resCb func(*types.Request, *types.Response) // listens to all callbacks
@@ -95,7 +96,10 @@ func (cli *grpcClient) OnStart() error {
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
conn, err := grpc.Dial(cli.addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialerFunc),
)
if err != nil {
if cli.mustConnect {
return err
@@ -149,8 +153,8 @@ func (cli *grpcClient) StopForError(err error) {
}
func (cli *grpcClient) Error() error {
cli.mtx.RLock()
defer cli.mtx.RUnlock()
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
@@ -158,8 +162,8 @@ func (cli *grpcClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------
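
The dial change above replaces the deprecated `grpc.WithInsecure()` option. Stripped of the surrounding retry loop, the new pattern looks like this (the address is a placeholder):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// insecure.NewCredentials() provides a plaintext transport credential,
	// the non-deprecated equivalent of the old grpc.WithInsecure() option.
	conn, err := grpc.Dial("127.0.0.1:26658",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```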

View File

@@ -1,4 +1,4 @@
package abcicli
package abciclient
import (
"context"
@@ -15,7 +15,7 @@ import (
type localClient struct {
service.BaseService
mtx *tmsync.RWMutex
mtx *tmsync.Mutex
types.Application
Callback
}
@@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil)
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
if mtx == nil {
mtx = &tmsync.RWMutex{}
mtx = new(tmsync.Mutex)
}
cli := &localClient{
mtx: mtx,
Application: app,
}
cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
return cli
}
func (app *localClient) SetResponseCallback(cb Callback) {
app.mtx.Lock()
defer app.mtx.Unlock()
app.Callback = cb
app.mtx.Unlock()
}
// TODO: change types.Application to include Error()?
@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err
}
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
app.mtx.RLock()
defer app.mtx.RUnlock()
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return app.callback(
@@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
}
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
app.mtx.RLock()
defer app.mtx.RUnlock()
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return app.callback(
@@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon
}
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.RLock()
defer app.mtx.RUnlock()
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return &res, nil
@@ -249,8 +247,8 @@ func (app *localClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
app.mtx.RLock()
defer app.mtx.RUnlock()
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return &res, nil

View File

@@ -5,7 +5,7 @@ package mocks
import (
context "context"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
log "github.com/tendermint/tendermint/libs/log"
@@ -20,15 +20,15 @@ type Client struct {
}
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -66,15 +66,15 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
}
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -112,15 +112,15 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
}
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -158,15 +158,15 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t
}
// CommitAsync provides a mock function with given fields: _a0
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -204,15 +204,15 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -250,15 +250,15 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx)
}
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) {
ret := _m.Called(ctx, msg)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok {
r0 = rf(ctx, msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -296,15 +296,15 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
}
// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -356,15 +356,15 @@ func (_m *Client) Error() error {
}
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -393,15 +393,15 @@ func (_m *Client) FlushSync(_a0 context.Context) error {
}
// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -439,15 +439,15 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R
}
// InitChainAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -499,15 +499,15 @@ func (_m *Client) IsRunning() bool {
}
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -545,15 +545,15 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
}
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -591,15 +591,15 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
}
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -670,15 +670,15 @@ func (_m *Client) OnStop() {
}
// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -751,7 +751,7 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
}
// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) {
_m.Called(_a0)
}

View File

@@ -1,4 +1,4 @@
package abcicli
package abciclient
import (
"bufio"
@@ -13,7 +13,6 @@ import (
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/libs/timer"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
)
@@ -22,8 +21,6 @@ const (
// reqQueueSize is the max number of queued async requests.
// (memory: 256MB max assuming 1MB transactions)
reqQueueSize = 256
// Don't wait longer than...
flushThrottleMS = 20
)
type reqResWithContext struct {
@@ -40,10 +37,9 @@ type socketClient struct {
mustConnect bool
conn net.Conn
reqQueue chan *reqResWithContext
flushTimer *timer.ThrottleTimer
reqQueue chan *reqResWithContext
mtx tmsync.RWMutex
mtx tmsync.Mutex
err error
reqSent *list.List // list of requests sent, waiting for response
resCb func(*types.Request, *types.Response) // called on all requests, if set.
@@ -57,7 +53,6 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(addr string, mustConnect bool) Client {
cli := &socketClient{
reqQueue: make(chan *reqResWithContext, reqQueueSize),
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
addr: addr,
@@ -102,14 +97,13 @@ func (cli *socketClient) OnStop() {
cli.conn.Close()
}
cli.flushQueue()
cli.flushTimer.Stop()
cli.drainQueue()
}
// Error returns an error if the client was stopped abruptly.
func (cli *socketClient) Error() error {
cli.mtx.RLock()
defer cli.mtx.RUnlock()
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
@@ -119,45 +113,32 @@ func (cli *socketClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------
func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
w := bufio.NewWriter(conn)
bw := bufio.NewWriter(conn)
for {
select {
case reqres := <-cli.reqQueue:
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
if reqres.C.Err() != nil {
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
continue
}
cli.willSendReq(reqres.R)
err := types.WriteMessage(reqres.R.Request, w)
if err != nil {
if err := types.WriteMessage(reqres.R.Request, bw); err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
if err := bw.Flush(); err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
// If it's a flush request, flush the current buffer.
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
}
case <-cli.flushTimer.Ch: // flush queue
select {
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
default:
// Probably will fill the buffer, or retry later.
}
case <-cli.Quit():
return
}
@@ -492,14 +473,6 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
}
}
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}
return reqres, nil
}
@@ -537,7 +510,9 @@ func queueErr(e error) error {
return fmt.Errorf("can't queue req: %w", e)
}
func (cli *socketClient) flushQueue() {
// drainQueue marks as complete and discards all remaining pending requests
// from the queue.
func (cli *socketClient) drainQueue() {
cli.mtx.Lock()
defer cli.mtx.Unlock()
@@ -547,14 +522,17 @@ func (cli *socketClient) flushQueue() {
reqres.Done()
}
// mark all queued messages as resolved
LOOP:
// Mark all queued messages as resolved.
//
// TODO(creachadair): We can't simply range the channel, because it is never
// closed, and the writer continues to add work.
// See https://github.com/tendermint/tendermint/issues/6996.
for {
select {
case reqres := <-cli.reqQueue:
reqres.R.Done()
default:
break LOOP
return
}
}
}

View File

@@ -1,4 +1,4 @@
package abcicli_test
package abciclient_test
import (
"context"
@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/service"
@@ -100,7 +100,7 @@ func TestHangingSyncCalls(t *testing.T) {
}
func setupClientServer(t *testing.T, app types.Application) (
service.Service, abcicli.Client) {
service.Service, abciclient.Client) {
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
addr := fmt.Sprintf("localhost:%d", port)
@@ -110,7 +110,7 @@ func setupClientServer(t *testing.T, app types.Application) (
err = s.Start()
require.NoError(t, err)
c := abcicli.NewSocketClient(addr, true)
c := abciclient.NewSocketClient(addr, true)
err = c.Start()
require.NoError(t, err)

View File

@@ -15,7 +15,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
@@ -27,7 +27,7 @@ import (
// client is a global variable so it can be reused by the console
var (
client abcicli.Client
client abciclient.Client
logger log.Logger
ctx = context.Background()
@@ -67,7 +67,7 @@ var RootCmd = &cobra.Command{
if client == nil {
var err error
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
client, err = abciclient.NewClient(flagAddress, flagAbci, false)
if err != nil {
return err
}

View File

@@ -13,11 +13,12 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
@@ -61,7 +62,7 @@ func testStream(t *testing.T, app types.Application) {
})
// Connect to the socket
client := abcicli.NewSocketClient(socket, false)
client := abciclient.NewSocketClient(socket, false)
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
err = client.Start()
require.NoError(t, err)
@@ -129,7 +130,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
numDeliverTxs := 2000
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
@@ -147,7 +148,10 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
})
// Connect to the socket
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
conn, err := grpc.Dial(socket,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialerFunc),
)
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}

View File

@@ -12,7 +12,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
@@ -165,7 +165,7 @@ func TestValUpdates(t *testing.T) {
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
vals1 = append(vals[:nInit-2], vals[nInit+1])
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
@@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
}
}
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
@@ -241,7 +241,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
}
// Connect to the socket
client := abcicli.NewSocketClient(socket, false)
client := abciclient.NewSocketClient(socket, false)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
if err = server.Stop(); err != nil {
@@ -253,7 +253,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
return client, server, nil
}
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
@@ -265,7 +265,7 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, s
return nil, nil, err
}
client := abcicli.NewGRPCClient(socket, true)
client := abciclient.NewGRPCClient(socket, true)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
if err := server.Stop(); err != nil {
@@ -296,7 +296,7 @@ func TestClientServer(t *testing.T) {
// set up grpc app
kvstore = NewApplication()
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc")
require.NoError(t, err)
t.Cleanup(func() {
@@ -313,7 +313,7 @@ func TestClientServer(t *testing.T) {
runClientTests(t, gclient)
}
func runClientTests(t *testing.T, client abcicli.Client) {
func runClientTests(t *testing.T, client abciclient.Client) {
// run some tests....
key := testKey
value := key
@@ -325,7 +325,7 @@ func runClientTests(t *testing.T, client abcicli.Client) {
testClient(t, client, tx, key, value)
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)

View File

@@ -11,9 +11,9 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
const (
@@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct {
// validator set
ValUpdates []types.ValidatorUpdate
valAddrToPubKeyMap map[string]pc.PublicKey
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
logger log.Logger
}
@@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
return &PersistentKVStoreApplication{
app: &Application{state: state},
valAddrToPubKeyMap: make(map[string]pc.PublicKey),
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
logger: log.NewNopLogger(),
}
}
@@ -194,8 +194,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
return
}
func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
pk, err := cryptoenc.PubKeyFromProto(pubkey)
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
pk, err := encoding.PubKeyFromProto(pubkey)
if err != nil {
panic(err)
}
@@ -243,7 +243,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
}

View File

@@ -240,22 +240,15 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
if err != nil {
bw := bufio.NewWriter(conn)
for res := range responses {
if err := types.WriteMessage(res, bw); err != nil {
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
if err := bw.Flush(); err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
count++
}
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/assert"
abciclient "github.com/tendermint/tendermint/abci/client"
abciclientent "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
)
@@ -20,7 +20,7 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
err = server.Start()
assert.NoError(t, err, "expected no error on server.Start")
client, err := abciclient.NewClient(addr, transport, true)
client, err := abciclientent.NewClient(addr, transport, true)
assert.NoError(t, err, "expected no error on NewClient")
err = client.Start()
assert.NoError(t, err, "expected no error on client.Start")

View File

@@ -7,14 +7,14 @@ import (
"fmt"
mrand "math/rand"
abcicli "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
var ctx = context.Background()
func InitChain(client abcicli.Client) error {
func InitChain(client abciclient.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
@@ -34,7 +34,7 @@ func InitChain(client abcicli.Client) error {
return nil
}
func Commit(client abcicli.Client, hashExp []byte) error {
func Commit(client abciclient.Client, hashExp []byte) error {
res, err := client.CommitSync(ctx)
data := res.Data
if err != nil {
@@ -51,7 +51,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
return nil
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
@@ -70,7 +70,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
return nil
}
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {

abci/types/client.go (new file, 1 line)
View File

@@ -0,0 +1 @@
package types

View File

@@ -4,7 +4,7 @@ import (
fmt "fmt"
"github.com/tendermint/tendermint/crypto/ed25519"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/sr25519"
)
@@ -12,7 +12,7 @@ import (
func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
pke := ed25519.PubKey(pk)
pkp, err := cryptoenc.PubKeyToProto(pke)
pkp, err := encoding.PubKeyToProto(pke)
if err != nil {
panic(err)
}
@@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
return Ed25519ValidatorUpdate(pk, power)
case secp256k1.KeyType:
pke := secp256k1.PubKey(pk)
pkp, err := cryptoenc.PubKeyToProto(pke)
pkp, err := encoding.PubKeyToProto(pke)
if err != nil {
panic(err)
}
@@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
}
case sr25519.KeyType:
pke := sr25519.PubKey(pk)
pkp, err := cryptoenc.PubKeyToProto(pke)
pkp, err := encoding.PubKeyToProto(pke)
if err != nil {
panic(err)
}

View File

@@ -11,7 +11,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
@@ -65,9 +65,9 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
}
home := viper.GetString(cli.HomeFlag)
conf := cfg.DefaultConfig()
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
cfg.EnsureRoot(conf.RootDir)
config.EnsureRoot(conf.RootDir)
dumpDebugData(outDir, conf, rpc)
@@ -79,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
return nil
}
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()
tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")

View File

@@ -14,7 +14,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
@@ -50,9 +50,9 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
}
home := viper.GetString(cli.HomeFlag)
conf := cfg.DefaultConfig()
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
cfg.EnsureRoot(conf.RootDir)
config.EnsureRoot(conf.RootDir)
// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.

View File

@@ -9,7 +9,7 @@ import (
"path"
"path/filepath"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
@@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error {
// copyWAL copies the Tendermint node's WAL file. It returns an error if the
// WAL file cannot be read or copied.
func copyWAL(conf *cfg.Config, dir string) error {
func copyWAL(conf *config.Config, dir string) error {
walPath := conf.Consensus.WalFile()
walFile := filepath.Base(walPath)

View File

@@ -121,7 +121,9 @@ func initFilesWithConfig(config *cfg.Config) error {
}
// write config file
cfg.WriteConfigFile(config.RootDir, config)
if err := cfg.WriteConfigFile(config.RootDir, config); err != nil {
return err
}
logger.Info("Generated config", "mode", config.Mode)
return nil

View File

@@ -8,12 +8,7 @@ import (
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer/sink"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/internal/inspect"
)
// InspectCmd is the command for starting an inspect server.
@@ -55,29 +50,10 @@ func runInspect(cmd *cobra.Command, args []string) error {
cancel()
}()
blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
ins, err := inspect.NewFromConfig(logger, config)
if err != nil {
return err
}
blockStore := store.NewBlockStore(blockStoreDB)
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
if err != nil {
if err := blockStoreDB.Close(); err != nil {
logger.Error("error closing block store db", "error", err)
}
return err
}
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return err
}
stateStore := state.NewStore(stateDB)
ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger)
logger.Info("starting inspect server")
if err := ins.Run(ctx); err != nil {

View File

@@ -11,7 +11,6 @@ import (
"time"
"github.com/spf13/cobra"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/libs/log"

View File

@@ -3,20 +3,22 @@ package commands
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
dbm "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/state/indexer"
"github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
"github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/types"
)
@@ -29,11 +31,12 @@ var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
either or both arguments.
reindex-event is an offline tool to re-index block and tx events to the eventsinks.
You can run this command when the event store backend has been dropped/disconnected or
when you want to replace the backend. The default start-height is 0, meaning the tool
will start reindexing from the base block height (inclusive); and the default end-height
is 0, meaning the tool will reindex until the latest block height (inclusive). Users can
omit either or both arguments.
`,
Example: `
tendermint reindex-event
@@ -129,17 +132,25 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
dbType := dbm.BackendType(cfg.DBBackend)
if !os.FileExists(filepath.Join(cfg.DBDir(), "blockstore.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
}
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
if !os.FileExists(filepath.Join(cfg.DBDir(), "state.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
}
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
@@ -221,14 +232,15 @@ func checkValidHeight(bs state.BlockStore) error {
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
return fmt.Errorf("%s (requested start height: %d, base height: %d)",
coretypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
"%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
@@ -238,13 +250,13 @@ func checkValidHeight(bs state.BlockStore) error {
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
"%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
coretypes.ErrInvalidRequest, startHeight, endHeight)
}
return nil

View File

@@ -11,10 +11,13 @@ import (
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/state/indexer"
"github.com/tendermint/tendermint/internal/state/mocks"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
_ "github.com/lib/pq" // for the psql sink
)
const (
@@ -107,12 +110,29 @@ func TestLoadEventSink(t *testing.T) {
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
testCfg, err := tmcfg.ResetTestRoot(t.Name())
require.NoError(t, err)
testCfg.DBBackend = "goleveldb"
_, _, err = loadStateAndBlockStore(testCfg)
// we should return an error because the state store and block store
// don't yet exist
require.Error(t, err)
dbType := dbm.BackendType(testCfg.DBBackend)
bsdb, err := dbm.NewDB("blockstore", dbType, testCfg.DBDir())
require.NoError(t, err)
bsdb.Close()
ssdb, err := dbm.NewDB("state", dbType, testCfg.DBDir())
require.NoError(t, err)
ssdb.Close()
bs, ss, err := loadStateAndBlockStore(testCfg)
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}

View File

@@ -0,0 +1,46 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/state"
)
var RollbackStateCmd = &cobra.Command{
Use: "rollback",
Short: "rollback tendermint state by one height",
Long: `
A state rollback is performed to recover from an incorrect application state transition,
when Tendermint has persisted an incorrect app hash and is thus unable to make
progress. Rollback overwrites a state at height n with the state at height n - 1.
The application should also roll back to height n - 1. No blocks are removed, so upon
restarting Tendermint the transactions in block n will be re-executed against the
application.
`,
RunE: func(cmd *cobra.Command, args []string) error {
height, hash, err := RollbackState(config)
if err != nil {
return fmt.Errorf("failed to rollback state: %w", err)
}
fmt.Printf("Rolled back state to height %d and hash %X", height, hash)
return nil
},
}
// RollbackState takes the state at the current height n and overwrites it with the state
// at height n - 1. Note state here refers to tendermint state not application state.
// Returns the latest state height and app hash alongside an error if there was one.
func RollbackState(config *cfg.Config) (int64, []byte, error) {
// use the parsed config to load the block and state store
blockStore, stateStore, err := loadStateAndBlockStore(config)
if err != nil {
return -1, nil, err
}
// rollback the last state
return state.Rollback(blockStore, stateStore)
}
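
A hedged sketch of calling the exported helper directly rather than through the CLI; the home directory is an assumed example value (the command itself resolves it from the --home flag):

```go
package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Point the config at an existing node home directory (example path).
	conf := cfg.DefaultConfig()
	conf = conf.SetRoot("/home/user/.tendermint")

	height, appHash, err := commands.RollbackState(conf)
	if err != nil {
		log.Fatalf("failed to rollback state: %v", err)
	}
	fmt.Printf("Rolled back state to height %d and hash %X\n", height, appHash)
}
```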

View File

@@ -65,7 +65,7 @@ func AddNodeFlags(cmd *cobra.Command) {
"proxy-app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore' or 'noop' for local testing.")
" 'persistent_kvstore', 'e2e' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
// rpc flags

View File

@@ -240,7 +240,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}
config.Moniker = moniker(i)
cfg.WriteConfigFile(nodeDir, config)
if err := cfg.WriteConfigFile(nodeDir, config); err != nil {
return err
}
}
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)

View File

@@ -6,9 +6,9 @@ import (
cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
"github.com/tendermint/tendermint/cmd/tendermint/commands/debug"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/node"
)
func main() {
@@ -29,6 +29,7 @@ func main() {
cmd.GenNodeKeyCmd,
cmd.VersionCmd,
cmd.InspectCmd,
cmd.RollbackStateCmd,
cmd.MakeKeyMigrateCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
@@ -42,12 +43,12 @@ func main() {
// * Provide their own DB implementation
// can copy this file and use something other than the
// node.NewDefault function
nodeFunc := nm.NewDefault
nodeFunc := node.NewDefault
// Create & start node
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir)))
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir)))
if err := cmd.Execute(); err != nil {
panic(err)
}

View File

@@ -64,6 +64,9 @@ var (
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
minSubscriptionBufferSize = 100
defaultSubscriptionBufferSize = 200
)
// Config defines the top level configuration for a Tendermint node
@@ -496,6 +499,29 @@ type RPCConfig struct {
// to the estimated maximum number of broadcast_tx_commit calls per block.
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
// The number of events that can be buffered per subscription before
// returning `ErrOutOfCapacity`.
SubscriptionBufferSize int `mapstructure:"experimental-subscription-buffer-size"`
// The maximum number of responses that can be buffered per WebSocket
// client. If clients cannot read from the WebSocket endpoint fast enough,
// they will be disconnected, so increasing this parameter may reduce the
// chances of them being disconnected (but will cause the node to use more
// memory).
//
// Must be at least the same as `SubscriptionBufferSize`, otherwise
// connections may be dropped unnecessarily.
WebSocketWriteBufferSize int `mapstructure:"experimental-websocket-write-buffer-size"`
// If a WebSocket client cannot read fast enough, at present we may
// silently drop events instead of generating an error or disconnecting the
// client.
//
// Enabling this parameter will cause the WebSocket connection to be closed
// instead if it cannot read fast enough, allowing for greater
// predictability in subscription behavior.
CloseOnSlowClient bool `mapstructure:"experimental-close-on-slow-client"`
// How long to wait for a tx to be committed during /broadcast_tx_commit
// WARNING: Using a value larger than 10s will result in increasing the
// global HTTP write timeout, which applies to all connections and endpoints.
@@ -545,7 +571,9 @@ func DefaultRPCConfig() *RPCConfig {
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
SubscriptionBufferSize: defaultSubscriptionBufferSize,
TimeoutBroadcastTxCommit: 10 * time.Second,
WebSocketWriteBufferSize: defaultSubscriptionBufferSize,
MaxBodyBytes: int64(1000000), // 1MB
MaxHeaderBytes: 1 << 20, // same as the net/http default
@@ -579,6 +607,18 @@ func (cfg *RPCConfig) ValidateBasic() error {
if cfg.MaxSubscriptionsPerClient < 0 {
return errors.New("max-subscriptions-per-client can't be negative")
}
if cfg.SubscriptionBufferSize < minSubscriptionBufferSize {
return fmt.Errorf(
"experimental-subscription-buffer-size must be >= %d",
minSubscriptionBufferSize,
)
}
if cfg.WebSocketWriteBufferSize < cfg.SubscriptionBufferSize {
return fmt.Errorf(
"experimental-websocket-write-buffer-size must be >= experimental-subscription-buffer-size (%d)",
cfg.SubscriptionBufferSize,
)
}
if cfg.TimeoutBroadcastTxCommit < 0 {
return errors.New("timeout-broadcast-tx-commit can't be negative")
}
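
The two experimental buffer settings above are coupled: the WebSocket write buffer must be at least as large as the subscription buffer, or `ValidateBasic` rejects the config. A small sketch of that check:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/config"
)

func main() {
	rpcCfg := config.DefaultRPCConfig()

	// Deliberately violate the constraint enforced above: the WebSocket
	// write buffer must be >= the subscription buffer.
	rpcCfg.SubscriptionBufferSize = 400
	rpcCfg.WebSocketWriteBufferSize = 200

	if err := rpcCfg.ValidateBasic(); err != nil {
		log.Printf("invalid RPC config: %v", err)
	}
}
```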

View File

@@ -138,7 +138,6 @@ func TestBlockSyncConfigValidateBasic(t *testing.T) {
}
func TestConsensusConfig_ValidateBasic(t *testing.T) {
// nolint: lll
testcases := map[string]struct {
modify func(*ConsensusConfig)
expectErr bool

View File

@@ -1,9 +1,10 @@
package config
import (
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
db "github.com/tendermint/tm-db"
)
// ServiceProvider takes a config and a logger and returns a ready to go Node.
@@ -16,11 +17,11 @@ type DBContext struct {
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (db.DB, error)
type DBProvider func(*DBContext) (dbm.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
dbType := db.BackendType(ctx.Config.DBBackend)
return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
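
A brief sketch of using `DefaultDBProvider` to open one of the node's stores; the "blockstore" ID mirrors the usage that the inspect command drops elsewhere in this change set:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/config"
)

func main() {
	conf := config.DefaultConfig()

	// DBContext names the database ("blockstore", "state", ...) and carries
	// the node config so the provider can pick the backend and directory.
	db, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: conf})
	if err != nil {
		log.Fatalf("opening blockstore: %v", err)
	}
	defer db.Close()
}
```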

View File

@@ -45,23 +45,29 @@ func EnsureRoot(rootDir string) {
// WriteConfigFile renders config using the template and writes it to configFilePath.
// This function is called by cmd/tendermint/commands/init.go
func WriteConfigFile(rootDir string, config *Config) {
var buffer bytes.Buffer
if err := configTemplate.Execute(&buffer, config); err != nil {
panic(err)
}
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
mustWriteFile(configFilePath, buffer.Bytes(), 0644)
func WriteConfigFile(rootDir string, config *Config) error {
return config.WriteToTemplate(filepath.Join(rootDir, defaultConfigFilePath))
}
func writeDefaultConfigFileIfNone(rootDir string) {
// WriteToTemplate writes the config to the exact file specified by
// the path, in the default toml template and does not mangle the path
// or filename at all.
func (cfg *Config) WriteToTemplate(path string) error {
var buffer bytes.Buffer
if err := configTemplate.Execute(&buffer, cfg); err != nil {
return err
}
return writeFile(path, buffer.Bytes(), 0644)
}
func writeDefaultConfigFileIfNone(rootDir string) error {
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
if !tmos.FileExists(configFilePath) {
WriteConfigFile(rootDir, DefaultConfig())
return WriteConfigFile(rootDir, DefaultConfig())
}
return nil
}
// Note: any changes to the comments/variables/mapstructure
@@ -159,15 +165,15 @@ state-file = "{{ js .PrivValidator.State }}"
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
laddr = "{{ .PrivValidator.ListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# Path to the client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}"
# Client key generated while creating certificates for secure connection
validator-client-key-file = "{{ js .PrivValidator.ClientKey }}"
client-key-file = "{{ js .PrivValidator.ClientKey }}"
# Path Root Certificate Authority used to sign both client and server certificates
certificate-authority = "{{ js .PrivValidator.RootCA }}"
# Path to the Root Certificate Authority used to sign both client and server certificates
root-ca-file = "{{ js .PrivValidator.RootCA }}"
#######################################################################
@@ -230,6 +236,33 @@ max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
# the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}
# Experimental parameter to specify the maximum number of events a node will
# buffer, per subscription, before returning an error and closing the
# subscription. Must be set to at least 100, but higher values will accommodate
# higher event throughput rates (and will use more memory).
experimental-subscription-buffer-size = {{ .RPC.SubscriptionBufferSize }}
# Experimental parameter to specify the maximum number of RPC responses that
# can be buffered per WebSocket client. If clients cannot read from the
# WebSocket endpoint fast enough, they will be disconnected, so increasing this
# parameter may reduce the chances of them being disconnected (but will cause
# the node to use more memory).
#
# Must be at least the same as "experimental-subscription-buffer-size",
# otherwise connections could be dropped unnecessarily. This value should
# ideally be somewhat higher than "experimental-subscription-buffer-size" to
# accommodate non-subscription-related RPC responses.
experimental-websocket-write-buffer-size = {{ .RPC.WebSocketWriteBufferSize }}
# If a WebSocket client cannot read fast enough, at present we may
# silently drop events instead of generating an error or disconnecting the
# client.
#
# Enabling this experimental parameter will cause the WebSocket connection to
# be closed instead if it cannot read fast enough, allowing for greater
# predictability in subscription behavior.
experimental-close-on-slow-client = {{ .RPC.CloseOnSlowClient }}
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
@@ -331,18 +364,28 @@ max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}"
# Time to wait before flushing messages out on the connection
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}"
# Maximum size of a message packet payload, in bytes
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }}
# Rate at which packets can be sent, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
send-rate = {{ .P2P.SendRate }}
# Rate at which packets can be received, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
recv-rate = {{ .P2P.RecvRate }}
# Set true to enable the peer-exchange reactor
@@ -437,8 +480,8 @@ rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
trust-height = {{ .StateSync.TrustHeight }}
trust-hash = "{{ .StateSync.TrustHash }}"
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# period should suffice.
trust-period = "{{ .StateSync.TrustPeriod }}"
@@ -519,7 +562,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
[tx-index]
# The backend database list to back the indexer.
# If list contains null, meaning no indexer service will be used.
# If list contains "null" or "", meaning no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
@@ -527,8 +570,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
# The PostgreSQL connection configuration, the connection format:
@@ -560,22 +603,22 @@ namespace = "{{ .Instrumentation.Namespace }}"
/****** these are for test settings ***********/
func ResetTestRoot(testName string) *Config {
func ResetTestRoot(testName string) (*Config, error) {
return ResetTestRootWithChainID(testName, "")
}
func ResetTestRootWithChainID(testName string, chainID string) *Config {
func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
return nil, err
}
// ensure config and data subdirs are created
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
panic(err)
return nil, err
}
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
panic(err)
return nil, err
}
conf := DefaultConfig()
@@ -584,26 +627,36 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)
// Write default config file if missing.
writeDefaultConfigFileIfNone(rootDir)
if err := writeDefaultConfigFileIfNone(rootDir); err != nil {
return nil, err
}
if !tmos.FileExists(genesisFilePath) {
if chainID == "" {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
mustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
if err := writeFile(genesisFilePath, []byte(testGenesis), 0644); err != nil {
return nil, err
}
}
// we always overwrite the priv val
mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
if err := writeFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644); err != nil {
return nil, err
}
if err := writeFile(privStateFilePath, []byte(testPrivValidatorState), 0644); err != nil {
return nil, err
}
config := TestConfig().SetRoot(rootDir)
return config
return config, nil
}
func mustWriteFile(filePath string, contents []byte, mode os.FileMode) {
func writeFile(filePath string, contents []byte, mode os.FileMode) error {
if err := ioutil.WriteFile(filePath, contents, mode); err != nil {
tmos.Exit(fmt.Sprintf("failed to write file: %v", err))
return fmt.Errorf("failed to write file: %w", err)
}
return nil
}
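For orientation, here is a hedged sketch of how the error-returning helpers above might be used by a caller. It assumes these functions live in this repository's `config` package and keep the signatures visible in this diff (`EnsureRoot`, `WriteConfigFile`, `Config.WriteToTemplate`); it is an illustration, not code from the change itself.

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/tendermint/tendermint/config" // assumed import path for the package shown above
)

func main() {
	rootDir := "/tmp/tm-example" // hypothetical root directory
	cfg := config.DefaultConfig()

	// Create the expected directory layout under rootDir.
	config.EnsureRoot(rootDir)

	// WriteConfigFile now reports failures instead of exiting the process,
	// so the caller decides how to react to a failed write.
	if err := config.WriteConfigFile(rootDir, cfg); err != nil {
		log.Fatalf("writing default config: %v", err)
	}

	// WriteToTemplate writes the same TOML template to an exact path,
	// without mangling the path or filename.
	if err := cfg.WriteToTemplate(filepath.Join(rootDir, "custom-config.toml")); err != nil {
		log.Fatalf("writing templated config: %v", err)
	}
}
```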
var testGenesisFmt = `{

View File

@@ -24,17 +24,17 @@ func TestEnsureRoot(t *testing.T) {
// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
require.NoError(err)
defer os.RemoveAll(tmpDir)
// create root dir
EnsureRoot(tmpDir)
WriteConfigFile(tmpDir, DefaultConfig())
require.NoError(WriteConfigFile(tmpDir, DefaultConfig()))
// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.Nil(err)
require.NoError(err)
checkConfig(t, string(data))
@@ -47,7 +47,8 @@ func TestEnsureTestRoot(t *testing.T) {
testName := "ensureTestRoot"
// create root dir
cfg := ResetTestRoot(testName)
cfg, err := ResetTestRoot(testName)
require.NoError(err)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir

View File

@@ -1,39 +0,0 @@
package armor
import (
"bytes"
"fmt"
"io/ioutil"
"golang.org/x/crypto/openpgp/armor"
)
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
buf := new(bytes.Buffer)
w, err := armor.Encode(buf, blockType, headers)
if err != nil {
panic(fmt.Errorf("could not encode ascii armor: %s", err))
}
_, err = w.Write(data)
if err != nil {
panic(fmt.Errorf("could not encode ascii armor: %s", err))
}
err = w.Close()
if err != nil {
panic(fmt.Errorf("could not encode ascii armor: %s", err))
}
return buf.String()
}
func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) {
buf := bytes.NewBufferString(armorStr)
block, err := armor.Decode(buf)
if err != nil {
return "", nil, nil, err
}
data, err = ioutil.ReadAll(block.Body)
if err != nil {
return "", nil, nil, err
}
return block.Type, block.Header, data, nil
}

View File

@@ -1,20 +0,0 @@
package armor
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestArmor(t *testing.T) {
blockType := "MINT TEST"
data := []byte("somedata")
armorStr := EncodeArmor(blockType, nil, data)
// Decode armorStr and test for equivalence.
blockType2, _, data2, err := DecodeArmor(armorStr)
require.Nil(t, err, "%+v", err)
assert.Equal(t, blockType, blockType2)
assert.Equal(t, data, data2)
}

View File

@@ -8,34 +8,34 @@ import (
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/sr25519"
"github.com/tendermint/tendermint/libs/json"
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
func init() {
json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey")
json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519")
json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1")
json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey")
json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519")
json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1")
}
// PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey
func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) {
var kp pc.PublicKey
func PubKeyToProto(k crypto.PubKey) (cryptoproto.PublicKey, error) {
var kp cryptoproto.PublicKey
switch k := k.(type) {
case ed25519.PubKey:
kp = pc.PublicKey{
Sum: &pc.PublicKey_Ed25519{
kp = cryptoproto.PublicKey{
Sum: &cryptoproto.PublicKey_Ed25519{
Ed25519: k,
},
}
case secp256k1.PubKey:
kp = pc.PublicKey{
Sum: &pc.PublicKey_Secp256K1{
kp = cryptoproto.PublicKey{
Sum: &cryptoproto.PublicKey_Secp256K1{
Secp256K1: k,
},
}
case sr25519.PubKey:
kp = pc.PublicKey{
Sum: &pc.PublicKey_Sr25519{
kp = cryptoproto.PublicKey{
Sum: &cryptoproto.PublicKey_Sr25519{
Sr25519: k,
},
}
@@ -46,9 +46,9 @@ func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) {
}
// PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey
func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
func PubKeyFromProto(k cryptoproto.PublicKey) (crypto.PubKey, error) {
switch k := k.Sum.(type) {
case *pc.PublicKey_Ed25519:
case *cryptoproto.PublicKey_Ed25519:
if len(k.Ed25519) != ed25519.PubKeySize {
return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d",
len(k.Ed25519), ed25519.PubKeySize)
@@ -56,7 +56,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
pk := make(ed25519.PubKey, ed25519.PubKeySize)
copy(pk, k.Ed25519)
return pk, nil
case *pc.PublicKey_Secp256K1:
case *cryptoproto.PublicKey_Secp256K1:
if len(k.Secp256K1) != secp256k1.PubKeySize {
return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d",
len(k.Secp256K1), secp256k1.PubKeySize)
@@ -64,7 +64,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
pk := make(secp256k1.PubKey, secp256k1.PubKeySize)
copy(pk, k.Secp256K1)
return pk, nil
case *pc.PublicKey_Sr25519:
case *cryptoproto.PublicKey_Sr25519:
if len(k.Sr25519) != sr25519.PubKeySize {
return nil, fmt.Errorf("invalid size for PubKeySr25519. Got %d, expected %d",
len(k.Sr25519), sr25519.PubKeySize)
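As a brief aside (not part of the diff above), the alias rename from `pc` to `cryptoproto` does not change call sites outside this file. A hedged round-trip sketch, assuming these helpers live in the `crypto/encoding` package and that `ed25519.GenPrivKey` is available as elsewhere in the codebase:

```go
package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding" // assumed home of PubKeyToProto/PubKeyFromProto
)

func main() {
	// Convert an ed25519 public key to its protobuf representation and back.
	pub := ed25519.GenPrivKey().PubKey()

	protoPub, err := encoding.PubKeyToProto(pub)
	if err != nil {
		log.Fatalf("to proto: %v", err)
	}

	roundTripped, err := encoding.PubKeyFromProto(protoPub)
	if err != nil {
		log.Fatalf("from proto: %v", err)
	}

	fmt.Println("round trip preserved the key:", pub.Equals(roundTripped))
}
```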

View File

@@ -1,3 +1,4 @@
//go:build !libsecp256k1
// +build !libsecp256k1
package secp256k1

View File

@@ -5,14 +5,13 @@ import (
"math/big"
"testing"
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcutil/base58"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/secp256k1"
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
)
type keyData struct {

View File

@@ -2,8 +2,8 @@ package xchacha20poly1305
import (
"bytes"
cr "crypto/rand"
mr "math/rand"
crand "crypto/rand"
mrand "math/rand"
"testing"
)
@@ -19,23 +19,23 @@ func TestRandom(t *testing.T) {
var nonce [24]byte
var key [32]byte
al := mr.Intn(128)
pl := mr.Intn(16384)
al := mrand.Intn(128)
pl := mrand.Intn(16384)
ad := make([]byte, al)
plaintext := make([]byte, pl)
_, err := cr.Read(key[:])
_, err := crand.Read(key[:])
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = cr.Read(nonce[:])
_, err = crand.Read(nonce[:])
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = cr.Read(ad)
_, err = crand.Read(ad)
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = cr.Read(plaintext)
_, err = crand.Read(plaintext)
if err != nil {
t.Errorf("error on read: %w", err)
}
@@ -59,7 +59,7 @@ func TestRandom(t *testing.T) {
}
if len(ad) > 0 {
alterAdIdx := mr.Intn(len(ad))
alterAdIdx := mrand.Intn(len(ad))
ad[alterAdIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering additional data", i)
@@ -67,14 +67,14 @@ func TestRandom(t *testing.T) {
ad[alterAdIdx] ^= 0x80
}
alterNonceIdx := mr.Intn(aead.NonceSize())
alterNonceIdx := mrand.Intn(aead.NonceSize())
nonce[alterNonceIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering nonce", i)
}
nonce[alterNonceIdx] ^= 0x80
alterCtIdx := mr.Intn(len(ct))
alterCtIdx := mrand.Intn(len(ct))
ct[alterCtIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering ciphertext", i)

View File

@@ -34,6 +34,10 @@ module.exports = {
"label": "v0.34",
"key": "v0.34"
},
{
"label": "v0.35",
"key": "v0.35"
},
{
"label": "master",
"key": "master"
@@ -78,7 +82,7 @@ module.exports = {
},
footer: {
question: {
text: 'Chat with Tendermint developers in <a href=\'https://discord.gg/vcExX9T\' target=\'_blank\'>Discord</a> or reach out on the <a href=\'https://forum.cosmos.network/c/tendermint\' target=\'_blank\'>Tendermint Forum</a> to learn more.'
text: 'Chat with Tendermint developers in <a href=\'https://discord.gg/cosmosnetwork\' target=\'_blank\'>Discord</a> or reach out on the <a href=\'https://forum.cosmos.network/c/tendermint\' target=\'_blank\'>Tendermint Forum</a> to learn more.'
},
logo: '/logo-bw.svg',
textLink: {

View File

@@ -2,17 +2,27 @@
The documentation for Tendermint Core is hosted at:
- <https://docs.tendermint.com/master/>
- <https://docs.tendermint.com/>
built from the files in this (`/docs`) directory for
[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively.
built from the files in this [`docs` directory for `master`](https://github.com/tendermint/tendermint/tree/master/docs)
and other supported release branches.
## How It Works
There is a CircleCI job listening for changes in the `/docs` directory, on both
the `master` branch. Any updates to files in this directory
on those branches will automatically trigger a website deployment. Under the hood,
the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.
There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml)
in the `tendermint/docs` repository that clones and builds the documentation
site from the contents of this `docs` directory, for `master` and for each
supported release branch. Under the hood, this workflow runs `make build-docs`
from the [Makefile](../Makefile#L214).
The list of supported versions is defined in [`config.js`](./.vuepress/config.js),
which defines the UI menu on the documentation site, and also in
[`docs/versions`](./versions), which determines which branches are built.
The last entry in the `docs/versions` file determines which version is linked
by default from the generated `index.html`. This should generally be the most
recent release, rather than `master`, so that new users are not confused by
documentation for unreleased features.
## README

View File

@@ -2,7 +2,7 @@
order: 1
parent:
title: Networks
order: 6
order: 1
---
# Overview

View File

@@ -169,7 +169,7 @@ Override the [command](https://github.com/tendermint/tendermint/blob/master/netw
ipv4_address: 192.167.10.2
```
Similarly do for node1, node2 and node3 then [run testnet](https://github.com/tendermint/tendermint/blob/master/docs/networks/docker-compose.md#run-a-testnet)
Similarly do for node1, node2 and node3 then [run testnet](#run-a-testnet).
## Logging

View File

@@ -16,8 +16,7 @@ the parameters set with their default values. It will look something
like the file below, however, double check by inspecting the
`config.toml` created with your version of `tendermint` installed:
```toml
# This is a TOML config file.
```toml# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
@@ -34,14 +33,14 @@ like the file below, however, double check by inspecting the
proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "anonymous"
moniker = "ape"
# Mode of Node: full | validator | seed (default: "validator")
# * validator node (default)
# - all reactors
# - with priv_validator_key.json, priv_validator_state.json
# * full node
# * full node
# - all reactors
# - No priv_validator_key.json, priv_validator_state.json
# * seed node
@@ -49,6 +48,11 @@ moniker = "anonymous"
# - No priv_validator_key.json, priv_validator_state.json
mode = "validator"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = true
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
@@ -84,16 +88,6 @@ log-format = "plain"
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv-validator-key-file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv-validator-state-file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv-validator-laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "config/node_key.json"
@@ -105,6 +99,33 @@ abci = "socket"
filter-peers = false
#######################################################
### Priv Validator Configuration ###
#######################################################
[priv-validator]
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
key-file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
state-file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
laddr = ""
# Path to the client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = ""
# Client key generated while creating certificates for secure connection
validator-client-key-file = ""
# Path to the Root Certificate Authority used to sign both client and server certificates
certificate-authority = ""
#######################################################################
### Advanced Configuration Options ###
#######################################################################
@@ -130,6 +151,7 @@ cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With",
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-laddr = ""
# Maximum number of simultaneous connections.
@@ -139,9 +161,10 @@ grpc-laddr = ""
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
@@ -198,18 +221,34 @@ pprof-laddr = ""
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = false
# Select the p2p internal queue
queue-type = "priority"
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
external-address = ""
# Comma separated list of seed nodes to connect to
# We only use these if we cant connect to peers in the addrbook
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
seeds = ""
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap-peers = ""
# Comma separated list of nodes to keep persistent connections to
persistent-peers = ""
@@ -217,6 +256,8 @@ persistent-peers = ""
upnp = false
# Path to address book
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
addr-book-file = "config/addrbook.json"
# Set true for strict address routability rules
@@ -224,9 +265,15 @@ addr-book-file = "config/addrbook.json"
addr-book-strict = true
# Maximum number of inbound peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-inbound-peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-outbound-peers = 10
# Maximum number of connections (inbound and outbound).
@@ -236,27 +283,40 @@ max-connections = 64
max-incoming-connection-attempts = 100
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = ""
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
persistent-peers-max-dial-period = "0s"
# Time to wait before flushing messages out on the connection
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
flush-throttle-timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max-packet-msg-payload-size = 1024
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
max-packet-msg-payload-size = 1400
# Rate at which packets can be sent, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
send-rate = 5120000
# Rate at which packets can be received, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https:#github.com/tendermint/tendermint/issues/5670
recv-rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = ""
# Toggle to disable guard against peers connecting from the same ip.
@@ -349,8 +409,15 @@ discovery-time = "15s"
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = ""
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "15s"
# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "4"
#######################################################
### BlockSync Configuration Connections ###
### Block Sync Configuration Connections ###
#######################################################
[blocksync]
@@ -410,7 +477,8 @@ peer-query-maj23-sleep-duration = "2s"
#######################################################
[tx-index]
# What indexer to use for transactions
# The backend database list to back the indexer.
# If list contains "null" or "", meaning no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
@@ -418,8 +486,13 @@ peer-query-maj23-sleep-duration = "2s"
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = "kv"
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = ["kv"]
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = ""
#######################################################
### Instrumentation Configuration Options ###
@@ -520,10 +593,61 @@ This section will cover settings within the p2p section of the `config.toml`.
- `external-address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port.
- > We recommend setting an external address. When used in a private network, Tendermint Core currently doesn't advertise the node's public address. There is active and ongoing work to improve the P2P system, but this is a helpful workaround for now.
- `seeds` = is a list of comma separated seed nodes that you will connect to on start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network.
- `persistent-peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added.
- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection).
- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection).
- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
- `pex` = turns the peer exchange reactor on or off. Validator nodes will want `pex` turned off so they do not begin gossiping to unknown peers on the network. PEX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
We will cover the new and deprecated parameters below.
### New Parameters
There are three new parameters, which are enabled if `use-legacy` is set to `false` (see the sketch after this list).
- `queue-type` = sets the type of queue to use in the p2p layer. There are three options available: `fifo`, `priority` and `wdrr`. The default is `priority`.
- `bootstrap-peers` = is a list of comma separated peers which will be used to bootstrap the address book.
- `max-connections` = is the maximum number of allowed inbound and outbound connections.
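A hedged Go sketch of setting these switches through the config struct rather than editing `config.toml` by hand. The field names (`UseLegacy`, `QueueType`, `BootstrapPeers`, `MaxConnections`) and the import path are assumptions made for illustration; verify them against the P2P config struct in your Tendermint version.

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/config" // assumed import path
)

func main() {
	cfg := config.DefaultConfig()

	// Field names below are assumptions; check them against the p2p config struct.
	cfg.P2P.UseLegacy = false                    // run the new p2p stack
	cfg.P2P.QueueType = "priority"               // "fifo", "priority", or "wdrr"
	cfg.P2P.BootstrapPeers = "nodeid@host:26656" // hypothetical peer address
	cfg.P2P.MaxConnections = 64                  // replaces the per-direction peer caps

	rootDir := "/tmp/tm-example" // hypothetical root directory
	config.EnsureRoot(rootDir)
	if err := config.WriteConfigFile(rootDir, cfg); err != nil {
		log.Fatalf("writing config: %v", err)
	}
}
```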
### Deprecated Parameters
> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by default with the deprecated fields. The new implementation uses different config parameters, explained above.
- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`*
- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). *This was replaced by `max-connections`*
- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. *Deprecated*
- `seeds` = is a list of comma separated seed nodes that you will connect to on start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network. *Deprecated, replaced by bootstrap peers*
## Indexing Settings
Operators can configure indexing via the `[tx-index]` section. The `indexer`
field takes a list of supported indexers. If `null` is included, indexing will
be turned off regardless of the other values provided.
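For illustration, a hedged sketch of the same choice expressed through the Go config struct. The `TxIndex.Indexer` field name is taken from the config template earlier in this diff; the `PsqlConn` field name and the import path are assumptions.

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/config" // assumed import path
)

func main() {
	cfg := config.DefaultConfig()

	// Index transactions with both the built-in kv indexer and PostgreSQL.
	cfg.TxIndex.Indexer = []string{"kv", "psql"}
	cfg.TxIndex.PsqlConn = "postgresql://user:pass@localhost:5432/tendermint" // hypothetical DSN; field name assumed

	// Including "null" anywhere in the list disables indexing entirely:
	// cfg.TxIndex.Indexer = []string{"null"}

	rootDir := "/tmp/tm-example" // hypothetical root directory
	config.EnsureRoot(rootDir)
	if err := config.WriteConfigFile(rootDir, cfg); err != nil {
		log.Fatalf("writing config: %v", err)
	}
}
```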
### Supported Indexers
#### KV
The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
#### PostgreSQL
The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance allowing for the events
to be stored in relational models. Since the events are stored in an RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.
Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.
Example:
```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```

View File

@@ -20,6 +20,7 @@ The following metrics are available:
| **Name** | **Type** | **Tags** | **Description** |
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
@@ -55,6 +56,16 @@ The following metrics are available:
Percentage of missing + byzantine validators:
```md
((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100
```prometheus
((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
```
Rate at which the application is responding to each ABCI method call.
```
sum(rate(tendermint_abci_connection_method_timing_count[5m])) by (method)
```
The 95th percentile response time for the application to the `deliver_tx` ABCI method call.
```
histogram_quantile(0.95, sum by(le) (rate(tendermint_abci_connection_method_timing_bucket{method="deliver_tx"}[5m])))
```

docs/package-lock.json (generated, 12551 lines): file diff suppressed because it is too large.

View File

@@ -42,5 +42,6 @@ sections.
- [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md)
- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
- [RFC-005: Event System](./rfc-005-event-system.rst)
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

View File

@@ -40,15 +40,15 @@ Critique of Current Peer-to-Peer Infrastructure
The current (refactored) P2P stack is an improvement on the previous iteration
(legacy), but as of 0.35, there remains room for improvement in the design and
implementation of the P2P layer.
implementation of the P2P layer.
Some limitations of the current stack include:
- heavy reliance on buffering to avoid backups in the flow of components,
which is fragile to maintain and can lead to unexpected memory usage
patterns and forces the routing layer to make decisions about when messages
should be discarded.
should be discarded.
- the current p2p stack relies on convention (rather than the compiler) to
enforce the API boundaries and conventions between reactors and the router,
making it very easy to write "wrong" reactor code or introduce a bad
@@ -64,7 +64,7 @@ Some limitations of the current stack include:
difficult to expose that information to monitoring/observability tools. This
general opacity also makes it difficult to interact with the peer system
from other areas of the code base (e.g. tests, reactors).
- the legacy stack provided some control to operators to force the system to
dial new peers or seed nodes or manipulate the topology of the system _in
situ_. The current stack can't easily provide this, and while the new stack
@@ -94,16 +94,16 @@ blocksync and consensus) from procedure calls to message passing.
This is a relatively simple change and could be implemented with the following
components:
- a constant to represent "local" delivery as the ``To``` field on
- a constant to represent "local" delivery as the ``To`` field on
``p2p.Envelope``.
- special path for routing local messages that doesn't require message
serialization (protobuf marshalling/unmarshaling).
Adding these semantics, particularly if in conjunction with synchronous
semantics provides a solution to dependency graph problems currently present
in the Tendermint codebase, which will simplify development, make it possible
to isolate components for testing.
to isolate components for testing.
Eventually, this will also make it possible to have a logical Tendermint node
running in multiple processes or in a collection of containers, although the
@@ -129,7 +129,7 @@ of request/response ID to allow identifying out-of-order responses over a
single connection. Additionally, expanded the programming model of the
``p2p.Channel`` to accommodate some kind of _future_ or similar paradigm to
make it viable to write reactor code without needing for the reactor developer
to wrestle with lower level concurency constructs.
to wrestle with lower level concurrency constructs.
Timeout Handling (QoS)
@@ -143,7 +143,7 @@ detect or attribute. Additionally, the current system provides three main
parameters to control quality of service:
- buffer sizes for channels and queues.
- priorities for channels
- queue implementation details for shedding load.
@@ -151,13 +151,13 @@ parameters to control quality of service:
These end up being quite coarse controls, and changing the settings are
difficult because as the queues and channels are able to buffer large numbers
of messages it can be hard to see the impact of a given change, particularly
in our extant test environment. In general, we should endeavor to:
in our extant test environment. In general, we should endeavor to:
- set real timeouts, via contexts, on most message send operations, so that
senders rather than queues can be responsible for timeout
logic. Additionally, this will make it possible to avoid sending messages
during shutdown.
- reduce (to the greatest extent possible) the amount of buffering in
channels and the queues, to more readily surface backpressure and reduce the
potential for buildup of stale messages.
@@ -173,8 +173,8 @@ transport types and makes it more likely that message-based caching and rate
limiting will be implemented at the transport layer rather than at a more
appropriate level.
The transport then, would be responsible for negitating the connection and the
handshake and otherwise behave like a socket/file discriptor with ``Read` and
The transport then, would be responsible for negotiating the connection and the
handshake and otherwise behave like a socket/file descriptor with ``Read`` and
``Write`` methods.
While this was included in the initial design for the new P2P layer, it may be
@@ -185,7 +185,7 @@ Service Discovery
~~~~~~~~~~~~~~~~~
In the current system, Tendermint assumes that all nodes in a network are
largely equivelent, and nodes tend to be "chatty" making many requests of
largely equivalent, and nodes tend to be "chatty" making many requests of
large numbers of peers and waiting for peers to (hopefully) respond. While
this works and has allowed Tendermint to get to a certain point, this both
produces a theoretical scaling bottle neck and makes it harder to test and
@@ -194,7 +194,7 @@ verify components of the system.
In addition to peer's identity and connection information, peers should be
able to advertise a number of services or capabilities, and node operators or
developers should be able to specify peer capability requirements (e.g. target
at least <x>-percent of peers with <y> capability.)
at least <x>-percent of peers with <y> capability.)
These capabilities may be useful in selecting peers to send messages to, it
may make sense to extend Tendermint's message addressing capability to allow
@@ -215,7 +215,7 @@ Continued Homegrown Implementation
The current peer system is homegrown and is conceptually compatible with the
needs of the project, and while there are limitations to the system, the p2p
layer is not (currently as of 0.35) a major source of bugs or friction during
development.
development.
However, the current implementation makes a number of allowances for
interoperability, and there are a collection of iterative improvements that
@@ -228,18 +228,18 @@ implementation, upcoming work would include:
connections using different protocols (e.g. QUIC, etc.)
- entirely remove the constructs and implementations of the legacy peer
implementation.
implementation.
- establish and enforce clearer chains of responsibility for connection
establishment (e.g. handshaking, setup,) which is currently shared between
three components.
three components.
- report better metrics regarding the into the state of peers and network
connectivity, which are opaque outside of the system. This is constrained at
the moment as a side effect of the split responsibility for connection
establishment.
- extend the PEX system to include service information so that ndoes in the
- extend the PEX system to include service information so that nodes in the
network weren't necessarily homogeneous.
While maintaining a bespoke peer management layer would seem to distract from
@@ -272,20 +272,20 @@ case that our internal systems need to know much less about peers than
otherwise specified. Similarly, the current system has a notion of peer
scoring that cannot be communicated to libp2p, which may be fine as this is
only used to support peer exchange (PEX,) which would become a property libp2p
and not expressed in it's current higher-level form.
and not expressed in it's current higher-level form.
In general, the effort to switch to libp2p would involve:
In general, the effort to switch to libp2p would involve:
- timing it during an appropriate protocol-breaking window, as it doesn't seem
viable to support both libp2p *and* the current p2p protocol.
viable to support both libp2p *and* the current p2p protocol.
- providing some in-memory testing network to support the use case that the
current ``p2p.MemoryNetwork`` provides.
- re-homing the ``p2p.Router`` implementation on top of libp2p components to
be able to maintain the current reactor implementations.
Open question include:
Open question include:
- how much local buffering should we be doing? It sort of seems like we should
figure out what the expected behavior is for libp2p for QoS-type
@@ -302,7 +302,7 @@ Open question include:
- how do efforts to select "the best" (healthy, close, well-behaving, etc.)
peers work out if Tendermint is not maintaining a local peer database?
- would adding additional higher level semantics (internal message passing,
request/response pairs, service discovery, etc.) facilitate removing some of
the direct linkages between constructs/components in the system and reduce
@@ -311,6 +311,6 @@ Open question include:
References
----------
- `Tracking Ticket for P2P Refactor Project <https://github.com/tendermint/tendermint/issues/5670>`_
- `Tracking Ticket for P2P Refactor Project <https://github.com/tendermint/tendermint/issues/5670>`_
- `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_
- `ADR 62: P2P Architecture and Abstraction <../architecture/adr-061-p2p-architecture.md>`_

View File

@@ -0,0 +1,122 @@
=====================
RFC 005: Event System
=====================
Changelog
---------
- 2021-09-17: Initial Draft (@tychoish)
Abstract
--------
The event system within Tendermint, which supports a lot of core
functionality, also represents a major infrastructural liability. As part of
our upcoming review of the RPC interfaces and our ongoing thoughts about
stability and performance, as well as the preparation for Tendermint 1.0, we
should revisit the design and implementation of the event system. This
document discusses both the current state of the system and potential
directions for future improvement.
Background
----------
Current State of Events
~~~~~~~~~~~~~~~~~~~~~~~
The event system makes it possible for clients, both internal and external,
to receive notifications of state replication events, such as new blocks,
new transactions, validator set changes, as well as intermediate events during
consensus. Because the event system is very cross cutting, the behavior and
performance of the event publication and subscription system has huge impacts
for all of Tendermint.
The subscription service is exposed over the RPC interface, but also powers
the indexing (e.g. to an external database,) and is the mechanism by which
`BroadcastTxCommit` is able to wait for transactions to land in a block.
The current pubsub mechanism relies on a couple of buffered channels,
primarily between all event creators and subscribers, but also for each
subscription. The result of this design is that, in some situations with the
right collection of slow subscription consumers the event system can put
backpressure on the consensus state machine and message gossiping in the
network, thereby causing nodes to lag.
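To make the backpressure concern concrete, here is a small, self-contained Go toy (not Tendermint's actual pubsub code) in which one slow subscriber on a buffered channel eventually fills its buffer and stalls the publish loop for every subscriber:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Toy event bus: one buffered channel per subscriber.
	slow := make(chan int, 4) // small buffer, drained slowly
	fast := make(chan int, 4)

	go func() { // slow subscriber
		for range slow {
			time.Sleep(50 * time.Millisecond)
		}
	}()
	go func() { // fast subscriber
		for range fast {
		}
	}()

	start := time.Now()
	for i := 0; i < 20; i++ {
		// The publisher must deliver to every subscriber; once the slow
		// subscriber's buffer is full, this send blocks and stalls the whole
		// publish loop (and, in the real system, the caller).
		slow <- i
		fast <- i
	}
	fmt.Printf("published 20 events in %v (delayed by the slow subscriber)\n", time.Since(start))
	close(slow)
	close(fast)
}
```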
Improvements
~~~~~~~~~~~~
The current system relies on implicit, bounded queues built from buffered channels,
and though thread-safe, it can force all activity within Tendermint to serialize,
which does not need to happen. Additionally, timeouts for subscription
consumers, tied to the implementation of the RPC layer, may complicate the
use of the system.
References
~~~~~~~~~~
- Legacy Implementation
- `publication of events <https://github.com/tendermint/tendermint/blob/master/libs/pubsub/pubsub.go#L333-L345>`_
- `send operation <https://github.com/tendermint/tendermint/blob/master/libs/pubsub/pubsub.go#L489-L527>`_
- `send loop <https://github.com/tendermint/tendermint/blob/master/libs/pubsub/pubsub.go#L381-L402>`_
- Related RFCs
- `RFC 002: IPC Ecosystem <./rfc-002-ipc-ecosystem.md>`_
- `RFC 003: Performance Questions <./rfc-003-performance-questions.md>`_
Discussion
----------
Changes to Published Events
~~~~~~~~~~~~~~~~~~~~~~~~~~~
As part of this process, the Tendermint team should do a study of the existing
event types and ensure that there are viable production use cases for
subscriptions to all event types. Instinctively it seems plausible that some
of the events may not be usable outside of Tendermint (e.g. ``TimeoutWait``
or ``NewRoundStep``) and it might make sense to remove them. Certainly, it
would be good to make sure that we don't maintain infrastructure for unused or
un-useful messages indefinitely.
Blocking Subscription
~~~~~~~~~~~~~~~~~~~~~
The blocking subscription mechanism makes it possible to have *send*
operations into the subscription channel be un-buffered (the event processing
channel is still buffered.) In the blocking case, events from one subscription
can block processing that event for other non-blocking subscriptions. The main
case, it seems for blocking subscriptions is ensuring that a transaction has
been committed to a block for ``BroadcastTxCommit``. Removing blocking
subscriptions entirely, and potentially finding another way to implement
``BroadcastTxCommit``, could lead to important simplifications and
improvements to throughput without requiring large changes.
Subscription Identification
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before `#6386 <https://github.com/tendermint/tendermint/pull/6386>`_, all
subscriptions were identified by the combination of a client ID and a query,
and with that change, it became possible to identify all subscription given
only an ID, but compatibility with the legacy identification means that there's a
good deal of legacy code as well as client side efficiency that could be
improved.
Pubsub Changes
~~~~~~~~~~~~~~
The pubsub core should be implemented in a way that removes the possibility of
backpressure from the event system to impact the core system *or* for one
subscription to impact the behavior of another area of the
system. Additionally, because the current system is implemented entirely in
terms of a collection of buffered channels, the event system (and large
numbers of subscriptions) can be a source of memory pressure.
These changes could include:
- explicit cancellation and timeouts promulgated from callers (e.g. RPC end
points, etc,) this should be done using contexts.
- subscription system should be able to spill to disk to avoid putting memory
pressure on the core behavior of the node (consensus, gossip).
- subscriptions implemented as cursors rather than channels, with either
condition variables to simulate the existing "push" API or a client side
iterator API with some kind of long polling-type interface (a rough sketch of this idea follows below).
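For illustration only, a minimal sketch of what a cursor/iterator-style subscription interface could look like; the names here are invented for this note and are not an existing Tendermint API.

```go
package events

import "context"

// Event is a placeholder for whatever payload the event system publishes.
type Event struct {
	Type string
	Data []byte
}

// Cursor is a hypothetical pull-based subscription handle: the consumer asks
// for the next batch of events at its own pace, so a slow consumer cannot
// exert backpressure on the publisher.
type Cursor interface {
	// Next blocks until at least one event newer than the cursor position is
	// available, the context is canceled, or the subscription is closed. It
	// returns at most max events.
	Next(ctx context.Context, max int) ([]Event, error)

	// Close releases server-side resources associated with the cursor.
	Close() error
}
```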

docs/roadmap/README.md (new file, 6 lines)
View File

@@ -0,0 +1,6 @@
---
order: false
parent:
title: Roadmap
order: 7
---

docs/roadmap/roadmap.md (new file, 97 lines)
View File

@@ -0,0 +1,97 @@
---
order: 1
---
# Tendermint Roadmap
*Last Updated: Friday 8 October 2021*
This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams.
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status.
The upgrades are split into two components: **Epics**, the features that define a release and to a large part dictate the timing of releases; and **minors**, features of smaller scale and lower priority, that could land in neighboring releases.
## V0.35 (completed Q3 2021)
### Prioritized Mempool
Transactions were previously added to blocks in the order with which they arrived to the mempool. Adding a priority field via `CheckTx` gives applications more control over which transactions make it into a block. This is important in the presence of transaction fees. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md)
### Refactor of the P2P Framework
The Tendermint P2P system is undergoing a large redesign to improve its performance and reliability. The first phase of this redesign is included in 0.35. This phase cleans and decouples abstractions, improves peer lifecycle management, peer address handling and enables pluggable transports. It is implemented to be protocol-compatible with the previous implementation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-062-p2p-architecture.md)
### State Sync Improvements
Following the initial version of state sync, several improvements have been made. These include the addition of [Reverse Sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-068-reverse-sync.md) needed for evidence handling, the introduction of a [P2P State Provider](https://github.com/tendermint/tendermint/pull/6807) as an alternative to RPC endpoints, new configuration parameters to adjust throughput, and several bug fixes.
### Custom event indexing + PSQL Indexer
Added a new `EventSink` interface to allow alternatives to Tendermint's proprietary transaction indexer. We also added a PostgreSQL Indexer implementation, allowing rich SQL-based index queries. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md)
### Minor Works
- Several Go packages were reorganized to make the distinction between public APIs and implementation details more clear.
- Block indexer to index begin-block and end-block events. [More](https://github.com/tendermint/tendermint/pull/6226)
- Block, state, evidence, and light storage keys were reworked to preserve lexicographic order. This change requires a database migration. [More](https://github.com/tendermint/tendermint/pull/5771)
- Introduction of Tendermint modes. Part of this change includes the possibility to run a separate seed node that runs the PEX reactor only. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
## V0.36 (expected Q1 2022)
### ABCI++
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md)
### Proposer-Based Timestamps
Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
### Soft Upgrades
We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222)
### Minor Works
- Remove the "legacy" P2P framework, and clean up of P2P package. [More](https://github.com/tendermint/tendermint/issues/5670)
- Remove the global mutex from the local ABCI client to enable application-controlled concurrency. [More](https://github.com/tendermint/tendermint/issues/7073)
- Enable P2P support for light clients
- Node orchestration of services + Node initialization and composability
- Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer.
- Improve node visibility by introducing more metrics
## V0.37 (expected Q3 2022)
### Complete P2P Refactor
Finish the final phase of the P2P system. Ongoing research and planning is taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC) and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/). Research into more advanced gossiping techniques.
### Streamline Storage Engine
Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897)
### Evaluate Interprocess Communication
Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, events as examples). Many of these have multiple implementations in which a single suffices. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md)
### Minor Works
- Amnesia attack handling. [More](https://github.com/tendermint/tendermint/issues/5270)
- Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397)
- Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319)
- Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446)
## V1.0 (expected Q4 2022)
Has the same feature set as V0.37 but with a focus towards testing, protocol correctness and minor tweaks to ensure a stable product. Such work might include extending the [consensus testing framework](https://github.com/tendermint/tendermint/issues/5920), the use of canary/long-lived testnets and greater integration tests.
## Post 1.0 Work
- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347)
- Consensus engine refactor
- Bidirectional ABCI
- Randomized Leader Election
- ZK proofs / other cryptographic primitives
- Multichain Tendermint

View File

@@ -14,7 +14,12 @@ This section dives into the internals of Go-Tendermint.
- [Subscribing to events](./subscription.md)
- [Block Structure](./block-structure.md)
- [RPC](./rpc.md)
- [Block Sync](./block-sync.md)
- [State Sync](./state-sync.md)
- [Mempool](./mempool.md)
- [Block Sync](./block-sync/README.md)
- [State Sync](./state-sync/README.md)
- [Mempool](./mempool/README.md)
- [Light Client](./light-client.md)
- [Consensus](./consensus/README.md)
- [Peer Exchange (PEX)](./pex/README.md)
- [Evidence](./evidence/README.md)
For full specifications refer to the [spec repo](https://github.com/tendermint/spec).

View File

@@ -1,7 +1,11 @@
---
order: 10
order: 1
parent:
title: Block Sync
order: 6
---
# Block Sync
*Formerly known as Fast Sync*
@@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height
The user can query the events by subscribing `EventQueryBlockSyncStatus`
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
## Implementation
To read more on the implementation, please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md)

Binary image file added (265 KiB); contents not shown.

Binary image file added (43 KiB); contents not shown.

View File

@@ -0,0 +1,47 @@
---
order: 3
---
# Implementation
## Blocksync Reactor
- coordinates the pool for syncing
- coordinates the store for persistence
- coordinates the playing of blocks towards the app using a sm.BlockExecutor
- handles switching between fastsync and consensus
- it is a p2p.BaseReactor
- starts the pool.Start() and its poolRoutine()
- registers all the concrete types and interfaces for serialisation
### poolRoutine
- listens to these channels:
- pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends
a &bcBlockRequestMessage for a specific height
- pool signals timeout of a specific peer by posting to timeoutsCh
- switchToConsensusTicker to periodically try and switch to consensus
- trySyncTicker to periodically check if we have fallen behind and then catch-up sync
- if there aren't any new blocks available on the pool it skips syncing
- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores
them on disk
- implements Receive which is called by the switch/peer
- calls AddBlock on the pool when it receives a new block from a peer
## Block Pool
- responsible for downloading blocks from peers
- makeRequestersRoutine()
- removes timeout peers
- starts new requesters by calling makeNextRequester()
- requestRoutine():
- picks a peer and sends the request, then blocks until:
- pool is stopped by listening to pool.Quit
- requester is stopped by listening to Quit
- request is redone
- we receive a block
- gotBlockCh is strange
## Go Routines in Blocksync Reactor
![Go Routines Diagram](img/bc-reactor-routines.png)

View File

@@ -0,0 +1,278 @@
---
order: 2
---
# Reactor
The Blocksync Reactor's high level responsibility is to enable peers who are
far behind the current state of the consensus to quickly catch up by downloading
many blocks in parallel, verifying their commits, and executing them against the
ABCI application.
Tendermint full nodes run the Blocksync Reactor as a service to provide blocks
to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode,
where they actively make requests for more blocks until they sync up.
Once caught up, "fast_sync" mode is disabled and the node switches to
using (and turns on) the Consensus Reactor.
## Architecture and algorithm
The Blocksync reactor is organised as a set of concurrent tasks:
- Receive routine of Blocksync Reactor
- Task for creating Requesters
- Set of Requester tasks
- Controller task
![Blocksync Reactor Architecture Diagram](img/bc-reactor.png)
### Data structures
These are the core data structures necessary to provide the Blocksync Reactor logic.
The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer whose id equals `peerID`.
```go
type Requester struct {
mtx Mutex
block Block
height int64
peerID p2p.ID
redoChannel chan p2p.ID // redo may be sent multiple times; peerID identifies the repeated request
}
```
Pool is a core data structure that stores the height of the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.
```go
type Pool struct {
mtx Mutex
requesters map[int64]*Requester
height int64
peers map[p2p.ID]*Peer
maxPeerHeight int64
numPending int32
store BlockStore
requestsChannel chan<- BlockRequest
errorsChannel chan<- peerError
}
```
The Peer data structure stores, for each peer, its current `height`, the number of pending requests sent to it (`numPending`), etc.
```go
type Peer struct {
id p2p.ID
height int64
numPending int32
timeout *time.Timer
didTimeout bool
}
```
BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`).
```go
type BlockRequest struct {
Height int64
PeerID p2p.ID
}
```
### Receive routine of Blocksync Reactor
It is executed upon message reception on the BlocksyncChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a separate receive routine of the Blocksync Reactor) executed for each peer. Note that `try to send` does not block (it returns immediately) if the outgoing buffer is full.
```go
handleMsg(pool, m):
upon receiving bcBlockRequestMessage m from peer p:
block = load block for height m.Height from pool.store
if block != nil then
try to send BlockResponseMessage(block) to p
else
try to send bcNoBlockResponseMessage(m.Height) to p
upon receiving bcBlockResponseMessage m from peer p:
pool.mtx.Lock()
requester = pool.requesters[m.Height]
if requester == nil then
error("peer sent us a block we didn't expect")
continue
if requester.block == nil and requester.peerID == p then
requester.block = m
pool.numPending -= 1 // atomic decrement
peer = pool.peers[p]
if peer != nil then
peer.numPending--
if peer.numPending == 0 then
peer.timeout.Stop()
// NOTE: we don't send Quit signal to the corresponding requester task!
else
trigger peer timeout to expire after peerTimeout
pool.mtx.Unlock()
upon receiving bcStatusRequestMessage m from peer p:
try to send bcStatusResponseMessage(pool.store.Height)
upon receiving bcStatusResponseMessage m from peer p:
pool.mtx.Lock()
peer = pool.peers[p]
if peer != nil then
peer.height = m.height
else
peer = create new Peer data structure with id = p and height = m.Height
pool.peers[p] = peer
if m.Height > pool.maxPeerHeight then
pool.maxPeerHeight = m.Height
pool.mtx.Unlock()
onTimeout(p):
send error message to pool error channel
peer = pool.peers[p]
peer.didTimeout = true
```
### Requester tasks
Requester task is responsible for fetching a single block at position `height`.
```go
fetchBlock(height, pool):
while true do {
peerID = nil
block = nil
peer = pickAvailablePeer(height)
peerID = peer.id
enqueue BlockRequest(height, peerID) to pool.requestsChannel
redo = false
while !redo do
select {
upon receiving Quit message do
return
upon receiving redo message with id on redoChannel do
if peerID == id {
mtx.Lock()
pool.numPending++
redo = true
mtx.Unlock()
}
}
}
pickAvailablePeer(height):
selectedPeer = nil
while selectedPeer == nil do
pool.mtx.Lock()
for each peer in pool.peers do
if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
peer.numPending++
selectedPeer = peer
break
pool.mtx.Unlock()
if selectedPeer == nil then
sleep requestIntervalMS
return selectedPeer
```
### Task for creating Requesters
This task is responsible for continuously creating and starting Requester tasks.
```go
createRequesters(pool):
while true do
if !pool.isRunning then break
if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
pool.mtx.Lock()
nextHeight = pool.height + size(pool.requesters)
requester = create new requester for height nextHeight
pool.requesters[nextHeight] = requester
pool.numPending += 1 // atomic increment
start requester task
pool.mtx.Unlock()
else
sleep requestIntervalMS
pool.mtx.Lock()
for each peer in pool.peers do
if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
send error on pool error channel
peer.didTimeout = true
if peer.didTimeout then
for each requester in pool.requesters do
if requester.getPeerID() == peer then
enqueue msg on requester's redoChannel
delete(pool.peers, peerID)
pool.mtx.Unlock()
```
### Main blocksync reactor controller task
```go
main(pool):
create trySyncTicker with interval trySyncIntervalMS
create statusUpdateTicker with interval statusUpdateIntervalSeconds
create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
while true do
select {
upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
try to send bcBlockRequestMessage(Height) to Peer
upon receiving error(peer) on errorsChannel:
stop peer for error
upon receiving message on statusUpdateTickerChannel:
broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
upon receiving message on switchToConsensusTickerChannel:
pool.mtx.Lock()
receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
haveSomePeers = size of pool.peers > 0
pool.mtx.Unlock()
if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
switch to consensus mode
upon receiving message on trySyncTickerChannel:
for i = 0; i < 10; i++ do
pool.mtx.Lock()
firstBlock = pool.requesters[pool.height].block
secondBlock = pool.requesters[pool.height+1].block
if firstBlock == nil or secondBlock == nil then continue
pool.mtx.Unlock()
verify firstBlock using LastCommit from secondBlock
if verification failed
pool.mtx.Lock()
peerID = pool.requesters[pool.height].peerID
redoRequestsForPeer(pool, peerID)
delete(pool.peers, peerID)
stop peer peerID for error
pool.mtx.Unlock()
else
delete(pool.requesters, pool.height)
save firstBlock to store
pool.height++
execute firstBlock
}
redoRequestsForPeer(pool, peerId):
for each requester in pool.requesters do
if requester.getPeerID() == peerID
enqueue msg on redoChannel for requester
```
## Channels
Defines `maxMsgSize` for the maximum size of incoming messages, and
`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
receiving buffers, respectively. These are meant to prevent amplification
attacks by setting an upper limit on how much data we can receive from and
send to a peer.
Sending incorrectly encoded data will result in stopping the peer.
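For illustration, the sketch below shows how such limits might appear in a channel descriptor. The field names mirror the `p2p.ChannelDescriptor` used by the blocksync reactor elsewhere in this change set; the constant and the numeric values are placeholders, not the actual configuration.

```go
package blocksync

import "github.com/tendermint/tendermint/internal/p2p"

// MaxMsgSize stands in for the real maxMsgSize constant; the value is a
// placeholder for illustration only.
const MaxMsgSize = 1024 * 1024

// channelDescriptor sketches how the limits described above could be wired
// into a p2p channel descriptor. Field names follow the descriptor shown in
// the reactor diff in this change set; the values are illustrative.
var channelDescriptor = p2p.ChannelDescriptor{
	Priority:            5,
	SendQueueCapacity:   1000,       // bounds messages queued for sending to one peer
	RecvBufferCapacity:  1024,       // bounds the raw receive buffer
	RecvMessageCapacity: MaxMsgSize, // larger messages cause the peer to be stopped
	MaxSendBytes:        100,
}
```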

View File

@@ -0,0 +1,42 @@
---
order: 1
parent:
title: Consensus
order: 6
---
# Consensus
Tendermint Consensus is a distributed protocol executed by validator processes to agree on
the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
each round is an attempt to reach agreement on the next block. A round starts with a dedicated
process (called the proposer) suggesting to the other processes what the next block should be, via
the `ProposalMessage`.
The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some
round, enough processes vote for the same block, then this block is committed and later
added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
validator. The internals of the protocol and how it ensures safety and liveness properties are
explained in a forthcoming document.
For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
block, since blocks can be large; that is, they don't embed the block inside `Proposal` and
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in
[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
that uniquely identifies each block. The block itself is
disseminated to validator processes using a peer-to-peer gossip protocol. It starts with the
proposer splitting the block into a number of block parts, which are then gossiped between
processes using `BlockPartMessage`.
Validators in Tendermint communicate over a peer-to-peer gossip protocol. Each validator is connected
only to a subset of processes, called its peers. Through the gossip protocol, a validator sends its peers
all the information needed (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can
reach agreement on some block, and also obtain the content of the chosen block (block parts). As
part of the gossiping protocol, processes also send auxiliary messages that inform peers about the
executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
also messages that inform peers what votes the process has seen (`HasVoteMessage`,
`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping
protocol to determine what messages a process should send to its peers.
We now describe the content of each message exchanged during the Tendermint consensus protocol.

View File

@@ -0,0 +1,370 @@
---
order: 2
---
# Reactor
Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
manages the state of the Tendermint consensus internal state machine.
When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service.
Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
(that is used extensively in gossip routines) and starts the following three routines for the peer p:
Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
The processing normally consists of updating the known peer state and, for some messages
(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
for further processing. In the following text we specify the core functionality of those separate units of execution
that are part of the Consensus Reactor.
## ConsensusState service
Consensus State handles execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
and upon reaching agreement, commits blocks to the chain and executes them against the application.
The internal state machine receives input from peers, the internal validator and from a timer.
Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
by the Receive Routine.
### Receive Routine of the ConsensusState service
Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
It is the only routine that updates RoundState that contains internal consensus state.
Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
It receives messages from peers, internal validators and from Timeout Ticker
and invokes the corresponding handlers, potentially updating the RoundState.
The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
discussed in a separate document. For an understanding of this document,
it is sufficient to know that the Receive Routine manages and updates the RoundState data structure that is
then extensively used by the gossip routines to determine what information should be sent to peer processes.
## Round State
RoundState defines the internal consensus state. It contains height, round, round step, a current validator set,
a proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
received votes, the last commit, and the last validator set.
```go
type RoundState struct {
Height int64
Round int
Step RoundStepType
Validators ValidatorSet
Proposal Proposal
ProposalBlock Block
ProposalBlockParts PartSet
LockedRound int
LockedBlock Block
LockedBlockParts PartSet
Votes HeightVoteSet
LastCommit VoteSet
LastValidators ValidatorSet
}
```
Internally, consensus will run as a state machine with the following states:
- RoundStepNewHeight
- RoundStepNewRound
- RoundStepPropose
- RoundStepProposeWait
- RoundStepPrevote
- RoundStepPrevoteWait
- RoundStepPrecommit
- RoundStepPrecommitWait
- RoundStepCommit
## Peer Round State
Peer round state contains the known state of a peer. It is updated by the Receive routine of the
Consensus Reactor and by the gossip routines upon sending a message to the peer.
```golang
type PeerRoundState struct {
Height int64 // Height peer is at
Round int // Round peer is at, -1 if unknown.
Step RoundStepType // Step peer is at
Proposal bool // True if peer has proposal for this round
ProposalBlockPartsHeader PartSetHeader
ProposalBlockParts BitArray
ProposalPOLRound int // Proposal's POL round. -1 if none.
ProposalPOL BitArray // nil until ProposalPOLMessage received.
Prevotes BitArray // All votes peer has for this round
Precommits BitArray // All precommits peer has for this round
LastCommitRound int // Round of commit for last height. -1 if none.
LastCommit BitArray // All commit precommits of commit for last height.
CatchupCommitRound int // Round that we have commit for. Not necessarily unique. -1 if none.
CatchupCommit BitArray // All commit precommits peer has for this height & CatchupCommitRound
}
```
## Receive method of Consensus reactor
The entry point of the Consensus reactor is a receive method. When a message is
received from a peer p, normally the peer round state is updated
correspondingly, and some messages are passed for further processing, for
example to ConsensusState service. We now specify the processing of messages in
the receive method of Consensus reactor for each message type. In the following
message handler, `rs` and `prs` denote `RoundState` and `PeerRoundState`,
respectively.
### NewRoundStepMessage handler
```go
handleMessage(msg):
if msg is from smaller height/round/step then return
// Just remember these values.
prsHeight = prs.Height
prsRound = prs.Round
prsCatchupCommitRound = prs.CatchupCommitRound
prsCatchupCommit = prs.CatchupCommit
Update prs with values from msg
if prs.Height or prs.Round has been updated then
reset Proposal related fields of the peer state
if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
prs.Precommits = prsCatchupCommit
if prs.Height has been updated then
if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
prs.LastCommitRound = msg.LastCommitRound
prs.LastCommit = prs.Precommits
else
  prs.LastCommitRound = msg.LastCommitRound
  prs.LastCommit = nil
Reset prs.CatchupCommitRound and prs.CatchupCommit
```
### NewValidBlockMessage handler
```go
handleMessage(msg):
if prs.Height != msg.Height then return
if prs.Round != msg.Round && !msg.IsCommit then return
prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
prs.ProposalBlockParts = msg.BlockParts
```
The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to
protect the node against DOS attacks.
### HasVoteMessage handler
```go
handleMessage(msg):
if prs.Height == msg.Height then
prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
```
### VoteSetMaj23Message handler
```go
handleMessage(msg):
if prs.Height == msg.Height then
Record in rs that the peer claims to have a majority for msg.BlockID
Send VoteSetBitsMessage showing the votes this node has for that BlockID
```
### ProposalMessage handler
```go
handleMessage(msg):
if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
prs.Proposal = true
if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
prs.ProposalPOLRound = msg.POLRound
prs.ProposalPOL = nil
Send msg through internal peerMsgQueue to ConsensusState service
```
### ProposalPOLMessage handler
```go
handleMessage(msg):
if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
prs.ProposalPOL = msg.ProposalPOL
```
The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
node against DOS attacks.
### BlockPartMessage handler
```go
handleMessage(msg):
if prs.Height != msg.Height || prs.Round != msg.Round then return
Record in prs that peer has block part msg.Part.Index
Send msg through internal peerMsgQueue to ConsensusState service
```
### VoteMessage handler
```go
handleMessage(msg):
Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the given height and round
Send msg through internal peerMsgQueue to ConsensusState service
```
### VoteSetBitsMessage handler
```go
handleMessage(msg):
Update prs for the bit-array of votes peer claims to have for the msg.BlockID
```
The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
node against DOS attacks.
## Gossip Data Routine
It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
```go
1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
Part = pick a random proposal block part the peer does not have
Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
if send returns true, record that the peer knows the corresponding block Part
Continue
1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
help peer catch up using gossipDataForCatchup function
Continue
1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
Sleep PeerGossipSleepDuration
Continue
// at this point rs.Height == prs.Height and rs.Round == prs.Round
1d) if (rs.Proposal != nil and !prs.Proposal) then
Send ProposalMessage(rs.Proposal) to the peer
if send returns true, record that the peer knows Proposal
if 0 <= rs.Proposal.POLRound then
polRound = rs.Proposal.POLRound
prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
Continue
2) Sleep PeerGossipSleepDuration
```
### Gossip Data For Catchup
This function is responsible for helping peer catch up if it is at the smaller height (prs.Height < rs.Height).
The function executes the following logic:
```go
if peer does not have all block parts for prs.ProposalBlockPart then
blockMeta = Load Block Metadata for height prs.Height from blockStore
if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
Sleep PeerGossipSleepDuration
return
Part = pick a random proposal block part the peer does not have
Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
if send returns true, record that the peer knows the corresponding block Part
return
else Sleep PeerGossipSleepDuration
```
## Gossip Votes Routine
It is used to send the following message: `VoteMessage` on the VoteChannel.
The gossip votes routine is based on the local RoundState (`rs`)
and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
```go
1a) if rs.Height == prs.Height then
if prs.Step == RoundStepNewHeight then
vote = random vote from rs.LastCommit the peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
Prevotes = rs.Votes.Prevotes(prs.Round)
vote = random vote from Prevotes the peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
Precommits = rs.Votes.Precommits(prs.Round)
vote = random vote from Precommits the peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
if prs.ProposalPOLRound != -1 then
PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
vote = random vote from PolPrevotes the peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
vote = random vote from rs.LastCommit peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
Commit = get commit from BlockStore for prs.Height
vote = random vote from Commit the peer does not have
Send VoteMessage(vote) to the peer
if send returns true, continue
2) Sleep PeerGossipSleepDuration
```
## QueryMaj23Routine
It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
(`prs`). The routine repeats forever the logic shown below.
```go
1a) if rs.Height == prs.Height then
Prevotes = rs.Votes.Prevotes(prs.Round)
if there is a majority for some blockId in Prevotes then
m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId)
Send m to peer
Sleep PeerQueryMaj23SleepDuration
1b) if rs.Height == prs.Height then
Precommits = rs.Votes.Precommits(prs.Round)
if there is a majority for some blockId in Precommits then
m = VoteSetMaj23Message(prs.Height,prs.Round,Precommit,blockId)
Send m to peer
Sleep PeerQueryMaj23SleepDuration
1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
if there is a majority for some blockId in Prevotes then
m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockId)
Send m to peer
Sleep PeerQueryMaj23SleepDuration
1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
prs.Height <= blockStore.Height() then
Commit = LoadCommit(prs.Height)
m = VoteSetMaj23Message(prs.Height,Commit.Round,Precommit,Commit.BlockID)
Send m to peer
Sleep PeerQueryMaj23SleepDuration
2) Sleep PeerQueryMaj23SleepDuration
```
## Broadcast routine
The Broadcast routine subscribes to an internal event bus to receive new round steps and vote messages, and broadcasts messages to peers upon receiving those
events.
It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state event. Note that
broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel.
Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel.
## Channels
Defines 4 channels: state, data, vote and vote_set_bits. Each channel
has `SendQueueCapacity` and `RecvBufferCapacity`, with
`RecvMessageCapacity` set to `maxMsgSize`.
Sending incorrectly encoded data will result in stopping the peer.

View File

@@ -0,0 +1,13 @@
---
order: 1
parent:
title: Evidence
order: 3
---
Evidence is used to identify validators who have acted or are acting maliciously. There are multiple types of evidence; to read more on the evidence types please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence).
The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is repeatedly sent to all peers. This ensures that evidence reaches as many nodes as possible, to avoid censorship. After evidence is received by peers and committed in a block, it is pruned from the evidence module.
Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.

View File

@@ -1,48 +0,0 @@
---
order: 12
---
# Mempool
## Transaction ordering
Currently, there's no ordering of transactions other than the order they've
arrived (via RPC or from other nodes).
So the only way to specify the order is to send them to a single node.
valA:
- `tx1`
- `tx2`
- `tx3`
If the transactions are split up across different nodes, there's no way to
ensure they are processed in the expected order.
valA:
- `tx1`
- `tx2`
valB:
- `tx3`
If valB is the proposer, the order might be:
- `tx3`
- `tx1`
- `tx2`
If valA is the proposer, the order might be:
- `tx1`
- `tx2`
- `tx3`
That said, if the transactions contain some internal value, like an
order/nonce/sequence number, the application can reject transactions that are
out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then
accept `tx1`. The sender can then retry sending `tx3`, which should probably be
rejected until the node has seen `tx2`.

View File

@@ -0,0 +1,71 @@
---
order: 1
parent:
title: Mempool
order: 2
---
The mempool is an in-memory pool of potentially valid transactions,
both to broadcast to other nodes, as well as to provide to the
consensus reactor when this node is selected as the block proposer.
There are two sides to the mempool state:
- External: get, check, and broadcast new transactions
- Internal: return valid transactions, update the list after a block commit
## External functionality
External functionality is exposed via network interfaces
to potentially untrusted actors.
- CheckTx - triggered via RPC or P2P
- Broadcast - gossip messages after a successful check
## Internal functionality
Internal functionality is exposed via method calls to other
code compiled into the tendermint binary.
- ReapMaxBytesMaxGas - get txs to propose in the next block. Guarantees that the
size of the txs is less than MaxBytes, and gas is less than MaxGas
- Update - remove txs that were included in the last block
- ABCI.CheckTx - call ABCI app to validate the tx
What does it provide the consensus reactor?
What guarantees does it need from the ABCI app?
(talk about interleaving processes in concurrency)
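As a rough summary of the internal surface listed above, the sketch below shows one way it could be expressed as a Go interface. This is a hedged sketch: the method names follow the list, but the signatures are illustrative and differ from the real mempool interface in the repository.

```go
package mempool

import "github.com/tendermint/tendermint/types"

// Mempool is a sketch of the internal surface described above; the real
// interface has more methods and different signatures.
type Mempool interface {
	// CheckTx asks the ABCI application whether tx is (potentially) valid and,
	// on success, admits it into the pool so it can be gossiped and proposed.
	CheckTx(tx types.Tx) error

	// ReapMaxBytesMaxGas returns transactions for the next proposed block whose
	// total size is at most maxBytes and whose total gas is at most maxGas.
	ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs

	// Update removes transactions that were included in the last committed
	// block and, if recheck is enabled, revalidates the remaining ones.
	Update(height int64, committed types.Txs) error
}
```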
## Optimizations
The implementation within this library also implements a tx cache.
This is so that signatures don't have to be reverified if the tx has
already been seen before.
However, we only store valid txs in the cache, not invalid ones.
This is because invalid txs could become valid later.
Txs that are included in a block aren't removed from the cache,
as they still may be getting received over the p2p network.
These txs are stored in the cache by their hash, to mitigate memory concerns.
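A minimal, hedged sketch of such a hash-keyed cache follows; the real cache is a bounded LRU, and the type and method names here are illustrative.

```go
package mempool

import (
	"crypto/sha256"
	"sync"
)

// txCache is a minimal sketch of the cache described above: transactions are
// remembered by hash so CheckTx (and signature verification) is not repeated
// for transactions we have already seen. The real cache is bounded (LRU);
// eviction is omitted here for brevity.
type txCache struct {
	mtx  sync.Mutex
	seen map[[sha256.Size]byte]struct{}
}

func newTxCache() *txCache {
	return &txCache{seen: map[[sha256.Size]byte]struct{}{}}
}

// Push records tx and reports whether it was newly added. A false return
// means the transaction was already seen and CheckTx can be skipped.
func (c *txCache) Push(tx []byte) bool {
	key := sha256.Sum256(tx)
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if _, ok := c.seen[key]; ok {
		return false
	}
	c.seen[key] = struct{}{}
	return true
}
```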
Applications should implement replay protection, read [Replay
Protection](https://github.com/tendermint/tendermint/blob/8cdaa7f515a9d366bbc9f0aff2a263a1a6392ead/docs/app-dev/app-development.md#replay-protection) for more information.
## Configuration
The mempool has various configurable parameters.
Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.
`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead).
`MaxBatchBytes` is a mempool config parameter -> defined locally. The reactor
sends transactions to the connected peers in batches. The maximum size of one
batch is `MaxBatchBytes`.
The mempool will not send a tx back to any peer from which it received it.
The reactor assigns a `uint16` number to each peer and maintains a map from
p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders
(`[]uint16`). The list is updated every time the mempool receives a transaction it
has already seen. Using `uint16` assumes that a node will never have over 65535 active
peers (0 is reserved for an unknown source - e.g. RPC).
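A hedged sketch of this sender bookkeeping follows; the type and method names are illustrative, not the reactor's actual ones.

```go
package mempool

import "sync"

// senderIDs sketches the bookkeeping described above: each peer gets a compact
// uint16 identifier, and every transaction records which identifiers it was
// received from so it is never gossiped back to a sender.
// ID 0 is reserved for an unknown source such as RPC.
type senderIDs struct {
	mtx    sync.Mutex
	nextID uint16
	byPeer map[string]uint16 // p2p ID (string form) -> compact ID
}

func newSenderIDs() *senderIDs {
	return &senderIDs{nextID: 1, byPeer: map[string]uint16{}}
}

// idFor returns the compact identifier for peerID, assigning one if needed.
func (s *senderIDs) idFor(peerID string) uint16 {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if id, ok := s.byPeer[peerID]; ok {
		return id
	}
	id := s.nextID
	s.nextID++
	s.byPeer[peerID] = id
	return id
}
```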

View File

@@ -0,0 +1,105 @@
---
order: 2
---
# Configuration
Here we describe the configuration options for the mempool.
For the purposes of this document, they are described
in a TOML file, but some of them can also be passed in as
environment variables.
Config:
```toml
[mempool]
recheck = true
broadcast = true
wal-dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max-txs-bytes=5MB, mempool will only accept 5 transactions).
max-txs-bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache-size = 10000
# Do not remove invalid transactions from the cache (default: false)
# Set to true if it's not possible for any invalid transaction to become valid
# again in the future.
keep-invalid-txs-in-cache = false
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
max-tx-bytes = 1048576
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
```
<!-- Flag: `--mempool.recheck=false`
Environment: `TM_MEMPOOL_RECHECK=false` -->
## Recheck
Recheck determines if the mempool rechecks all pending
transactions after a block was committed. Once a block
is committed, the mempool removes all valid transactions
that were successfully included in the block.
If `recheck` is true, then it will rerun CheckTx on
all remaining transactions with the new block state.
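To make the recheck behaviour concrete, here is a minimal, hedged sketch. It is not the actual implementation (which drives rechecking asynchronously through the ABCI client); it only illustrates that, after committed transactions are removed, `CheckTx` is re-run on the survivors against the new state.

```go
package mempool

// recheckTxs is a conceptual sketch of recheck: after a block commit, the
// remaining transactions are re-validated against the post-commit state, and
// anything that now fails CheckTx is dropped from the mempool.
func recheckTxs(remaining [][]byte, checkTx func(tx []byte) error) [][]byte {
	still := make([][]byte, 0, len(remaining))
	for _, tx := range remaining {
		if err := checkTx(tx); err == nil {
			still = append(still, tx)
		}
	}
	return still
}
```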
## Broadcast
Determines whether this node gossips any valid transactions
that arrive in the mempool. The default is to gossip anything that
passes CheckTx. If this is disabled, transactions are not
gossiped, but instead stored locally and added to the next
block this node proposes.
## WalDir
This defines the directory where the mempool writes its write-ahead
logs. These files can be used to reload unbroadcast
transactions if the node crashes.
If the directory passed in is an absolute path, the wal file is
created there. If the directory is a relative path, the path is
appended to the home directory of the tendermint process to
generate an absolute path to the wal directory
(default `$HOME/.tendermint`, or set via `TM_HOME` or `--home`).
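As a concrete illustration of the path rule above, here is a minimal sketch; `walDirPath` and its arguments are hypothetical names, not Tendermint functions.

```go
package config

import "path/filepath"

// walDirPath sketches the resolution rule described above: an absolute
// wal-dir is used as-is, while a relative one is joined to the node's home
// directory (e.g. $HOME/.tendermint, or whatever --home / TM_HOME selects).
func walDirPath(walDir, homeDir string) string {
	if walDir == "" || filepath.IsAbs(walDir) {
		return walDir
	}
	return filepath.Join(homeDir, walDir)
}
```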
## Size
Size defines the maximum number of transactions stored in the mempool. The default is `5_000`, but it can be adjusted to any number you would like. The larger the size, the more strain on the node.
## Max Transactions Bytes
Max transactions bytes defines the maximum total size of all the transactions in the mempool. The default is 1 GB.
## Cache size
Cache size determines the size of the cache holding the transactions we have already seen. The cache exists to avoid re-running `CheckTx` each time a previously seen transaction is received.
## Keep Invalid Transactions In Cache
Keep invalid transactions in cache determines whether an invalid transaction in the cache should be evicted. An invalid transaction here may be one that relies on a different tx that has not yet been included in a block.
## Max Transaction Bytes
Max transaction bytes defines the maximum size a single transaction can be for your node. If you would like your node to only keep track of smaller transactions, this field would need to be changed. The default is 1 MB.
## Max Batch Bytes
Max batch bytes defines the maximum number of bytes the node will send to a peer in one batch. The default is 0.
> Note: Unused due to https://github.com/tendermint/tendermint/issues/5796

View File

@@ -0,0 +1,177 @@
---
order: 1
parent:
title: Peer Exchange
order: 5
---
# Peer Strategy and Exchange
Here we outline the design of the PeerStore
and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
to good peers and to gossip peers to others.
## Peer Types
Certain peers are special in that they are specified by the user as `persistent`,
which means we auto-redial them if the connection fails, or if we fail to dial
them.
Some peers can be marked as `private`, which means
we will not put them in the peer store or gossip them to others.
All peers except private peers and peers coming from them are tracked using the
peer store.
The rest of our peers are only distinguished by being either
inbound (they dialed our public address) or outbound (we dialed them).
## Discovery
Peer discovery begins with a list of seeds.
When we don't have enough peers, we
1. ask existing peers
2. dial seeds if we're not dialing anyone currently
On startup, we will also immediately dial the given list of `persistent_peers`,
and will attempt to maintain persistent connections with them. If the
connections die, or we fail to dial, we will redial every 5s for a few minutes,
then switch to an exponential backoff schedule, and after about a day of
trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period` is configured to zero.
If `persistent_peers_max_dial_period` is set greater than zero, the interval between dials to each persistent peer
will not exceed `persistent_peers_max_dial_period` during exponential backoff.
Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`),
and we keep retrying regardless of `maxAttemptsToDial`.
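A hedged sketch of this dial-interval rule follows; the function name, base interval, and growth factor are illustrative, not the actual implementation.

```go
package pex

import "time"

// nextDialInterval sketches the rule described above: the wait before the
// next dial grows exponentially with the number of failed attempts, but for
// persistent peers it is capped at maxDialPeriod when that option is set to a
// positive value. The base interval and growth factor are illustrative.
func nextDialInterval(attempts int, maxDialPeriod time.Duration) time.Duration {
	backoff := 5 * time.Second
	for i := 0; i < attempts && backoff < 24*time.Hour; i++ {
		backoff *= 2
	}
	if maxDialPeriod > 0 && backoff > maxDialPeriod {
		return maxDialPeriod
	}
	return backoff
}
```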
As long as we have fewer than `MaxNumOutboundPeers`, we periodically request
additional peers from each of our existing peers and dial the seeds.
## Listening
Peers listen on a configurable ListenAddr that they self-report in their
NodeInfo during handshakes with other peers. Peers accept up to
`MaxNumInboundPeers` incoming peers.
## Address Book
Peers are tracked via their ID (their PubKey.Address()).
Peers are added to the peer store from the PEX when they first connect to us or
when we hear about them from other peers.
The peer store is arranged in sets of buckets, and distinguishes between
vetted (old) and unvetted (new) peers. It keeps different sets of buckets for
vetted and unvetted peers. Buckets provide randomization over peer selection.
Peers are put in buckets according to their IP groups.
An IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`), `local` for
local addresses, or `unroutable` for unroutable addresses. A `/16` mask is
used for IPv4 addresses and a `/32` mask for IPv6 addresses.
Each group has a limited number of buckets to prevent DoS attacks coming from
that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS
attack).
[highwayhash](https://arxiv.org/abs/1612.06257) is used as a hashing function
when calculating a bucket.
When placing a peer into a new bucket:
```md
hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
```
When placing a peer into an old bucket:
```md
hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
```
where `key` is a random 24-character hex string, `group` is the IP group of the peer (e.g. `1.2.0.0`),
`sourcegroup` is the IP group of the sender (the peer who sent us this address; e.g. `174.11.0.0`),
and `addr` is the string representation of the peer's address (e.g. `174.11.10.2:26656`).
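To make the "new bucket" formula concrete, here is a hedged sketch. It substitutes FNV-1a for the keyed highwayhash purely to stay self-contained, and all names are illustrative rather than the address book's actual identifiers.

```go
package pex

import (
	"hash/fnv"
	"strconv"
)

// hash64 is a stand-in for the keyed highwayhash used by the real peer store;
// FNV-1a is used here only to keep the sketch self-contained.
func hash64(parts ...string) uint64 {
	h := fnv.New64a()
	for _, p := range parts {
		h.Write([]byte(p))
	}
	return h.Sum64()
}

// newBucketIndex sketches the "new bucket" formula quoted above:
//   hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
// bucketsPerGroup and numNewBuckets must both be non-zero.
func newBucketIndex(key, group, sourceGroup string, bucketsPerGroup, numNewBuckets uint64) uint64 {
	inner := hash64(key, group, sourceGroup) % bucketsPerGroup
	outer := hash64(key, sourceGroup, strconv.FormatUint(inner, 10))
	return outer % numNewBuckets
}
```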
A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
each instance of the peer can have a different IP:PORT.
If we're trying to add a new peer but there's no space in its bucket, we'll
remove the worst peer from that bucket to make room.
## Vetting
When a peer is first added, it is unvetted.
Marking a peer as vetted is outside the scope of the `p2p` package.
For Tendermint, a Peer becomes vetted once it has contributed sufficiently
at the consensus layer; i.e. once it has sent us valid and not-yet-known
votes and/or block parts for `NumBlocksForVetted` blocks.
Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
If a peer becomes vetted but there are already too many vetted peers,
a randomly selected one of the vetted peers becomes unvetted.
If a peer becomes unvetted (either a new peer, or one that was previously vetted),
a randomly selected one of the unvetted peers is removed from the peer store.
More fine-grained tracking of peer behaviour can be done using
a trust metric (see below), but it's best to start with something simple.
## Select Peers to Dial
When we need more peers, we pick addresses randomly from the addrbook with some
configurable bias for unvetted peers. The bias should be lower when we have
fewer peers and can increase as we obtain more, ensuring that our first peers
are more trustworthy, but always giving us the chance to discover new good
peers.
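One possible shape for this bias, as a hedged sketch: the share of picks allowed to come from unvetted peers grows with the size of the peer store. The numbers and the function name are illustrative, not the values used by the address book.

```go
package pex

// newPeerBiasPercent sketches the idea described above: with few known peers
// we prefer vetted (old) addresses, and as the peer store grows we allow a
// larger share of picks to come from unvetted (new) addresses. The exact
// curve and bounds are illustrative.
func newPeerBiasPercent(numKnownPeers int) int {
	bias := 10 + numKnownPeers/10 // grow slowly with the size of the peer store
	if bias > 60 {
		bias = 60
	}
	return bias
}
```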
We track the last time we dialed a peer and the number of unsuccessful attempts
we've made. If too many attempts are made, we mark the peer as bad.
Connection attempts are made with exponential backoff (plus jitter). Because
the selection process happens every `ensurePeersPeriod`, we might not end up
dialing a peer for much longer than the backoff duration.
If we fail to connect to the peer after 16 tries (with exponential backoff), we
remove it from the peer store completely. For persistent peers, however, we keep trying to
dial indefinitely, unless `persistent_peers_max_dial_period` is configured to zero.
## Select Peers to Exchange
When we're asked for peers, we select them as follows:
- select at most `maxGetSelection` peers
- try to select at least `minGetSelection` peers - if we have fewer than that, select them all.
- select a random, unbiased `getSelectionPercent` of the peers
Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
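A hedged sketch of these selection rules follows; `maxGetSelection`, `minGetSelection`, and `getSelectionPercent` are passed in as parameters here, and the function name is illustrative.

```go
package pex

import "math/rand"

// selectPeersToSend sketches the rules above: take getSelectionPercent of the
// known addresses, clamp the count to [minSel, maxSel] (or everything we have
// if that is fewer), and pick without bias for vetted/unvetted.
func selectPeersToSend(addrs []string, pct, minSel, maxSel int) []string {
	n := len(addrs) * pct / 100
	if n < minSel {
		n = minSel
	}
	if n > maxSel {
		n = maxSel
	}
	if n > len(addrs) {
		n = len(addrs)
	}
	out := make([]string, len(addrs))
	copy(out, addrs)
	rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out[:n]
}
```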
## Preventing Spam
There are various cases where we decide a peer has misbehaved and we disconnect from them.
When this happens, the peer is removed from the peer store and blacklisted for
some amount of time. We call this "Disconnect and Mark".
Note that the bad behaviour may be detected outside the PEX reactor itself
(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
so it can remove and mark the peer.
In the PEX, if a peer sends us an unsolicited list of peers,
or if the peer sends a request too soon after another one,
we Disconnect and MarkBad.
## Trust Metric
The quality of peers can be tracked in more fine-grained detail using a
Proportional-Integral-Derivative (PID) controller that incorporates
current, past, and rate-of-change data to inform peer quality.
While a PID trust metric has been implemented, it remains for future work
to use it in the PEX.
See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
and [trustmetric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
architecture docs for more details.
<!-- todo: diagrams!!! -->

View File

@@ -1,18 +0,0 @@
---
order: 11
---
# State Sync
With block sync a node downloads all of an application's data from genesis and verifies it.
With state sync, your node downloads data at or near the head of the chain and verifies it.
This leads to drastically shorter times for joining a network.
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
## Events
When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
The user can query the events by subscribing to `EventQueryStateSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.

View File

@@ -0,0 +1,85 @@
---
order: 1
parent:
title: State Sync
order: 4
---
State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching,
and restoring state machine snapshots. For more information, see the [state sync ABCI section](https://docs.tendermint.com/master/spec/abci/abci.html#state-sync).
The state sync reactor has two main responsibilities:
* Serving state machine snapshots taken by the local ABCI application to new nodes joining the
network.
* Discovering existing snapshots and fetching snapshot chunks for an empty local application
being bootstrapped.
The state sync process for bootstrapping a new node is described in detail in the section linked
above. While technically part of the reactor (see `statesync/syncer.go` and related components),
this document will only cover the P2P reactor component.
For details on the ABCI methods and data types, see the [ABCI documentation](https://docs.tendermint.com/master/spec/abci/).
Information on how to configure state sync is located in the [nodes section](../../nodes/state-sync.md)
## State Sync P2P Protocol
When a new node begins state syncing, it will ask all peers it encounters whether they have any
available snapshots:
```go
type snapshotsRequestMessage struct{}
```
The receiver will query the local ABCI application via `ListSnapshots`, and send a message
containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots:
```go
type snapshotsResponseMessage struct {
Height uint64
Format uint32
Chunks uint32
Hash []byte
Metadata []byte
}
```
The node running state sync will offer these snapshots to the local ABCI application via
`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
is accepted, the state syncer will request snapshot chunks from appropriate peers:
```go
type chunkRequestMessage struct {
Height uint64
Format uint32
Index uint32
}
```
The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
and respond with it (limited to 16 MB):
```go
type chunkResponseMessage struct {
Height uint64
Format uint32
Index uint32
Chunk []byte
Missing bool
}
```
Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
chunk is a valid (although unlikely) response.
The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
is restored. If a chunk response is not returned within some time, it will be re-requested,
possibly from a different peer.
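A hedged sketch of this timeout-and-re-request behaviour follows; the channel, callback, and retry bound are illustrative placeholders rather than the reactor's actual types.

```go
package statesync

import (
	"errors"
	"time"
)

// waitForChunk sketches the retry behaviour described above: if a requested
// chunk does not arrive within timeout, it is re-requested (possibly from a
// different peer) via the supplied callback.
func waitForChunk(chunks <-chan []byte, rerequest func() error, timeout time.Duration) ([]byte, error) {
	for attempt := 0; attempt < 3; attempt++ { // retry bound is illustrative
		select {
		case chunk := <-chunks:
			return chunk, nil
		case <-time.After(timeout):
			if err := rerequest(); err != nil {
				return nil, err
			}
		}
	}
	return nil, errors.New("chunk not received after retries")
}
```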
The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
If no state sync is in progress (i.e. during normal operation), any unsolicited response messages
are discarded.

View File

@@ -99,7 +99,7 @@ Were hoping that these Tendermint tools will become de facto the first respon
Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements.
Join our [discord chat](https://discord.gg/cosmosnetwork), where we discuss the current issues and future improvements.

View File

@@ -1,4 +1,4 @@
# tm-signer-harness
# Remote Signer
Located under the `tools/tm-signer-harness` folder in the [Tendermint
repository](https://github.com/tendermint/tendermint).

File diff suppressed because it is too large Load Diff

View File

@@ -367,7 +367,7 @@ func main() {
flag.Parse()
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
server := abciserver.NewSocketServer(socketAddr, app)
server.SetLogger(logger)
@@ -438,7 +438,7 @@ This should create a `go.mod` file. The current tutorial only works with
the master branch of Tendermint, so let's make sure we're using the latest version:
```sh
go get github.com/tendermint/tendermint@master
go get github.com/tendermint/tendermint@97a3e44e0724f2017079ce24d36433f03124c09e
```
This will populate the `go.mod` with a release number followed by a hash for Tendermint.

View File

@@ -1,4 +1,5 @@
master master
v0.32.x v0.32
v0.33.x v0.33
v0.34.x v0.34
master master
v0.35.x v0.35

27
go.mod
View File

@@ -5,20 +5,23 @@ go 1.16
require (
github.com/BurntSushi/toml v0.4.1
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.1.13
github.com/adlio/schema v1.2.3
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.11.0
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.42.1
github.com/golangci/golangci-lint v1.43.0
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.3
github.com/lib/pq v1.10.4
github.com/libp2p/go-buffer-pool v0.0.2
github.com/minio/highwayhash v1.0.2
github.com/mroth/weightedrand v0.4.1
@@ -27,18 +30,18 @@ require (
github.com/prometheus/client_golang v1.11.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.0
github.com/rs/zerolog v1.25.0
github.com/rs/zerolog v1.26.1
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/spf13/cobra v1.3.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.4
github.com/vektra/mockery/v2 v2.9.3
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.9.4
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.40.0
google.golang.org/grpc v1.43.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
)

507
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -6,15 +6,15 @@ import (
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blocksync"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/blocksync"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
sm "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
tmsync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
@@ -36,7 +36,7 @@ var (
Priority: 5,
SendQueueCapacity: 1000,
RecvBufferCapacity: 1024,
RecvMessageCapacity: bc.MaxMsgSize,
RecvMessageCapacity: blocksync.MaxMsgSize,
MaxSendBytes: 100,
},
},
@@ -85,7 +85,7 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
blockSync *tmSync.AtomicBool
blockSync *tmsync.AtomicBool
blockSyncCh *p2p.Channel
// blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
@@ -107,7 +107,7 @@ type Reactor struct {
// stopping the p2p Channel(s).
poolWG sync.WaitGroup
metrics *cons.Metrics
metrics *consensus.Metrics
syncStartTime time.Time
}
@@ -122,7 +122,7 @@ func NewReactor(
blockSyncCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
blockSync bool,
metrics *cons.Metrics,
metrics *consensus.Metrics,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -142,7 +142,7 @@ func NewReactor(
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: tmSync.NewBool(blockSync),
blockSync: tmsync.NewBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockSyncCh: blockSyncCh,
@@ -169,6 +169,8 @@ func (r *Reactor) OnStart() error {
if err := r.pool.Start(); err != nil {
return err
}
r.poolWG.Add(1)
go r.requestRoutine()
r.poolWG.Add(1)
go r.poolRoutine(false)
@@ -384,6 +386,9 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error {
r.syncStartTime = time.Now()
r.poolWG.Add(1)
go r.requestRoutine()
r.poolWG.Add(1)
go r.poolRoutine(true)
@@ -394,7 +399,6 @@ func (r *Reactor) requestRoutine() {
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
defer statusUpdateTicker.Stop()
r.poolWG.Add(1)
defer r.poolWG.Done()
for {
@@ -455,8 +459,6 @@ func (r *Reactor) poolRoutine(stateSynced bool) {
defer trySyncTicker.Stop()
defer switchToConsensusTicker.Stop()
go r.requestRoutine()
defer r.poolWG.Done()
FOR_LOOP:
@@ -605,6 +607,8 @@ FOR_LOOP:
case <-r.closeCh:
break FOR_LOOP
case <-r.pool.Quit():
break FOR_LOOP
}
}
}

View File

@@ -6,22 +6,23 @@ import (
"time"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/proxy"
sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
type reactorTestSuite struct {
@@ -97,7 +98,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
t.Helper()
rts.nodes = append(rts.nodes, nodeID)
rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{}))
rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics())
require.NoError(t, rts.app[nodeID].Start())
blockDB := dbm.NewMemDB()
@@ -164,7 +165,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
rts.blockSyncChannels[nodeID],
rts.peerUpdates[nodeID],
rts.blockSync,
cons.NopMetrics())
consensus.NopMetrics())
require.NoError(t, err)
require.NoError(t, rts.reactors[nodeID].Start())
@@ -181,10 +182,11 @@ func (rts *reactorTestSuite) start(t *testing.T) {
}
func TestReactor_AbruptDisconnect(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
defer os.RemoveAll(config.RootDir)
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(64)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -216,10 +218,11 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
}
func TestReactor_SyncTime(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
defer os.RemoveAll(config.RootDir)
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(101)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -239,10 +242,12 @@ func TestReactor_SyncTime(t *testing.T) {
}
func TestReactor_NoBlockResponse(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
defer os.RemoveAll(config.RootDir)
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
defer os.RemoveAll(cfg.RootDir)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(65)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -286,11 +291,12 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
// See: https://github.com/tendermint/tendermint/issues/6005
t.SkipNow()
config := cfg.ResetTestRoot("block_sync_reactor_test")
defer os.RemoveAll(config.RootDir)
cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)
maxBlockHeight := int64(48)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
@@ -324,7 +330,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
//
// XXX: This causes a potential race condition.
// See: https://github.com/tendermint/tendermint/issues/6005
otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30)
otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30)
newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{
MaxPeers: uint16(len(rts.nodes) + 1),
MaxConnected: uint16(len(rts.nodes) + 1),

View File

@@ -5,8 +5,8 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/state"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

View File

@@ -3,7 +3,7 @@ package v2
import (
"fmt"
tmState "github.com/tendermint/tendermint/state"
tmstate "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/types"
)
@@ -36,7 +36,7 @@ func (e pcBlockProcessed) String() string {
type pcFinished struct {
priorityNormal
blocksSynced int
tmState tmState.State
tmState tmstate.State
}
func (p pcFinished) Error() string {
@@ -148,11 +148,11 @@ func (state *pcState) handle(event Event) (Event, error) {
return noOp, nil
case rProcessBlock:
tmState := state.context.tmState()
tmstate := state.context.tmState()
firstItem, secondItem, err := state.nextTwo()
if err != nil {
if state.draining {
return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil
return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil
}
return noOp, nil
}
@@ -164,7 +164,7 @@ func (state *pcState) handle(event Event) (Event, error) {
)
// verify if +second+ last commit "confirms" +first+ block
err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
err = state.context.verifyCommit(tmstate.ChainID, firstID, first.Height, second.LastCommit)
if err != nil {
state.purgePeer(firstItem.peerID)
if firstItem.peerID != secondItem.peerID {

View File

@@ -3,8 +3,8 @@ package v2
import (
"fmt"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/types"
)
@@ -21,10 +21,10 @@ type pContext struct {
store blockStore
applier blockApplier
state state.State
metrics *cons.Metrics
metrics *consensus.Metrics
}
func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *consensus.Metrics) *pContext {
return &pContext{
store: st,
applier: ex,

Some files were not shown because too many files have changed in this diff Show More