Compare commits


148 Commits

Author SHA1 Message Date
William Banfield
70624e8d27 more lock instrumentation 2022-08-03 18:20:12 -04:00
William Banfield
6b16cf6d68 Revert "update stats queue to be smaller"
This reverts commit d176124aa0.
2022-08-03 18:15:46 -04:00
William Banfield
d392a07b99 no peer status 2022-08-03 18:02:11 -04:00
William Banfield
ffd982af4f Revert "close"
This reverts commit 71607e6fcd.
2022-08-03 17:38:16 -04:00
William Banfield
d176124aa0 update stats queue to be smaller 2022-08-03 17:27:04 -04:00
William Banfield
71607e6fcd close 2022-08-03 17:19:09 -04:00
William Banfield
705316442a More metrics 2022-08-03 16:58:12 -04:00
William Banfield
83dea898fb add metrics 2022-08-03 16:45:44 -04:00
William Banfield
c764cebbe7 add unlock 2022-08-03 16:20:06 -04:00
William Banfield
f859f5ef6e add intermediate lock log 2022-08-03 13:51:39 -04:00
William Banfield
92a8e74fdf add more logs 2022-08-03 11:26:08 -04:00
William Banfield
5d2593c6ee add lock logs 2022-07-29 15:49:08 -04:00
dependabot[bot]
b4eaccd242 build(deps): Bump github.com/creachadair/tomledit from 0.0.22 to 0.0.23 (#9085)
Bumps [github.com/creachadair/tomledit](https://github.com/creachadair/tomledit) from 0.0.22 to 0.0.23.
- [Release notes](https://github.com/creachadair/tomledit/releases)
- [Commits](https://github.com/creachadair/tomledit/compare/v0.0.22...v0.0.23)

---
updated-dependencies:
- dependency-name: github.com/creachadair/tomledit
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-25 07:03:29 -07:00
dependabot[bot]
ed4d0de559 build(deps): Bump github.com/golangci/golangci-lint (#9069) 2022-07-25 10:34:58 +02:00
dependabot[bot]
a4ce134c93 build(deps): Bump github.com/bufbuild/buf from 1.3.1 to 1.6.0 (#9064) 2022-07-22 15:30:09 +02:00
mergify[bot]
0d2bf39c23 indexer: work around indexing problem for duplicate transactions (forward port: #8625) (#8950) 2022-07-21 19:33:08 +02:00
dependabot[bot]
d4495b8626 build(deps): Bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#9060) 2022-07-21 18:53:12 +02:00
dependabot[bot]
ba671c1acf build(deps): Bump github.com/BurntSushi/toml from 1.1.0 to 1.2.0 (#9063)
Bumps [github.com/BurntSushi/toml](https://github.com/BurntSushi/toml) from 1.1.0 to 1.2.0.
- [Release notes](https://github.com/BurntSushi/toml/releases)
- [Commits](https://github.com/BurntSushi/toml/compare/v1.1.0...v1.2.0)

---
updated-dependencies:
- dependency-name: github.com/BurntSushi/toml
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-21 09:19:33 -04:00
dependabot[bot]
65feb7097b build(deps): Bump github.com/golangci/golangci-lint (#9045) 2022-07-20 17:22:47 -07:00
M. J. Fromberger
9d1dd560e6 Prepare changelog for Release v0.35.9. (#9057) 2022-07-20 15:28:54 -07:00
mergify[bot]
f6bbd8302c migration: scope key migration to stores (#9005) (#9027) 2022-07-20 14:24:53 +02:00
Callum Waters
3e96a376b0 spec: merge v0.35 spec into tendermint (#9018) 2022-07-20 12:37:46 +02:00
M. J. Fromberger
183e249709 Prepare changelog for candidate v0.35.9-rc0 (#9040) 2022-07-19 14:02:14 -07:00
M. J. Fromberger
22ed610083 mempool: rework lock discipline to mitigate callback deadlocks (#9030)
The priority mempool has a stricter synchronization requirement than the legacy
mempool. Under sufficiently-heavy load, exclusive access can lead to deadlocks
when processing a large batch of transaction rechecks through an out-of-process
application using the socket client.

By design, a socket client stalls when its send buffer fills, during which time
it holds a lock shared with the receive thread.  While blocked in this state, a
response read by the receive thread waits for the shared lock so the callback
can be invoked.

If we're lucky, the server will then read the next request and make enough room
in the buffer for the sender to proceed. If not however (e.g., if the next
request is bigger than the one just consumed), the receive thread is blocked:
It is waiting on the lock and cannot read a response.  Once the server's output
buffer fills, the system deadlocks.

This can happen with any sufficiently-busy workload, but is more likely during
a large recheck in the v1 mempool, where the callbacks need exclusive access to
mempool state.  As a workaround, process rechecks for the priority mempool in
their own goroutines outside the mempool mutex.  Responses still head-of-line
block, but will no longer get pushback due to contention on the mempool itself.
2022-07-19 13:28:46 -07:00
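
The recheck workaround described in 22ed610083 above can be sketched in Go as follows; this is a minimal illustration, not the actual mempool code, and all type and function names here are assumptions:

```go
package main

import (
	"fmt"
	"sync"
)

// txMempool is a stand-in for the priority mempool; names are illustrative.
type txMempool struct {
	mtx sync.Mutex
	txs []string
}

// checkTxAsync is a stand-in for an ABCI CheckTx round trip through the
// socket client. It must not be called while holding mp.mtx, otherwise a
// stalled client can deadlock against the response callback.
func checkTxAsync(tx string, onDone func(ok bool)) {
	go func() { onDone(len(tx) > 0) }() // pretend the app accepts non-empty txs
}

// recheckTxs re-validates all transactions after a block update. The point of
// the workaround: each recheck runs in its own goroutine outside the mempool
// mutex, and the lock is re-taken only to apply the result.
func (mp *txMempool) recheckTxs() {
	mp.mtx.Lock()
	pending := append([]string(nil), mp.txs...) // snapshot under the lock
	mp.mtx.Unlock()

	var wg sync.WaitGroup
	for _, tx := range pending {
		tx := tx
		wg.Add(1)
		checkTxAsync(tx, func(ok bool) {
			defer wg.Done()
			if ok {
				return
			}
			mp.mtx.Lock() // re-take the lock only for the state update
			defer mp.mtx.Unlock()
			for i, cur := range mp.txs {
				if cur == tx {
					mp.txs = append(mp.txs[:i], mp.txs[i+1:]...)
					break
				}
			}
		})
	}
	wg.Wait()
}

func main() {
	mp := &txMempool{txs: []string{"tx1", "", "tx3"}}
	mp.recheckTxs()
	fmt.Println(mp.txs) // the empty ("rejected") tx has been evicted
}
```
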
dependabot[bot]
32761ec729 build(deps): Bump github.com/golangci/golangci-lint (#9037)
Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.46.0 to 1.47.0.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.46.0...v1.47.0)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-19 08:17:34 -07:00
dependabot[bot]
5edc9e3a15 build(deps): Bump pgregory.net/rapid from 0.4.7 to 0.4.8 (#9015)
Bumps [pgregory.net/rapid](https://github.com/flyingmutant/rapid) from 0.4.7 to 0.4.8.
- [Release notes](https://github.com/flyingmutant/rapid/releases)
- [Commits](https://github.com/flyingmutant/rapid/compare/v0.4.7...v0.4.8)

---
updated-dependencies:
- dependency-name: pgregory.net/rapid
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-07-15 09:33:52 -07:00
mergify[bot]
6b18dfcea1 Extract a library from the confix command-line tool. (backport #9012) (#9025)
(cherry picked from commit 18b5a500da)

Pull out the library functionality from scripts/confix and move it to
internal/libs/confix. Replace scripts/confix with a simple stub that has the
same command-line API, but uses the library instead.

Related:

- Move and update unit tests.
- Move scripts/confix/condiff to scripts/condiff.
- Update test data for v34, v35, and v36.
- Update reference diffs.
- Update testdata README.

Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-07-15 08:46:28 -07:00
M. J. Fromberger
9f2522148b config: fix the comments on p2p.queue-type (#9021)
These got disarranged during a previous cleanup.
2022-07-15 07:11:16 -07:00
dependabot[bot]
819e7f4bdd build(deps): Bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#8992)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.47.0 to 1.48.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.47.0...v1.48.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-14 16:42:27 -07:00
M. J. Fromberger
9177206750 Prepare changelog for Release v0.35.8 (#8988) 2022-07-14 14:49:37 -07:00
mergify[bot]
0c6efd8c51 config: update config to reflect simple-priority queue (#9007) (#9008)
Update the queue documentation to reflect the types of queues and current default queue.

(cherry picked from commit c1c501ecd4)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2022-07-14 17:13:41 -04:00
Sam Kleinman
f8d15fc682 blocksync: drop support for enabled=false (#8912) 2022-07-14 13:19:12 -04:00
William Banfield
7971514b55 p2p: configure max accepted for non-legacy as well (#8999)
* p2p: configure max connected for non-legacy as well

* remove explicit 0
2022-07-14 10:11:34 -04:00
M. J. Fromberger
b94470a6a4 mempool: ensure evicted transactions are removed from the cache (#9000)
In the original implementation transactions evicted for priority were also
removed from the cache. In addition, remove expired transactions from
the cache.

Related:

- Add Has method to cache implementations.
- Update tests to exercise this condition.
2022-07-14 06:51:54 -07:00
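
A minimal sketch of the cache shape described in b94470a6a4 above; the names here are illustrative, not the actual cache API:

```go
package main

import "fmt"

// txCache is an illustrative duplicate-suppression cache keyed by tx hash;
// the real cache implementations differ, this only shows the Has/Remove
// shape referred to above.
type txCache struct{ seen map[string]bool }

func newTxCache() *txCache { return &txCache{seen: make(map[string]bool)} }

// Push records a tx key; it reports false if the key was already present.
func (c *txCache) Push(key string) bool {
	if c.seen[key] {
		return false
	}
	c.seen[key] = true
	return true
}

// Remove forgets a key, e.g. when its tx is evicted for priority or expires.
func (c *txCache) Remove(key string) { delete(c.seen, key) }

// Has reports whether a key is cached; useful for tests exercising eviction.
func (c *txCache) Has(key string) bool { return c.seen[key] }

func main() {
	c := newTxCache()
	c.Push("deadbeef")
	c.Remove("deadbeef")           // evicted or expired: allow resubmission
	fmt.Println(c.Has("deadbeef")) // false
}
```
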
mergify[bot]
a1c8f8df0b doc: fix typos in quick-start.md. (#8990) (#8997) 2022-07-14 11:44:08 +02:00
M. J. Fromberger
3790968156 mempool: release lock during app connection flush (#8984)
This case is symmetric to what we did for CheckTx calls, where we release the
mempool mutex to ensure callbacks can fire during call setup.  We also need
this behaviour for application flush, for the same reason: The caller holds the
lock by contract from the Mempool interface.
2022-07-12 10:28:51 -07:00
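
The flush fix in 3790968156 above amounts to releasing the mempool mutex around the blocking flush call; a minimal sketch, with all names assumed for illustration:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type mempool struct {
	mtx sync.Mutex
}

// flushAppConn is a stand-in for a proxy-app flush: it cannot complete while
// the response callbacks it triggers are blocked waiting for mp.mtx.
func flushAppConn() {
	time.Sleep(10 * time.Millisecond)
}

// FlushAppConn is called with mp.mtx held (by contract of the Mempool
// interface), so it temporarily releases the lock around the blocking flush,
// mirroring what is already done for CheckTx.
func (mp *mempool) FlushAppConn() {
	mp.mtx.Unlock()
	defer mp.mtx.Lock() // restore the caller's lock before returning
	flushAppConn()
}

func main() {
	mp := &mempool{}
	mp.mtx.Lock()
	mp.FlushAppConn()
	mp.mtx.Unlock()
	fmt.Println("flush completed without holding the mempool lock")
}
```
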
M. J. Fromberger
9e64c95e56 mempool: reduce lock contention during CheckTx (cleanup) (#8983)
The way this was originally structured, we reacquired the lock after issuing
the initial ABCI CheckTx call, only to immediately release it. Restructure the
code so that this redundant acquire is no longer necessary.
2022-07-12 08:00:29 -07:00
M. J. Fromberger
cb93d3b587 mempool: don't log message type mismatch in the default callback (#8969) 2022-07-11 18:06:49 -07:00
M. J. Fromberger
f98de20f7e p2p: ensure closed channels stop receiving service (#8979)
Once these channels are closed, we should not continue to service them, as they
will never again deliver nonzero values.
2022-07-11 16:34:05 -07:00
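
The pattern referred to in f98de20f7e above is the usual Go idiom of nil-ing out a closed channel so a select loop stops servicing it; a self-contained sketch (not the p2p code itself):

```go
package main

import "fmt"

func main() {
	a := make(chan int)
	b := make(chan int)
	go func() { a <- 1; close(a) }()
	go func() { b <- 2; close(b) }()

	// Once a channel is closed it only yields zero values; setting the local
	// variable to nil removes it from the select so it is no longer serviced.
	for a != nil || b != nil {
		select {
		case v, ok := <-a:
			if !ok {
				a = nil
				continue
			}
			fmt.Println("a:", v)
		case v, ok := <-b:
			if !ok {
				b = nil
				continue
			}
			fmt.Println("b:", v)
		}
	}
}
```
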
M. J. Fromberger
451e697331 Update generated mocks after upgrade of Mockery v2. (#8973) 2022-07-11 09:18:36 -04:00
mergify[bot]
e3292a48e3 p2p: simpler priority queue (backport #8929) (#8956) 2022-07-08 13:29:42 -04:00
M. J. Fromberger
6a354a1e8d Update pending changelog. (#8965) 2022-07-08 09:54:50 -07:00
mergify[bot]
1daf7b939d p2p: make peer gossiping coinflip safer (#8949) (#8963)
Closes #8948

(cherry picked from commit 61ce384d75)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-07-08 12:32:12 -04:00
mergify[bot]
156c305b08 p2p: delete cruft (#8958) (#8959)
I think the decision in #8806 is that we shouldn't do this yet, so I think it's best to just drop this.

(cherry picked from commit 636320f901)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-07-08 09:59:57 -04:00
M. J. Fromberger
bc49f66c35 Add more unit tests for the priority mempool. (#8961)
- Add a test for time-based (TTL) expiration.
- Add tests for eviction based on size and priority.
2022-07-07 14:56:34 -07:00
M. J. Fromberger
9b02094827 Fix unbounded heap growth in the priority mempool. (#8944)
The primary effect of this change is to simplify the implementation of the
priority mempool to eliminate unbounded heap growth observed by the Vega team
when it was enabled in their testnet. It updates and fixes #8775.

The main body of this change is to remove the auxiliary indexing structures,
and use only the concurrent list structure (the same as the legacy mempool) to
maintain both gossip order and priority.

This means that operations that require priority information, such as block
updates and insert-time evictions, require a linear scan over the mempool.
This tradeoff greatly simplifies the code and eliminates the long-term heap
load, at the cost of some extra CPU and short-lived working memory during
CheckTx and Update calls.

Rough benchmark results:

 - This PR:
   BenchmarkTxMempool_CheckTx-10             486373              2271 ns/op
 - Original priority mempool implementation:
   BenchmarkTxMempool_CheckTx-10             500302              2113 ns/op
 - Legacy (v0) mempool:
   BenchmarkCheckTx-10                       364591              3571 ns/op

These benchmarks are not a good proxy for production load, but at least suggest
that the overhead of the implementation changes is not cause for concern.

In addition:

- Rework synchronization so that access to shared data structures is safe.
  Previously shared locks were used to exclude block updates during calls that
  update mempool state. Now access is properly exclusive where necessary.

- Fix a bug in the recheck flow, where priority updates from the application
  were not correctly reflected in the index structures.

- Eliminate the need for separate recheck cursors during block update. This
  avoids the need to explicitly invalidate elements of the concurrent list,
  which averts the dependency cycle that led to objects being pinned.

- Clean up, clarify, and fix inaccuracies in documentation comments throughout
  the package.

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2022-07-07 07:15:08 -07:00
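
A rough sketch of the linear-scan eviction described in 9b02094827 above; this only illustrates the tradeoff, it is not the priority mempool implementation, and all names are assumptions:

```go
package main

import "fmt"

type wrappedTx struct {
	tx       string
	priority int64
}

// txPool keeps a single list in gossip order, with no auxiliary heap or
// priority index (an illustrative stand-in for the concurrent list).
type txPool struct {
	list []wrappedTx
	cap  int
}

// addTx inserts a transaction. When the pool is full it does a linear scan
// for the lowest-priority resident and evicts it only if the newcomer
// outranks it, trading some CPU per CheckTx for no long-lived indexes.
func (p *txPool) addTx(w wrappedTx) bool {
	if len(p.list) < p.cap {
		p.list = append(p.list, w)
		return true
	}
	min := 0
	for i := 1; i < len(p.list); i++ {
		if p.list[i].priority < p.list[min].priority {
			min = i
		}
	}
	if p.list[min].priority >= w.priority {
		return false // newcomer does not beat the worst resident
	}
	p.list = append(p.list[:min], p.list[min+1:]...) // evict, keep gossip order
	p.list = append(p.list, w)
	return true
}

func main() {
	p := &txPool{cap: 2}
	p.addTx(wrappedTx{tx: "a", priority: 5})
	p.addTx(wrappedTx{tx: "b", priority: 1})
	fmt.Println(p.addTx(wrappedTx{tx: "c", priority: 9}), p.list) // evicts "b"
}
```
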
William Banfield
da83edc588 p2p: return from conn send on stopped mconn (#8904)
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-07-06 10:41:55 -04:00
mergify[bot]
047d7c927b p2p: fix flakey test due to disconnect cooldown (#8917) (#8918)
This test was made flakey by #8839. The cooldown period means that the node in the test will not try to reconnect as quickly as the test expects. This change makes the cooldown shorter in the test so that the node quickly reconnects.

(cherry picked from commit 5274f80de4)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-07-05 19:11:38 -04:00
mergify[bot]
49788adde5 p2p: use correct context error (#8916) (#8920)
handshakeCtx is the internal context carrying the timeout. Its error should be used for the error return.

(cherry picked from commit 921530c352)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2022-07-05 13:36:26 -04:00
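
The context fix in 49788adde5 above follows the standard Go pattern of reporting the error from the timeout-carrying context; a minimal sketch with assumed names:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// handshake is a stand-in for a peer handshake that can outlive its deadline.
func handshake(ctx context.Context) error {
	select {
	case <-time.After(time.Second): // pretend the peer is slow
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func connect(ctx context.Context) error {
	// handshakeCtx carries the handshake timeout; its error, not the
	// parent's, is what should be reported when the deadline fires.
	handshakeCtx, cancel := context.WithTimeout(ctx, 50*time.Millisecond)
	defer cancel()

	if err := handshake(handshakeCtx); err != nil {
		return fmt.Errorf("handshake: %w", handshakeCtx.Err())
	}
	return nil
}

func main() {
	err := connect(context.Background())
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```
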
dependabot[bot]
e414d0a878 build(deps): Bump github.com/libp2p/go-buffer-pool from 0.0.2 to 0.1.0 (#8931) 2022-07-05 12:19:03 +02:00
dependabot[bot]
6a646f366e build(deps): Bump github.com/vektra/mockery/v2 from 2.13.1 to 2.14.0 (#8925)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.13.1 to 2.14.0.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.13.1...v2.14.0)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-01 12:15:22 -04:00
mergify[bot]
01984cb3b2 p2p: set outgoing connections to around 20% of total connections (#8913) (#8914)
(cherry picked from commit 47cb30fc1d)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
2022-06-30 17:15:32 -04:00
dependabot[bot]
e2d2c04aac build(deps): Bump github.com/stretchr/testify from 1.7.2 to 1.8.0 (#8908)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.2 to 1.8.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.7.2...v1.8.0)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-30 08:33:13 -07:00
Sam Kleinman
204281fa66 node: always start blocksync and avoid misconfiguration (#8902) 2022-06-29 22:12:36 -04:00
mergify[bot]
486370ac68 log: do not pre-process log results (backport #8895) (#8896)
(cherry picked from commit 37f9d59969)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-29 11:26:28 -04:00
William Banfield
978f754ad3 p2p: set empty timeouts to configed values. (manual backport of #8847) (#8869)
* regenerate mocks using newer style

* p2p: set empty timeouts to small values. (#8847)

These timeouts default to 'do not time out' if they are not set. This ties up resources, potentially indefinitely. If the node on the other side of the handshake is up but unresponsive, the [handshake call](edec79448a/internal/p2p/router.go (L720)) will _never_ return.

* fix light client select statement
2022-06-28 16:07:15 -04:00
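
A minimal sketch of the defaulting behaviour described in 978f754ad3 above; the field names and default value are assumptions, not the actual config keys:

```go
package main

import (
	"fmt"
	"time"
)

// connConfig is illustrative only; the real configuration differs.
type connConfig struct {
	HandshakeTimeout time.Duration
	DialTimeout      time.Duration
}

const defaultTimeout = 20 * time.Second

// withDefaults replaces zero timeouts, which would otherwise mean "never
// time out" and leave the handshake hanging on an unresponsive peer.
func (c connConfig) withDefaults() connConfig {
	if c.HandshakeTimeout == 0 {
		c.HandshakeTimeout = defaultTimeout
	}
	if c.DialTimeout == 0 {
		c.DialTimeout = defaultTimeout
	}
	return c
}

func main() {
	cfg := connConfig{}.withDefaults()
	fmt.Println(cfg.HandshakeTimeout, cfg.DialTimeout) // 20s 20s
}
```
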
mergify[bot]
c4ef566071 p2p: remove dial sleep and provide disconnect cooldown (backport #8839) (#8875)
(cherry picked from commit 52b6dc19ba)
2022-06-27 10:49:51 -04:00
dependabot[bot]
f19e52e6f2 build(deps): Bump styfle/cancel-workflow-action from 0.9.1 to 0.10.0 (#8882)
Bumps [styfle/cancel-workflow-action](https://github.com/styfle/cancel-workflow-action) from 0.9.1 to 0.10.0.
- [Release notes](https://github.com/styfle/cancel-workflow-action/releases)
- [Commits](https://github.com/styfle/cancel-workflow-action/compare/0.9.1...0.10.0)

---
updated-dependencies:
- dependency-name: styfle/cancel-workflow-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-27 09:13:46 -04:00
mergify[bot]
19b98c7005 e2e: disable another network test (#8862) (#8873)
Follow up on: https://github.com/tendermint/tendermint/pull/8849

(cherry picked from commit c4d24eed7d)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2022-06-24 13:22:26 -04:00
mergify[bot]
826f224c2d p2p: add eviction metrics and cleanup dialing error handling (backport #8819) (#8820) 2022-06-24 10:42:58 -04:00
mergify[bot]
2df4c2b19d e2e: add tolerance to peer discovery test (#8849) (#8857)
(cherry picked from commit fb209136f8)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-23 14:46:10 -04:00
mergify[bot]
6f4ef72964 p2p: track peers by address (#8841) (#8855)
(cherry picked from commit 436a38f876)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-23 13:21:46 -04:00
mergify[bot]
3398f37979 cmd: add tool for compaction of goleveldb (backport #8564) (#8675) 2022-06-23 18:25:19 +02:00
mergify[bot]
8ef63fe3d9 e2e: report peer heights in error message (#8843) (#8853)
(cherry picked from commit 52b2efb827)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-23 10:46:51 -04:00
M. J. Fromberger
9daea43375 Update default version marker. (#8844) 2022-06-22 18:16:58 -04:00
M. J. Fromberger
df9363c67c Prepare changelog for Release v0.35.7 (#8772) 2022-06-22 11:54:03 -07:00
mergify[bot]
24701cd587 p2p: more dial routines (#8827) (#8828) 2022-06-21 21:27:28 -04:00
William Banfield
e9c87a3c49 remove dial wake change (#8824) 2022-06-21 20:20:04 -04:00
dependabot[bot]
034a9f8422 build(deps): Bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#8811)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.4.0 to 1.5.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.4.0...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Thane Thomson <connect@thanethomson.com>
2022-06-21 17:16:31 -04:00
Callum Waters
4322f7d0b9 mempool: make error throwing for CheckTx consistent (#8817) 2022-06-21 18:51:50 +02:00
Sam Kleinman
83526cacbc p2p: peer store and dialing changes (0.35.x backport) (#8740)
* p2p: peer store and dialing changes

(cherry picked from commit 9dbb135152)

* reduce persistent peer max

(cherry picked from commit b213a2766f)

* don't gossip inactive peers

(cherry picked from commit cc28ce298f)

* fix small case

(cherry picked from commit 56a91642dc)

* fix error message

(cherry picked from commit 86db59f53b)

* remove seed flag

(cherry picked from commit 000aa05485)

* reduce logging level

(cherry picked from commit 4e2bc8f51e)

* make const

(cherry picked from commit e3068b50b2)

* update comment

(cherry picked from commit 31bd396c88)

* cleanup

(cherry picked from commit eddb23b5af)

* oops

* overflows

(cherry picked from commit 4c8651026a)

* Update internal/p2p/peermanager.go

Co-authored-by: M. J. Fromberger <michael.j.fromberger@gmail.com>
(cherry picked from commit f23f6e1089)

* Update internal/p2p/peermanager.go

Co-authored-by: M. J. Fromberger <michael.j.fromberger@gmail.com>
(cherry picked from commit 1c02758eaf)

* comment

(cherry picked from commit 9f604fd2ef)

* test: new scoring

(cherry picked from commit 930fd7f2be)

* fix scoring test

(cherry picked from commit 9abc55f3a0)

* cleanup peer manager

* fix panic

* add metrics

* fix compile

* fix test

* default metrics to noop

* noop metrics

* update metrics

(cherry picked from commit 720600ef62)

* rename metrics

* actually shuffle peers more

* fix up advertise

(cherry picked from commit 8195c97590)

* add max dialing attempts

* connection tracking

* comments mostly

(cherry picked from commit 053ecd9b8c)

* Apply suggestions from code review

Co-authored-by: M. J. Fromberger <michael.j.fromberger@gmail.com>

* comments

* fix lint

* cr feedback

* fixup cherrypick

* make wb happy

* more comments

* fixup

* fix lint

* iota fix

* add skip

* cleanup

* remove comment

* fix rand

* fix rand

* use numaddresses correctly

* advertise fixes

* remove some things

* cleanup comment

* more fixes

* toml

* fix comment

* fix spell

* dec limit

* fixes

* up the attempt max

* cr feedback

* probabilistic test

* fix spell

* add metrics for peers stored on startup

* p2p: peer score should not wrap around (#8790)

(cherry picked from commit 4d820ff4f5)

# Conflicts:
#	internal/p2p/peermanager.go

* fix

* wake more

* wake if we need to

Co-authored-by: M. J. Fromberger <michael.j.fromberger@gmail.com>
2022-06-20 13:13:21 -04:00
mergify[bot]
25d724b920 e2e: reactivate network test (backport #8635) (#8777) 2022-06-20 17:10:20 +02:00
dependabot[bot]
3945cec115 build(deps): Bump github.com/adlio/schema from 1.3.0 to 1.3.3 (#8797)
Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.3.0 to 1.3.3.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.3.0...v1.3.3)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-20 09:19:34 -04:00
mergify[bot]
74c6d8100d p2p: fix typo (#8793) (#8794) 2022-06-19 11:52:43 -07:00
M. J. Fromberger
e2d01cdcff Make priority mempool fuzz test actually test the priority mempool. (#8785)
Fixes #8783.
2022-06-17 09:29:13 -07:00
dependabot[bot]
bee6597b28 build(deps): Bump github.com/vektra/mockery/v2 from 2.13.0 to 2.13.1 (#8765)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.13.0 to 2.13.1.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.13.0...v2.13.1)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-15 10:12:21 -04:00
mergify[bot]
ce8284c027 p2p: accept should not abort on first error (backport #8759) (#8760) 2022-06-15 07:56:15 -04:00
dependabot[bot]
d02f58e191 build(deps): Bump github.com/vektra/mockery/v2 from 2.12.3 to 2.13.0 (#8747)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.12.3 to 2.13.0.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.12.3...v2.13.0)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-14 10:00:05 -07:00
Callum Waters
28c38522e0 do not log an error for duplicate txs (#8732) 2022-06-10 11:56:00 +02:00
Sam Kleinman
0b63e293f1 e2e/generator: add additional testnets (0.35) (#8730) 2022-06-10 03:55:29 -04:00
mergify[bot]
af0590a819 consensus: switch timeout message to be debug and clarify meaning (#8694) (#8696)
(cherry picked from commit 75a12ea0c6)

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2022-06-09 09:45:58 -04:00
mergify[bot]
46c27b45ab rpc: always close http bodies (backport #8712) (#8715)
(cherry picked from commit 931c98f7ad)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-06-08 11:57:55 -07:00
dependabot[bot]
3c29b6996b build(deps): Bump github.com/rs/zerolog from 1.26.1 to 1.27.0 (#8723)
Bumps [github.com/rs/zerolog](https://github.com/rs/zerolog) from 1.26.1 to 1.27.0.
- [Release notes](https://github.com/rs/zerolog/releases)
- [Commits](https://github.com/rs/zerolog/compare/v1.26.1...v1.27.0)

---
updated-dependencies:
- dependency-name: github.com/rs/zerolog
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-08 07:14:17 -07:00
dependabot[bot]
138be1f7b0 build(deps): Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#8710)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.1 to 1.7.2.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.7.1...v1.7.2)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-07 04:59:40 -04:00
mergify[bot]
98411962c6 p2p: pass maxConns for MaxPeers during node setup (#8684) (#8692)
* pass maxConns for MaxPeers
* add upgrade connections to max connections for max peers
* change the formula to calculate max peers

(cherry picked from commit 30929cf190)

Co-authored-by: Evan Forbes <42654277+evan-forbes@users.noreply.github.com>
2022-06-04 08:53:41 -07:00
M. J. Fromberger
3079eb8b30 Prepare Release v0.35.6 (#8685) 2022-06-03 10:42:06 +02:00
mergify[bot]
0e3a3fe58b p2p: shed peers from store from other networks (backport #8678) (#8681) 2022-06-02 12:15:55 -04:00
mergify[bot]
e17e6b1aaa migrate: provide function for database production (backport #8614) (#8672)
(cherry picked from commit d5299882b0)
2022-06-02 06:17:06 -04:00
dependabot[bot]
0421f8b25e build(deps): Bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#8666)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.2 to 1.47.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.46.2...v1.47.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-01 11:59:15 -07:00
Callum Waters
4faa8b72aa cmd: don't used global config for reset commands (#8668) 2022-06-01 18:34:35 +02:00
Callum Waters
336dc2f2c5 chore: update version (#8634) 2022-06-01 15:48:35 +02:00
Callum Waters
e8ac37223f pex: align max address thresholds (#8657) 2022-05-31 14:07:25 -04:00
Sam Kleinman
a889f17e51 consensus: restructure peer catchup sleep (#8651) 2022-05-31 11:31:51 -04:00
mergify[bot]
2b5a4de4b3 docs: add documentation for undocumented p2p metrics (backport #8640) (#8641)
* docs: add documentation for undocumented p2p metrics (#8640)

Once merged will backport to v0.35

(cherry picked from commit 3dec4a4744)

# Conflicts:
#	docs/nodes/metrics.md

* fix merge conflict

Co-authored-by: William Banfield <4561443+williambanfield@users.noreply.github.com>
Co-authored-by: William Banfield <wbanfield@gmail.com>
2022-05-30 05:03:50 -04:00
dependabot[bot]
a85d9c5163 build(deps): Bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#8631)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.11.0 to 1.12.0.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.11.0...v1.12.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-27 07:51:54 -07:00
M. J. Fromberger
12a0559d67 Prepare changelog for release v0.35.5. (#8601) 2022-05-26 09:08:41 -07:00
mergify[bot]
a22f7bec39 migrate: reorder collection ordering (#8613) (#8616)
(cherry picked from commit f33722b423)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-05-25 13:26:56 -04:00
dependabot[bot]
3784371dd8 build(deps): Bump github.com/vektra/mockery/v2 from 2.12.2 to 2.12.3 (#8608) 2022-05-25 05:12:51 -04:00
mergify[bot]
4ee91663da p2p: reduce ability of SendError to disconnect peers (backport #8597) (#8603) 2022-05-25 04:12:43 -04:00
M. J. Fromberger
87763a3d6a rpc: fix encoding of block_results responses (backport #8593) (#8594)
The block results include validator updates in ABCI protobuf format, which does
not encode "correctly" according to the expected Amino style RPC clients expect.

- Write a regression test for this issue.
- Add JSON marshaling overrides for ABCI ValidatorUpdate messages.

Patches for v0.35.x:

- Replace jsontypes with tmjson (removed in v0.36)
- Regress test data for BeginBlock / EndBlock
2022-05-24 07:21:28 -07:00
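
A rough sketch of the kind of JSON override described in 87763a3d6a above; the struct shape and field names are assumptions, not the actual ABCI ValidatorUpdate override:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// validatorUpdate mimics the ABCI protobuf shape; names are illustrative.
type validatorUpdate struct {
	PubKey []byte
	Power  int64
}

// MarshalJSON emits a legacy Amino-flavoured shape (string-encoded power)
// instead of the protobuf default, so existing RPC clients can decode it.
func (v validatorUpdate) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		PubKey []byte `json:"pub_key"`
		Power  string `json:"power"`
	}{PubKey: v.PubKey, Power: fmt.Sprintf("%d", v.Power)})
}

func main() {
	out, _ := json.Marshal([]validatorUpdate{{PubKey: []byte{1, 2}, Power: 10}})
	fmt.Println(string(out)) // [{"pub_key":"AQI=","power":"10"}]
}
```
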
dependabot[bot]
ad9e875376 build(deps): Bump goreleaser/goreleaser-action from 2 to 3 (#8589)
Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 2 to 3.
- [Release notes](https://github.com/goreleaser/goreleaser-action/releases)
- [Commits](https://github.com/goreleaser/goreleaser-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: goreleaser/goreleaser-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-23 08:31:18 -04:00
mergify[bot]
2f8483aa85 p2p: remove unused get height methods (backport #8569) (#8571) 2022-05-17 11:32:13 -04:00
dependabot[bot]
0e6b85efa9 build(deps): Bump github.com/lib/pq from 1.10.5 to 1.10.6 (#8568) 2022-05-17 04:57:37 -07:00
dependabot[bot]
13cc1931a7 build(deps): Bump google.golang.org/grpc from 1.46.0 to 1.46.2 (#8560)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.0 to 1.46.2.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.46.0...v1.46.2)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 09:10:13 -07:00
dependabot[bot]
f6b13f8c95 build(deps): Bump docker/login-action from 1.10.0 to 2.0.0 (#8536)
* build(deps): Bump docker/login-action from 1.10.0 to 2.0.0

Bumps [docker/login-action](https://github.com/docker/login-action) from 1.10.0 to 2.0.0.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](https://github.com/docker/login-action/compare/v1.10.0...v2.0.0)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:57:07 -07:00
dependabot[bot]
248cb26845 build(deps): Bump actions/stale from 4 to 5 (#8534)
Bumps [actions/stale](https://github.com/actions/stale) from 4 to 5.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:55:22 -07:00
dependabot[bot]
79d83cea15 build(deps): Bump docker/setup-buildx-action from 1.6.0 to 2.0.0 (#8533)
* build(deps): Bump docker/setup-buildx-action from 1.6.0 to 2.0.0

Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1.6.0 to 2.0.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](https://github.com/docker/setup-buildx-action/compare/v1.6.0...v2.0.0)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:53:42 -07:00
dependabot[bot]
643eaef146 build(deps): Bump docker/build-push-action from 2.7.0 to 3.0.0 (#8530)
* build(deps): Bump docker/build-push-action from 2.7.0 to 3.0.0

Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 2.7.0 to 3.0.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v2.7.0...v3.0.0)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:49:56 -07:00
dependabot[bot]
552e1e78b8 build(deps): Bump codecov/codecov-action from 2.1.0 to 3.1.0 (#8529)
* build(deps): Bump codecov/codecov-action from 2.1.0 to 3.1.0

Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2.1.0 to 3.1.0.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v2.1.0...v3.1.0)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:45:04 -07:00
dependabot[bot]
fcf0579f0e build(deps): Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0 (#8526)
* build(deps): Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0

Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.1.0 to 3.2.0.
- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
- [Commits](https://github.com/golangci/golangci-lint-action/compare/v3.1.0...v3.2.0)

---
updated-dependencies:
- dependency-name: golangci/golangci-lint-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:42:15 -07:00
dependabot[bot]
3df465c353 build(deps): Bump github.com/prometheus/client_golang (#8542)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.1 to 1.12.2.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/v1.12.2/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.12.1...v1.12.2)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-05-13 11:42:44 -07:00
dependabot[bot]
142b273c2f build(deps): Bump gaurav-nelson/github-action-markdown-link-check from 1.0.13 to 1.0.14 (#8523)
* build(deps): Bump gaurav-nelson/github-action-markdown-link-check

Bumps [gaurav-nelson/github-action-markdown-link-check](https://github.com/gaurav-nelson/github-action-markdown-link-check) from 1.0.13 to 1.0.14.
- [Release notes](https://github.com/gaurav-nelson/github-action-markdown-link-check/releases)
- [Commits](https://github.com/gaurav-nelson/github-action-markdown-link-check/compare/1.0.13...1.0.14)

---
updated-dependencies:
- dependency-name: gaurav-nelson/github-action-markdown-link-check
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Target fork.

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-05-13 08:42:47 -07:00
M. J. Fromberger
74267a062e Remove backport-specific Dependabot config (v0.35.x). (#8520)
After #8518, this separate configuration is no longer needed.
The master copy will target updates to this branch.
2022-05-13 08:10:15 -07:00
mergify[bot]
12fed0ed53 blocksync: validate block before persisting it (backport #8493) (#8496) 2022-05-12 10:36:48 +02:00
Sam Kleinman
bdd59c892c statesync: avoid potential race (#8494) 2022-05-11 15:09:41 -04:00
dependabot[bot]
23834b6b31 build(deps): Bump github.com/creachadair/tomledit from 0.0.19 to 0.0.22 (#8503) 2022-05-11 12:38:25 -04:00
Callum Waters
b40a7b63b7 docs: remove developer sessions (#8497) 2022-05-10 22:09:47 -07:00
dependabot[bot]
923d14c439 build(deps): Bump github.com/golangci/golangci-lint (#8489)
Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.45.2 to 1.46.0.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.45.2...v1.46.0)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-10 09:02:42 -07:00
dependabot[bot]
5b634976dc build(deps): Bump github.com/vektra/mockery/v2 from 2.12.1 to 2.12.2 (#8473) 2022-05-06 05:26:23 -07:00
mergify[bot]
383408479d keymigrate: improve filtering for legacy transaction hashes (#8466) (#8467)
This is a follow-up to #8352. The check for legacy evidence keys is only based
on the prefix of the key. Hashes, which are unprefixed, could easily have this
form and be misdiagnosed.

Because the conversion for evidence checks the key structure, this should not
cause corruption. The probability that a hash is a syntactically valid evidence
key is negligible.  The tool will report an error rather than storing bad data.
But this does mean that such transaction hashes could cause the migration to
stop and report an error before it is complete.

To ensure we convert all the data, refine the legacy key check to filter these
keys more precisely. Update the test cases to exercise this condition.

(cherry picked from commit dd4fee88ef)
2022-05-04 13:32:40 -07:00
dependabot[bot]
f383e8fa98 build(deps): Bump github.com/creachadair/atomicfile from 0.2.5 to 0.2.6 (#8461)
Bumps [github.com/creachadair/atomicfile](https://github.com/creachadair/atomicfile) from 0.2.5 to 0.2.6.
- [Release notes](https://github.com/creachadair/atomicfile/releases)
- [Commits](https://github.com/creachadair/atomicfile/compare/v0.2.5...v0.2.6)

---
updated-dependencies:
- dependency-name: github.com/creachadair/atomicfile
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-04 06:49:09 -07:00
dependabot[bot]
df66afab99 build(deps): Bump github.com/btcsuite/btcd from 0.22.0-beta to 0.22.1 (#8438) 2022-04-29 08:08:40 -04:00
dependabot[bot]
971bd1487e build(deps): Bump github.com/creachadair/tomledit from 0.0.18 to 0.0.19 (#8437) 2022-04-29 04:11:01 -07:00
dependabot[bot]
512a0bf356 build(deps): Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#8421) 2022-04-27 06:24:47 -07:00
dependabot[bot]
06d3d41623 build(deps): Bump github.com/vektra/mockery/v2 from 2.12.0 to 2.12.1 (#8418) 2022-04-26 12:31:13 -04:00
dependabot[bot]
5b14d27ccf build(deps): Bump google.golang.org/grpc from 1.45.0 to 1.46.0 (#8409) 2022-04-25 08:59:51 -04:00
M. J. Fromberger
ad7c501359 Update common actions versions on v0.35.x to match master. (#8400) 2022-04-23 09:07:15 -07:00
dependabot[bot]
70d771ead2 build(deps): Bump github.com/vektra/mockery/v2 from 2.11.0 to 2.12.0 (#8394)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.11.0 to 2.12.0.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.11.0...v2.12.0)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-04-22 10:26:52 -07:00
dependabot[bot]
5b3b3065ad build(deps): Bump github.com/creachadair/tomledit from 0.0.16 to 0.0.18 (#8395) 2022-04-22 10:23:49 -04:00
mergify[bot]
9195a005bd Add config samples from TM v26, v27, v28, v29. (#8384) (#8387) 2022-04-21 08:58:18 -07:00
mergify[bot]
2a91d21b61 Add confix testdata for Tendermint v0.30. (#8380) (#8381)
Some additional testdata I grabbed while writing up the draft of RFC 019.

(cherry picked from commit d56392cee9)
2022-04-20 10:04:21 -07:00
mergify[bot]
14f0d60f24 p2p: fix setting in con-tracker (#8370) (#8371)
(cherry picked from commit 889341152a)
2022-04-19 23:32:54 -07:00
dependabot[bot]
21d68441a1 build(deps): Bump github.com/vektra/mockery/v2 from 2.10.6 to 2.11.0 (#8373)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.10.6 to 2.11.0.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.10.6...v2.11.0)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-04-19 10:07:33 -07:00
M. J. Fromberger
4d9ad115b0 build: clean up an unnecessary dependency (#8363) 2022-04-18 11:05:10 -07:00
dependabot[bot]
e646bd77ca build(deps): Bump github.com/creachadair/atomicfile from 0.2.4 to 0.2.5 (#8366)
Bumps [github.com/creachadair/atomicfile](https://github.com/creachadair/atomicfile) from 0.2.4 to 0.2.5.
- [Release notes](https://github.com/creachadair/atomicfile/releases)
- [Commits](https://github.com/creachadair/atomicfile/compare/v0.2.4...v0.2.5)

---
updated-dependencies:
- dependency-name: github.com/creachadair/atomicfile
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-18 10:18:18 -04:00
M. J. Fromberger
8682489551 Prepare changelog for release v0.35.4. (#8360) 2022-04-17 22:34:09 -07:00
mergify[bot]
04c1f76569 rpc: avoid leaking threads during checktx (backport #8328) (#8333) 2022-04-17 09:17:03 -04:00
Ethan Reesor
226bc94c5f node: always close database engine (#7113) (#8330) 2022-04-15 14:37:34 -07:00
mergify[bot]
641d290a6d keymigrate: fix conversion of transaction hash keys (backport #8352) (#8353)
In the legacy database format, keys were generally stored with a string prefix
to partition the key space. Transaction hashes, however, were not prefixed: The
hash of a transaction was the entire key for its record.

When the key migration script scans its input, it checks the format of each
key to determine whether it has already been converted, so that it is safe to run
the script over an already-converted database.

After checking for known prefixes, the migration script used two heuristics to
distinguish ABCI events and transaction hashes: For ABCI events, whose keys
used the form "name/value/height/index", it checked for the right number of
separators. For hashes, it checked that the length is exactly 32 bytes (the
length of a SHA-256 digest) AND that the value does not contain a "/".

This last check is problematic: Any hash containing the byte 0x2f (the code
point for "/") would be incorrectly filtered out from conversion. This leads to
some transaction hashes not being converted.

To fix this problem, this changes how the script recognizes keys:

1. Use a more rigorous syntactic check to filter out ABCI metadata.
2. Use only the length to identify hashes among what remains.

This change is still not a complete fix: It is possible, though unlikely, that
a valid hash could happen to look exactly like an ABCI metadata key. However,
the chance of that happening is vastly smaller than the chance of generating a
hash that contains at least one "/" byte.

Similarly, it is possible that an already-converted key of some other type
could be mistaken for a hash (not a converted hash, ironically, but another
type of the right length). Again, we can't do anything about that.

(cherry picked from commit 34e727676c)
2022-04-14 17:04:28 -07:00
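
A rough sketch of the refined key classification described in 641d290a6d above; the predicates here are simplified assumptions, not the actual keymigrate logic:

```go
package main

import (
	"bytes"
	"fmt"
)

// classifyLegacyKey sketches the refined ordering described above: check the
// ABCI event-key syntax first, then fall back to length alone for tx hashes,
// so hashes that happen to contain a '/' byte are no longer skipped.
func classifyLegacyKey(key []byte) string {
	if looksLikeEventKey(key) {
		return "abci-event"
	}
	if len(key) == 32 { // SHA-256 digest length; no '/' check any more
		return "tx-hash"
	}
	return "other"
}

// looksLikeEventKey does a rough syntactic check for "name/value/height/index".
func looksLikeEventKey(key []byte) bool {
	parts := bytes.Split(key, []byte("/"))
	if len(parts) != 4 {
		return false
	}
	for _, p := range parts {
		if len(p) == 0 {
			return false
		}
	}
	return true
}

func main() {
	hashWithSlash := bytes.Repeat([]byte{0x2f}, 32) // 32 bytes, all '/'
	fmt.Println(classifyLegacyKey(hashWithSlash))   // tx-hash, not skipped
	fmt.Println(classifyLegacyKey([]byte("tx.height/5/12/0"))) // abci-event
}
```
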
William Banfield
8579cc382e invoke callbacks when set late in socket client (Forward-Port #8331) (#8336) 2022-04-14 18:36:09 -04:00
dependabot[bot]
1d8b1c7507 build(deps): Bump github.com/vektra/mockery/v2 from 2.10.4 to 2.10.6 (#8345) 2022-04-14 09:32:11 -07:00
dependabot[bot]
118ff02272 build(deps): Bump github.com/spf13/viper from 1.10.1 to 1.11.0 (#8347) 2022-04-14 08:49:19 -07:00
mergify[bot]
52bcd56d60 confix: convert tx-index.indexer from string to array (backport #8342) (#8348)
The format of this config value was changed in v0.35.

- Move plan to its own file (for ease of reading).
- Convert indexer string to an array if not already done.

(cherry picked from commit 69874c2050)
2022-04-14 06:59:16 -07:00
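
A minimal sketch of the conversion described in 52bcd56d60 above; it operates on a plain map for illustration, whereas the real tool edits the parsed TOML document:

```go
package main

import "fmt"

// indexerToArray converts tx-index.indexer from the pre-v0.35 string form to
// the array form, and leaves an already-converted value alone so the
// migration is idempotent.
func indexerToArray(cfg map[string]interface{}) {
	switch v := cfg["indexer"].(type) {
	case string:
		cfg["indexer"] = []string{v}
	case []string, []interface{}:
		// already converted; nothing to do
	}
}

func main() {
	cfg := map[string]interface{}{"indexer": "kv"}
	indexerToArray(cfg)
	fmt.Println(cfg) // map[indexer:[kv]]
	indexerToArray(cfg)
	fmt.Println(cfg) // unchanged on a second run
}
```
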
M. J. Fromberger
12e0ea6ea7 confix: add default mempool.version = "v1" in v0.35. (#8335) 2022-04-13 14:28:54 -07:00
M. J. Fromberger
1c3921f5df Revert CI cache override. (#8324)
The caches for golangci-lint failed to update correctly, causing spurious
failures on #8300. To work around this, I disabled caching temporarily.
This change removes that override, restoring the default.
2022-04-12 21:34:57 -07:00
M. J. Fromberger
a639323cf0 Add a tool to update old config files to the latest version. (#8300)
A manual backport of #8281, adjusted to stop at v0.35.

* Update pending changelog.
* Backport applicable fixes for v0.35 from master.
2022-04-12 21:19:12 -07:00
mergify[bot]
e4d83ba2ad keymigrate: fix decoding of block-hash row keys (backport #8294) (#8295)
(cherry picked from commit 322bb460dd)
2022-04-09 09:17:28 -07:00
M. J. Fromberger
9edb87c5f8 Fix release notes to match the prevailing style. (#8293)
A manual backport of #8292.
Also update actions/checkout.
2022-04-08 18:26:44 -07:00
374 changed files with 50565 additions and 3235 deletions

.github/CODEOWNERS

@@ -7,4 +7,6 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @samricotta
/spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic


@@ -1,27 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
- package-ecosystem: npm
directory: "/docs"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- fadeev
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- melekes
- tessr
labels:
- T:dependencies


@@ -20,11 +20,11 @@ jobs:
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -41,11 +41,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -63,11 +63,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go


@@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@@ -39,17 +39,17 @@ jobs:
platforms: all
- name: Set up Docker Build
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v3
with:
context: .
file: ./DOCKER/Dockerfile


@@ -15,11 +15,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'


@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go


@@ -13,11 +13,11 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Install go-fuzz
working-directory: test/fuzz
@@ -54,14 +54,14 @@ jobs:
continue-on-error: true
- name: Archive crashers
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3
- name: Archive suppressions
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: suppressions
path: test/fuzz/**/suppressions


@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.10.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}


@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
uses: actions/checkout@v3
with:
repository: 'tendermint/jepsen'
@@ -58,7 +58,7 @@ jobs:
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'
- name: Archive results
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: results
path: tendermint/store/latest


@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: actions/checkout@v3
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"


@@ -1,7 +1,11 @@
name: Lint
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to master.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.
on:
pull_request:
push:
@@ -13,19 +17,21 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: actions/setup-go@v2
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.16'
- uses: technote-space/get-diff-action@v5
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v3.1.0
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}


@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v4
env:

.github/workflows/markdown-links.yml (new file)

@@ -0,0 +1,23 @@
name: Check Markdown links
on:
push:
branches:
- master
pull_request:
branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF

.github/workflows/proto-check.yml vendored Normal file
View File

@@ -0,0 +1,29 @@
name: Proto Check
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a file in the proto directory
# has been modified.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "proto/**"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-breaking-action@v1
with:
against: 'https://github.com/tendermint/tendermint.git#branch=v0.35.x'

View File

@@ -16,7 +16,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1.10.0
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v3
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile

View File

@@ -1,23 +0,0 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: check-breakage
run: make proto-check-breaking-ci

View File

@@ -12,26 +12,28 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- name: Build
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had

View File

@@ -16,11 +16,11 @@ jobs:
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -32,7 +32,7 @@ jobs:
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
@@ -41,8 +41,8 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -50,26 +50,26 @@ jobs:
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v3
with:
file: ./coverage.txt
if: env.GIT_DIFF

.gitignore vendored
View File

@@ -10,7 +10,7 @@
.idea/
.revision
.tendermint
.tendermint-lite
.tendermint-light
.terraform
.vagrant
.vendor-new/
@@ -47,3 +47,10 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
*.aux
*.bbl
*.blg
*.pdf
*.gz
*.dvi
.idea

.markdownlint.yml Normal file
View File

@@ -0,0 +1,11 @@
default: true
MD001: false
MD007: { indent: 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false

View File

@@ -2,6 +2,115 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.9
July 20, 2022
This release fixes a deadlock that could occur in some cases when using the
priority mempool with the ABCI socket client.
### BUG FIXES
- [mempool] [\#9030](https://github.com/tendermint/tendermint/pull/9030) rework lock discipline to mitigate callback deadlocks (@creachadair)
## v0.35.8
July 12, 2022
Special thanks to external contributors on this release: @joeabbey
This release fixes an unbounded heap growth issue in the implementation of the
priority mempool, as well as some configuration, logging, and peer dialing
improvements in the non-legacy p2p stack. It also adds a new opt-in
"simple-priority" value for the `p2p.queue-type` setting, that should improve
gossip performance for non-legacy peer networks.
### BREAKING CHANGES
- CLI/RPC/Config
- [node] [\#8902](https://github.com/tendermint/tendermint/pull/8902) Always start blocksync and avoid misconfiguration (@tychoish)
### FEATURES
- [cli] [\#8675](https://github.com/tendermint/tendermint/pull/8675) Add command to force compact goleveldb databases (@cmwaters)
### IMPROVEMENTS
- [p2p] [\#8914](https://github.com/tendermint/tendermint/pull/8914) [\#8875](https://github.com/tendermint/tendermint/pull/8875) Improvements to peer dialing (backported). (@tychoish)
- [p2p] [\#8820](https://github.com/tendermint/tendermint/pull/8820) add eviction metrics and cleanup dialing error handling (backport #8819) (@tychoish)
- [logging] [\#8896](https://github.com/tendermint/tendermint/pull/8896) Do not pre-process log results (backport #8895). (@tychoish)
- [p2p] [\#8956](https://github.com/tendermint/tendermint/pull/8956) Simpler priority queue (backport #8929). (@tychoish)
### BUG FIXES
- [mempool] [\#8944](https://github.com/tendermint/tendermint/pull/8944) Fix unbounded heap growth in the priority mempool. (@creachadair)
- [p2p] [\#8869](https://github.com/tendermint/tendermint/pull/8869) Set empty timeouts to configured values. (backport #8847). (@williambanfield)
## v0.35.7
June 16, 2022
### BUG FIXES
- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684)
- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712)
- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759)
### BREAKING CHANGES
- P2P Protocol
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
## v0.35.6
June 3, 2022
### FEATURES
- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish)
### BUG FIXES
- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish)
- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters)
- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't use global config for reset commands (@cmwaters)
- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish)
## v0.35.5
May 26, 2022
### BUG FIXES
- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish)
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish)
- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair)
- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair)
## v0.35.4
April 18, 2022
Special thanks to external contributors on this release: @firelizzard18
### FEATURES
- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)
### IMPROVEMENTS
### BUG FIXES
- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
## v0.35.3
April 8, 2022
@@ -984,7 +1093,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).
`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/consensus/light-client.md) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.
### BREAKING CHANGES:

View File

@@ -2,9 +2,9 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.4
## v0.35.10
Month, DD, YYYY
Month DD, YYYY
Special thanks to external contributors on this release:
@@ -24,4 +24,6 @@ Special thanks to external contributors on this release:
### IMPROVEMENTS
- (indexer) \#8625 Fix overriding tx index of duplicated txs.
### BUG FIXES

View File

@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.
If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc)
Comment](https://github.com/tendermint/tendermint/tree/master/docs/rfc)
in the Tendermint spec repo. Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.

View File

@@ -83,14 +83,29 @@ $(BUILDDIR)/:
proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all
proto-gen:
@echo "Generating Go packages for .proto files"
@$(DOCKER_PROTO) sh ./scripts/protocgen.sh
check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
endif
.PHONY: check-proto-deps
check-proto-format-deps:
ifeq (,$(shell which clang-format))
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps
proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen
proto-lint:
@echo "Running lint checks for .proto files"
@$(DOCKER_PROTO) buf lint --error-format=json
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf lint
.PHONY: proto-lint
proto-format:
@@ -98,16 +113,14 @@ proto-format:
@$(DOCKER_PROTO) find ./ -not -path "./third_party/*" -name '*.proto' -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking:
@echo "Checking for breaking changes in .proto files"
@$(DOCKER_PROTO) buf breaking --against .git#branch=$(BASE_BRANCH)
proto-check-breaking: check-proto-deps
@echo "Checking for breaking changes in Protobuf files against local branch"
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
.PHONY: proto-check-breaking
proto-check-breaking-ci:
@echo "Checking for breaking changes in .proto files"
@$(DOCKER_PROTO) buf breaking --against $(HTTPS_GIT)#branch=$(BASE_BRANCH)
.PHONY: proto-check-breaking-ci
###############################################################################
### Build ABCI ###
###############################################################################
@@ -228,10 +241,8 @@ build-docs:
### Docker image ###
###############################################################################
build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
build-docker:
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
rm -rf DOCKER/tendermint
.PHONY: build-docker

View File

@@ -20,7 +20,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
For protocol details, see [the specification](https://github.com/tendermint/spec).
For protocol details, see [the specification](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/README.md).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
@@ -70,7 +70,7 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
[specifications](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/README.md), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).

View File

@@ -44,26 +44,47 @@ This guide provides instructions for upgrading to specific versions of Tendermin
* The fast sync process as well as the blockchain package and service has all
been renamed to block sync
* We have added a new, experimental tool to help operators migrate
configuration files created by previous versions of Tendermint.
To try this tool, run:
```shell
# Install the tool.
go install github.com/tendermint/tendermint/scripts/confix@v0.35.x
# Run the tool with the old configuration file as input.
# Replace the -config argument with your path.
confix -config ~/.tendermint/config/config.toml -out updated.toml
```
This tool should be able to update configurations from v0.34 to v0.35. We
plan to extend it to handle older configuration files in the future. For now,
it will report an error (without making any changes) if it does not recognize
the version that created the file.
### Database Key Format Changes
The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)` which you can
operationalize as makes sense for your deployment.
script provided in this release.
The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
function `Migrate(context.Context, db.DB)` which you can operationalize as
makes sense for your deployment.
For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke as in:
tendermint key-migrate
This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
This reads the configuration file as normal and allows the `--db-backend` and
`--db-dir` flags to override the database location as needed.
The migration operation is idempotent and can be run more than once,
if needed.
The migration operation is intended to be idempotent, and should be safe to
rerun on the same database multiple times. As a safety measure, however, we
recommend that operators test out the migration on a copy of the database
first, if it is practical to do so, before applying it to the production data.
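For orientation, the snippet below is a minimal, hedged sketch of driving the
migration programmatically rather than through the CLI. The `tm-db` import
path, the GoLevelDB backend constant, and the example data directory are
assumptions based on the text above; note that newer code in this comparison
passes the database name as an extra argument to `Migrate`, so check the
signature shipped with your release.

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db" // assumed import path for the database backend
	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	ctx := context.Background()

	// Open one of the node's databases in place. Adjust the backend and
	// data directory to match your node's configuration.
	db, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, "/path/to/.tendermint/data")
	if err != nil {
		log.Fatalf("opening database: %v", err)
	}
	defer db.Close()

	// Migrate rewrites keys in place and is intended to be idempotent.
	// Some releases take an additional database-name argument here.
	if err := keymigrate.Migrate(ctx, db); err != nil {
		log.Fatalf("running key migration: %v", err)
	}
}
```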
### CLI Changes
@@ -113,11 +134,11 @@ To access any of the functionality previously available via the
`node.Node` type, use the `*local.Local` "RPC" client, that exposes
the full RPC interface provided as direct function calls. Import the
`github.com/tendermint/tendermint/rpc/client/local` package and pass
the node service as in the following:
the node service as in the following:
```go
node := node.NewDefault() //construct the node object
// start and set up the node service
// start and set up the node service
client := local.New(node.(local.NodeService))
// use client object to interact with the node
@@ -144,10 +165,10 @@ both stacks.
The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.
To make use of the legacy P2P implementation, add or update the following field of
To make use of the legacy P2P implementation, add or update the following field of
your server's configuration file under the `[p2p]` section:
```toml
@@ -172,8 +193,8 @@ in the order in which they were received.
* `priority`: A priority queue of messages.
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
separate 'flow' for each of the channels of communication that exist between any two
peers. Tendermint maintains a channel per message type between peers. Each WDRR
queue maintains a shared buffer with a fixed capacity through which messages on different
@@ -692,14 +713,14 @@ due to changes in how various data structures are hashed.
Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:
* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)
* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/blockchain/state.md#consensusparams)
There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:
* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)
* [Vote](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/consensus/signing.md#votes)
Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress

View File

@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
A detailed description of the ABCI methods and message types is contained in:
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [The main spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md)
- [A protobuf file](../proto/tendermint/abci/types.proto)
- [A Go interface](./types/application.go)

View File

@@ -87,9 +87,15 @@ type ReqRes struct {
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx tmsync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
mtx tmsync.Mutex
// callbackInvoked tracks whether the callback has already been invoked
// during the regular execution of the request. It allows clients to set
// the callback concurrently without potentially invoking it twice by
// accident: once when 'SetCallback' is called and once during the normal
// request.
callbackInvoked bool
cb func(*types.Response) // A single callback that may be set.
}
func NewReqRes(req *types.Request) *ReqRes {
@@ -98,8 +104,8 @@ func NewReqRes(req *types.Request) *ReqRes {
WaitGroup: waitGroup1(),
Response: nil,
done: false,
cb: nil,
callbackInvoked: false,
cb: nil,
}
}
@@ -109,7 +115,7 @@ func NewReqRes(req *types.Request) *ReqRes {
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
r.mtx.Lock()
if r.done {
if r.callbackInvoked {
r.mtx.Unlock()
cb(r.Response)
return
@@ -128,6 +134,7 @@ func (r *ReqRes) InvokeCallback() {
if r.cb != nil {
r.cb(r.Response)
}
r.callbackInvoked = true
}
// GetCallback returns the configured callback of the ReqRes object which may be
@@ -142,13 +149,6 @@ func (r *ReqRes) GetCallback() func(*types.Response) {
return r.cb
}
// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
r.done = true
r.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)

View File

@@ -72,7 +72,6 @@ func (cli *grpcClient) OnStart() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
reqres.SetDone()
reqres.Done()
// Notify client listener if set
@@ -81,9 +80,7 @@ func (cli *grpcClient) OnStart() error {
}
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(reqres.Response)
}
reqres.InvokeCallback()
}
for reqres := range cli.chReqRes {
if reqres != nil {

View File

@@ -348,12 +348,13 @@ func (app *localClient) ApplySnapshotChunkSync(
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.Callback(req, res)
return newLocalReqRes(req, res)
rr := newLocalReqRes(req, res)
rr.callbackInvoked = true
return rr
}
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
reqRes := NewReqRes(req)
reqRes.Response = res
reqRes.SetDone()
return reqRes
}

View File

@@ -801,3 +801,18 @@ func (_m *Client) String() string {
func (_m *Client) Wait() {
_m.Called()
}
type mockConstructorTestingTNewClient interface {
mock.TestingT
Cleanup(func())
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
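As a usage note, the generated constructor above is meant to be called from a
test. The sketch below is hedged: the mocks import path is an assumption about
the repository layout, and only the `Wait` method shown in this diff is
exercised.

```go
package abciclient_test

import (
	"testing"

	// Assumed location of the generated mocks package.
	"github.com/tendermint/tendermint/abci/client/mocks"
)

func TestUsesMockClient(t *testing.T) {
	// NewClient registers a cleanup that asserts all expectations
	// automatically when the test finishes.
	client := mocks.NewClient(t)

	// Declare the expected call before exercising the code under test.
	client.On("Wait").Return()

	client.Wait()
	// No explicit AssertExpectations call is needed; t.Cleanup handles it.
}
```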

View File

@@ -3,6 +3,7 @@ package abciclient_test
import (
"context"
"fmt"
"sync"
"testing"
"time"
@@ -125,3 +126,73 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
}
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple
// times, once when set early and once when set late.
func TestCallbackInvokedWhenSetLate(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes, err := c.CheckTxAsync(context.Background(), types.RequestCheckTx{})
require.NoError(t, err)
done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()
<-done
var called bool
cb = func(_ *types.Response) {
called = true
}
reqRes.SetCallback(cb)
require.True(t, called)
}
type blockedABCIApplication struct {
wg *sync.WaitGroup
types.BaseApplication
}
func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
b.wg.Wait()
return b.BaseApplication.CheckTx(r)
}
// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
// set before the client completes the call into the app.
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes, err := c.CheckTxAsync(context.Background(), types.RequestCheckTx{})
require.NoError(t, err)
done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()
called := func() bool {
select {
case <-done:
return true
default:
return false
}
}
require.Eventually(t, called, time.Second, time.Millisecond*25)
}

View File

@@ -5,6 +5,9 @@ import (
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/encoding"
tmjson "github.com/tendermint/tendermint/libs/json"
)
const (
@@ -102,6 +105,48 @@ func (r *EventAttribute) UnmarshalJSON(b []byte) error {
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
// validatorUpdateJSON is the JSON encoding of a validator update.
//
// It handles translation of public keys from the protobuf representation to
// the legacy Amino-compatible format expected by RPC clients.
type validatorUpdateJSON struct {
PubKey json.RawMessage `json:"pub_key,omitempty"`
Power int64 `json:"power,string"`
}
func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) {
key, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
return nil, err
}
jkey, err := tmjson.Marshal(key)
if err != nil {
return nil, err
}
return json.Marshal(validatorUpdateJSON{
PubKey: jkey,
Power: v.GetPower(),
})
}
func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error {
var vu validatorUpdateJSON
if err := json.Unmarshal(data, &vu); err != nil {
return err
}
var key crypto.PubKey
if err := tmjson.Unmarshal(vu.PubKey, &key); err != nil {
return err
}
pkey, err := encoding.PubKeyToProto(key)
if err != nil {
return err
}
v.PubKey = pkey
v.Power = vu.Power
return nil
}
// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.
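A hedged round-trip sketch for the custom JSON (un)marshalling above; the
ed25519 key helper and the package locations are assumptions about the
repository layout, not part of this change.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding"
)

func main() {
	// Build a ValidatorUpdate from a fresh ed25519 key.
	pub := ed25519.GenPrivKey().PubKey()
	pkey, err := encoding.PubKeyToProto(pub)
	if err != nil {
		log.Fatal(err)
	}
	vu := types.ValidatorUpdate{PubKey: pkey, Power: 10}

	// Marshal via the pointer receiver so the Amino-compatible form is used.
	data, err := json.Marshal(&vu)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // power is encoded as a string per the struct tag

	// Unmarshal back into the protobuf representation.
	var back types.ValidatorUpdate
	if err := json.Unmarshal(data, &back); err != nil {
		log.Fatal(err)
	}
}
```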

View File

@@ -7715,7 +7715,10 @@ func (m *Request) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -7797,7 +7800,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -7847,7 +7853,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -7999,7 +8008,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8237,7 +8249,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8392,7 +8407,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8576,7 +8594,10 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8679,7 +8700,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8763,7 +8787,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8832,7 +8859,10 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8882,7 +8912,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -8932,7 +8965,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -9052,7 +9088,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -9159,7 +9198,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -9294,7 +9336,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -9869,7 +9914,10 @@ func (m *Response) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -9951,7 +9999,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10033,7 +10084,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10083,7 +10137,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10269,7 +10326,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10423,7 +10483,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10730,7 +10793,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -10814,7 +10880,10 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11168,7 +11237,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11439,7 +11511,10 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11593,7 +11668,10 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11696,7 +11774,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11780,7 +11861,10 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11849,7 +11933,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -11933,7 +12020,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12110,7 +12200,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12213,7 +12306,10 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12329,7 +12425,10 @@ func (m *Event) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12463,7 +12562,10 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12618,7 +12720,10 @@ func (m *TxResult) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12721,7 +12826,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12823,7 +12931,10 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -12926,7 +13037,10 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -13099,7 +13213,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
@@ -13274,7 +13391,10 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {

buf.work.yaml Normal file
View File

@@ -0,0 +1,3 @@
version: v1
directories:
- proto

View File

@@ -0,0 +1,69 @@
package commands
import (
"errors"
"path/filepath"
"sync"
"github.com/spf13/cobra"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/tendermint/tendermint/libs/log"
)
func MakeCompactDBCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "experimental-compact-goleveldb",
Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
Long: `
This is a temporary utility command that performs a force compaction on the state
and blockstores to reduce disk space for a pruning node. This should only be run
once the node has stopped. This command will likely be omitted in the future after
the planned refactor to the storage engine.
Currently, only GoLevelDB is supported.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if config.DBBackend != "goleveldb" {
return errors.New("compaction is currently only supported with goleveldb")
}
compactGoLevelDBs(config.RootDir, logger)
return nil
},
}
return cmd
}
func compactGoLevelDBs(rootDir string, logger log.Logger) {
dbNames := []string{"state", "blockstore"}
o := &opt.Options{
DisableSeeksCompaction: true,
}
wg := sync.WaitGroup{}
for _, dbName := range dbNames {
dbName := dbName
wg.Add(1)
go func() {
defer wg.Done()
dbPath := filepath.Join(rootDir, "data", dbName+".db")
store, err := leveldb.OpenFile(dbPath, o)
if err != nil {
logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
return
}
defer store.Close()
logger.Info("starting compaction...", "db", dbPath)
err = store.CompactRange(util.Range{Start: nil, Limit: nil})
if err != nil {
logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
}
}()
}
wg.Wait()
}

View File

@@ -5,7 +5,9 @@ import (
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/scripts/keymigrate"
"github.com/tendermint/tendermint/scripts/scmigrate"
)
@@ -15,53 +17,7 @@ func MakeKeyMigrateCommand() *cobra.Command {
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)
}
}
}
logger.Info("completed database migration successfully")
return nil
return RunDatabaseMigration(cmd.Context(), logger, config)
},
}
@@ -70,3 +26,50 @@ func MakeKeyMigrateCommand() *cobra.Command {
return cmd
}
func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *cfg.Config) error {
contexts := []string{
// this is ordered to put
// the more ephemeral tables first to
// reduce the possibility of the
// ephemeral data overwriting later data
"tx_index",
"light",
"blockstore",
"state",
"evidence",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: conf,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, dbctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)
}
}
}
logger.Info("completed database migration successfully")
return nil
}

View File

@@ -27,6 +27,11 @@ var ResetStateCmd = &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
config, err := ParseConfig()
if err != nil {
return err
}
return resetState(config.DBDir(), logger, keyType)
},
}
@@ -47,13 +52,27 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAllCmd(cmd *cobra.Command, args []string) error {
return resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger)
config, err := ParseConfig()
if err != nil {
return err
}
return resetAll(
config.DBDir(),
config.P2P.AddrBookFile(),
config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(),
logger,
)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) error {
config, err := ParseConfig()
if err != nil {
return err
}
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger, keyType)
}

View File

@@ -34,9 +34,6 @@ func AddNodeFlags(cmd *cobra.Command) {
config.PrivValidator.ListenAddr,
"socket address to listen on for connections from external priv-validator process")
// node flags
cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
// This check was added to give users an upgrade prompt to use the new flag for syncing.
//

View File

@@ -32,6 +32,7 @@ func main() {
cmd.InspectCmd,
cmd.RollbackStateCmd,
cmd.MakeKeyMigrateCommand(),
cmd.MakeCompactDBCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)

View File

@@ -712,6 +712,10 @@ type P2PConfig struct { //nolint: maligned
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`
// MaxOutgoingConnections defines the maximum number of connections
// reserved for outgoing connections; it must not exceed MaxConnections.
MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"`
// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
@@ -758,7 +762,7 @@ type P2PConfig struct { //nolint: maligned
UseLegacy bool `mapstructure:"use-legacy"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo", "priority" and "wdrr",
// layer uses. Options are: "fifo", "simple-priority", "priority", and "wdrr",
// with the default being "priority".
QueueType string `mapstructure:"queue-type"`
}
@@ -774,6 +778,7 @@ func DefaultP2PConfig() *P2PConfig {
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
MaxConnections: 64,
MaxOutgoingConnections: 12,
MaxIncomingConnectionAttempts: 100,
PersistentPeersMaxDialPeriod: 0 * time.Second,
FlushThrottleTimeout: 100 * time.Millisecond,
@@ -833,6 +838,9 @@ func (cfg *P2PConfig) ValidateBasic() error {
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
}
if cfg.MaxOutgoingConnections > cfg.MaxConnections {
return errors.New("max-outgoing-connections cannot be larger than max-connections")
}
return nil
}
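To illustrate the new knob in code, here is a small sketch that builds a P2P
configuration with the added field and runs the validation shown above; it
uses only identifiers that appear in this diff.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultP2PConfig()

	// max-outgoing-connections must not exceed max-connections,
	// otherwise ValidateBasic reports an error.
	cfg.MaxConnections = 64
	cfg.MaxOutgoingConnections = 12

	if err := cfg.ValidateBasic(); err != nil {
		fmt.Println("invalid p2p config:", err)
		return
	}
	fmt.Println("p2p config OK")
}
```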

View File

@@ -23,5 +23,6 @@ type DBProvider func(*DBContext) (dbm.DB, error)
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

View File

@@ -301,7 +301,9 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
# Enable the legacy p2p layer.
use-legacy = {{ .P2P.UseLegacy }}
# Select the p2p internal queue
# Select the p2p internal queue.
# Options are: "fifo", "simple-priority", "priority", and "wdrr"
# with the default being "priority".
queue-type = "{{ .P2P.QueueType }}"
# Address to listen for incoming connections
@@ -355,6 +357,10 @@ max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}
# Maximum number of connections reserved for outgoing
# connections. Must not exceed max-connections
max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }}
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}

View File

@@ -44,10 +44,6 @@ module.exports = {
{
title: 'Resources',
children: [
{
title: 'Developer Sessions',
path: '/DEV_SESSIONS.html'
},
{
// TODO(creachadair): Figure out how to make this per-branch.
// See: https://github.com/tendermint/tendermint/issues/7908

View File

@@ -15,7 +15,7 @@ the block itself is never stored.
Each event contains a type and a list of attributes, which are key-value pairs
denoting something about what happened during the method's execution. For more
details on `Events`, see the
[ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events)
[ABCI](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/abci/abci.md#events)
documentation.
An `Event` has a composite key associated with it. A `compositeKey` is

View File

@@ -106,10 +106,10 @@ Next, use the `tendermint testnet` command to create four directories of config
Before you can start the network, you'll need peer identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4.
```sh
tendermint show_node_id --home ./mytestnet/node0
tendermint show_node_id --home ./mytestnet/node1
tendermint show_node_id --home ./mytestnet/node2
tendermint show_node_id --home ./mytestnet/node3
tendermint show-node-id --home ./mytestnet/node0
tendermint show-node-id --home ./mytestnet/node1
tendermint show-node-id --home ./mytestnet/node2
tendermint show-node-id --home ./mytestnet/node3
```
Finally, from each machine, run:

View File

@@ -120,7 +120,7 @@ Next follows a standard block creation cycle, where we enter a new
round, propose a block, receive more than 2/3 of prevotes, then
precommits and finally have a chance to commit a block. For details,
please refer to [Byzantine Consensus
Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md).
Algorithm](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/consensus/consensus.md).
```sh
I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus

View File

@@ -18,39 +18,43 @@ Listen address can be changed in the config file (see
The following metrics are available:
| **Name** | **Type** | **Tags** | **Description** |
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
| **Name** | **Type** | **Tags** | **Description** |
|----------------------------------------|-----------|---------------|-----------------------------------------------------------------------------------------------------------|
| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | Counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | Counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | Gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_router_peer_queue_recv | Histogram | | The time taken to read off of a peer's queue before sending on the connection |
| p2p_router_peer_queue_send | Histogram | | The time taken to send on a peer's queue which will later be sent on the connection |
| p2p_router_channel_queue_send | Histogram | | The time taken to send on a p2p channel's queue which will later be consumed by the corresponding service |
| p2p_router_channel_queue_dropped_msgs | Counter | ch_id | The number of messages dropped from a peer's queue for a specific p2p channel |
| p2p_peer_queue_msg_size | Gauge | ch_id | The size of messages sent over a peer's queue for a specific p2p channel |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | Histogram | | Transaction sizes in bytes |
| mempool_failed_txs | Counter | | Number of failed transactions |
| mempool_recheck_times | Counter | | Number of transactions rechecked in the mempool |
| state_block_processing_time | Histogram | | Time between BeginBlock and EndBlock in ms |
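
For orientation, here is a minimal sketch of how one of these metrics could be registered in Go, following the go-kit/Prometheus pattern used by the consensus metrics code later in this changeset; the namespace value and helper name are illustrative only, not the node's actual wiring.

```go
package main

import (
	"github.com/go-kit/kit/metrics"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// newHeightGauge registers a gauge corresponding to the consensus_height row
// above; Prometheus exposes it as <namespace>_consensus_height.
func newHeightGauge(namespace string) metrics.Gauge {
	return prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: "consensus",
		Name:      "height",
		Help:      "Height of the chain.",
	}, nil)
}

func main() {
	height := newHeightGauge("tendermint")
	height.Set(1) // the consensus state machine would update this on each committed height
}
```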
## Useful queries

View File

@@ -8,7 +8,7 @@ order: 1
This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams.
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/tendermint/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status.
@@ -43,7 +43,7 @@ Added a new `EventSink` interface to allow alternatives to Tendermint's propriet
### ABCI++
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md)
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/tendermint/blob/v0.35.x/rfc/004-abci%2B%2B.md)
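
To make the shape of these hooks concrete, below is a rough, purely illustrative Go sketch. The method names follow the ABCI++ RFC linked above, but the request/response types and signatures are simplified stand-ins, not the actual protobuf definitions.

```go
package abcisketch

// Illustrative only: simplified request/response shapes standing in for the
// protobuf messages defined in the ABCI++ RFC.
type (
	RequestPrepareProposal  struct{ Txs [][]byte }
	ResponsePrepareProposal struct{ Txs [][]byte }

	RequestProcessProposal  struct{ Txs [][]byte }
	ResponseProcessProposal struct{ Accept bool }

	RequestExtendVote  struct{ BlockHash []byte }
	ResponseExtendVote struct{ VoteExtension []byte }

	RequestVerifyVoteExtension  struct{ VoteExtension []byte }
	ResponseVerifyVoteExtension struct{ Accept bool }
)

// Application lists the new hooks described above.
type Application interface {
	// Reorder, add, or drop transactions before they go into a proposed block.
	PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
	// Verify a proposed block before voting on it.
	ProcessProposal(RequestProcessProposal) ResponseProcessProposal
	// Inject signed, application-defined data into a validator's vote.
	ExtendVote(RequestExtendVote) ResponseExtendVote
	// Check another validator's vote extension.
	VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
}
```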
### Proposer-Based Timestamps

View File

@@ -23,7 +23,7 @@ explained in a forthcoming document.
For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
block, because blocks can be large; that is, they do not embed the block inside `Proposal` and
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
[Blockchain](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/core/data_structures.md#blockid) section)
that uniquely identifies each block. The block itself is
disseminated to the validator processes using a peer-to-peer gossip protocol. It starts with the
proposer splitting the block into a number of block parts, which are then gossiped between
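
As a minimal illustration of the proposer-side flow described in this excerpt, the sketch below uses the part-set helpers that appear elsewhere in this changeset (`MakePartSet`, `PartSetHeader`, `BlockID`). The function is hypothetical and error handling is omitted; it is not the reactor's actual code.

```go
package example

import (
	"github.com/tendermint/tendermint/types"
)

// proposeBlockParts sketches how a proposer splits a block into parts and
// derives the BlockID that validators actually vote on.
func proposeBlockParts(block *types.Block) (types.BlockID, []*types.Part) {
	// Split the block into fixed-size parts.
	partSet := block.MakePartSet(types.BlockPartSizeBytes)

	// Proposals and votes carry only this BlockID, not the block itself.
	blockID := types.BlockID{
		Hash:          block.Hash(),
		PartSetHeader: partSet.Header(),
	}

	// The parts themselves are what get gossiped to peers, one message each.
	parts := make([]*types.Part, 0, partSet.Total())
	for i := 0; i < int(partSet.Total()); i++ {
		parts = append(parts, partSet.GetPart(i))
	}
	return blockID, parts
}
```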

View File

@@ -43,7 +43,7 @@ transactions](../app-dev/indexing-transactions.md) for details.
When the validator set changes, a ValidatorSetUpdates event is published. The
event carries a list of pubkey/power pairs. The list is the same one
Tendermint receives from the ABCI application (see the [EndBlock
section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in
section](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/abci/abci.md#endblock) in
the ABCI spec).
Response:

View File

@@ -49,7 +49,7 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g
chain IDs, you will have a bad time. The ChainID must be no more than 50 characters long.
- `initial_height`: Height at which Tendermint should begin. If a blockchain is conducting a network upgrade,
starting from the stopped height preserves the uniqueness of previous heights.
- `consensus_params` [spec](https://github.com/tendermint/spec/blob/master/spec/core/state.md#consensusparams)
- `consensus_params` [spec](https://github.com/tendermint/tendermint/blob/v0.35.x/spec/core/state.md#consensusparams)
- `block`
- `max_bytes`: Max block size, in bytes.
- `max_gas`: Max gas per block.

go.mod
View File

@@ -3,12 +3,16 @@ module github.com/tendermint/tendermint
go 1.16
require (
github.com/BurntSushi/toml v1.1.0
github.com/BurntSushi/toml v1.2.0
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.3.0
github.com/btcsuite/btcd v0.22.0-beta
github.com/adlio/schema v1.3.3
github.com/btcsuite/btcd v0.22.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/bufbuild/buf v1.6.0
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/creachadair/atomicfile v0.2.6
github.com/creachadair/taskgroup v0.3.2
github.com/creachadair/tomledit v0.0.23
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
@@ -16,33 +20,36 @@ require (
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.45.2
github.com/golangci/golangci-lint v1.47.2
github.com/google/go-cmp v0.5.8
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.5
github.com/libp2p/go-buffer-pool v0.0.2
github.com/lib/pq v1.10.6
github.com/libp2p/go-buffer-pool v0.1.0
github.com/minio/highwayhash v1.0.2
github.com/mroth/weightedrand v0.4.1
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_golang v1.12.2
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/rs/zerolog v1.27.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.1
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.10.4
golang.org/x/crypto v0.0.0-20220214200702-86341886e292
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.45.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
github.com/vektra/mockery/v2 v2.14.0
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
google.golang.org/grpc v1.48.0
gotest.tools v2.2.0+incompatible // indirect
pgregory.net/rapid v0.4.8
)

go.sum

File diff suppressed because it is too large

View File

@@ -544,8 +544,15 @@ FOR_LOOP:
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err == nil {
// validate the block before we persist it
err = r.blockExec.ValidateBlock(state, first)
}
// If either of the checks failed, we log the error and request a new block
// at that height
if err != nil {
err = fmt.Errorf("invalid last commit: %w", err)
r.Logger.Error(
err.Error(),
"last_commit", second.LastCommit,
@@ -570,37 +577,34 @@ FOR_LOOP:
}
continue FOR_LOOP
} else {
r.pool.PopRequest()
}
// TODO: batch saves so we do not persist to disk every block
r.store.SaveBlock(first, firstParts, second.LastCommit)
r.pool.PopRequest()
var err error
// TODO: batch saves so we do not persist to disk every block
r.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: Same thing for app - but we would need a way to get the hash
// without persisting the state.
state, err = r.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO: This is bad, are we zombie?
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
// TODO: Same thing for app - but we would need a way to get the hash
// without persisting the state.
state, err = r.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
r.metrics.RecordConsMetrics(first)
r.metrics.RecordConsMetrics(first)
blocksSynced++
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"block sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
)
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"block sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
)
lastHundred = time.Now()
}
lastHundred = time.Now()
}
continue FOR_LOOP

View File

@@ -663,6 +663,39 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
ensureVote(voteCh, height, round, tmproto.PrevoteType)
}
func ensurePrevoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) {
t.Helper()
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrevoteType)
}
func ensurePrecommitMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) {
t.Helper()
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrecommitType)
}
func ensureVoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte, voteType tmproto.SignedMsgType) {
t.Helper()
select {
case <-time.After(ensureTimeout):
t.Fatal("Timeout expired while waiting for NewVote event")
case msg := <-voteCh:
voteEvent, ok := msg.Data().(types.EventDataVote)
require.True(t, ok, "expected a EventDataVote, got %T. Wrong subscription channel?",
msg.Data())
vote := voteEvent.Vote
require.Equal(t, height, vote.Height)
require.Equal(t, round, vote.Round)
require.Equal(t, voteType, vote.Type)
if hash == nil {
require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash)
} else {
require.True(t, bytes.Equal(vote.BlockID.Hash, hash), "Expected prevote to be for %X, got %X", hash, vote.BlockID.Hash)
}
}
}
func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
voteType tmproto.SignedMsgType) {
select {

View File

@@ -97,6 +97,8 @@ type Metrics struct {
// timestamp and the timestamp of the latest prevote in a round where 100%
// of the voting power on the network issued prevotes.
FullPrevoteMessageDelay metrics.Gauge
Locks metrics.Gauge
}
// PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -265,6 +267,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Help: "Difference in seconds between the proposal timestamp and the timestamp " +
"of the latest prevote that achieved 100% of the voting power in the prevote step.",
}, labels).With(labelsAndValues...),
Locks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "locks",
Help: "lock debugging",
}, append(labels, "function")).With(labelsAndValues...),
}
}
@@ -301,6 +309,7 @@ func NopMetrics() *Metrics {
BlockGossipPartsReceived: discard.NewCounter(),
QuorumPrevoteMessageDelay: discard.NewGauge(),
FullPrevoteMessageDelay: discard.NewGauge(),
Locks: discard.NewGauge(),
}
}

View File

@@ -26,3 +26,18 @@ func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
_m.Called(_a0, _a1)
}
type mockConstructorTestingTNewConsSyncReactor interface {
mock.TestingT
Cleanup(func())
}
// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewConsSyncReactor(t mockConstructorTestingTNewConsSyncReactor) *ConsSyncReactor {
mock := &ConsSyncReactor{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -231,7 +231,6 @@ func (r *Reactor) OnStop() {
if !r.WaitSync() {
r.state.Wait()
}
r.mtx.Lock()
// Close and wait for each of the peers to shutdown.
// This is safe to perform with the lock since none of the peers require the
@@ -544,6 +543,8 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer
func (r *Reactor) gossipDataRoutine(ps *PeerState) {
logger := r.Logger.With("peer", ps.peerID)
timer := time.NewTimer(r.state.config.PeerGossipSleepDuration)
defer timer.Stop()
OUTER_LOOP:
for {
@@ -551,6 +552,8 @@ OUTER_LOOP:
return
}
timer.Reset(r.state.config.PeerGossipSleepDuration)
select {
case <-r.closeCh:
return
@@ -558,8 +561,7 @@ OUTER_LOOP:
// The peer is marked for removal via a PeerUpdate as the doneCh was
// explicitly closed to signal we should exit.
return
default:
case <-timer.C:
}
rs := r.getRoundState()
@@ -605,7 +607,6 @@ OUTER_LOOP:
"blockstoreBase", blockStoreBase,
"blockstoreHeight", r.state.blockStore.Height(),
)
time.Sleep(r.state.config.PeerGossipSleepDuration)
} else {
ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
}
@@ -621,7 +622,6 @@ OUTER_LOOP:
// if height and round don't match, sleep
if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
time.Sleep(r.state.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
@@ -676,12 +676,8 @@ OUTER_LOOP:
}:
}
}
continue OUTER_LOOP
}
// nothing to do -- sleep
time.Sleep(r.state.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
}
@@ -1439,7 +1435,9 @@ func (r *Reactor) peerStatsRoutine() {
select {
case msg := <-r.state.statsMsgQueue:
r.Metrics.Locks.With("function", "GetPeerState").Add(1)
ps, ok := r.GetPeerState(msg.PeerID)
r.Metrics.Locks.With("function", "GetPeerState").Add(-1)
if !ok || ps == nil {
r.Logger.Debug("attempt to update stats for non-existent peer", "peer", msg.PeerID)
continue
@@ -1447,19 +1445,29 @@ func (r *Reactor) peerStatsRoutine() {
switch msg.Msg.(type) {
case *VoteMessage:
if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
r.Metrics.Locks.With("function", "RecordVote").Add(1)
numVotes := ps.RecordVote()
r.Metrics.Locks.With("function", "RecordVote").Add(-1)
if numVotes%votesToContributeToBecomeGoodPeer == 0 {
r.Metrics.Locks.With("function", "SendUpdate").Add(1)
r.peerUpdates.SendUpdate(p2p.PeerUpdate{
NodeID: msg.PeerID,
Status: p2p.PeerStatusGood,
})
r.Metrics.Locks.With("function", "SendUpdate").Add(-1)
}
case *BlockPartMessage:
if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
r.Metrics.Locks.With("function", "RecordBlockPart").Add(1)
numParts := ps.RecordBlockPart()
r.Metrics.Locks.With("function", "RecordBlockPart").Add(-1)
if numParts%blocksToContributeToBecomeGoodPeer == 0 {
r.Metrics.Locks.With("function", "SendUpdate").Add(1)
r.peerUpdates.SendUpdate(p2p.PeerUpdate{
NodeID: msg.PeerID,
Status: p2p.PeerStatusGood,
})
r.Metrics.Locks.With("function", "SendUpdate").Add(-1)
}
}
case <-r.closeCh:

View File

@@ -275,7 +275,11 @@ func (cs *State) GetValidators() (int64, []*types.Validator) {
// SetPrivValidator sets the private validator account for signing votes. It
// immediately requests pubkey and caches it.
func (cs *State) SetPrivValidator(priv types.PrivValidator) {
// Doubtful
cs.metrics.Locks.With("function", "SetPrivValidator").Add(float64(1))
cs.mtx.Lock()
defer cs.metrics.Locks.With("function", "SetPrivValidator").Add(float64(-1))
defer cs.mtx.Unlock()
cs.privValidator = priv
@@ -308,8 +312,10 @@ func (cs *State) SetPrivValidator(priv types.PrivValidator) {
// SetTimeoutTicker sets the local timer. It may be useful to overwrite for
// testing.
func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
defer cs.metrics.Locks.With("function", "SetTimeoutTicker").Add(float64(1))
cs.mtx.Lock()
cs.timeoutTicker = timeoutTicker
defer cs.metrics.Locks.With("function", "SetTimeoutTicker").Add(float64(-1))
cs.mtx.Unlock()
}
@@ -862,7 +868,10 @@ func (cs *State) receiveRoutine(maxSteps int) {
// state transitions on complete-proposal, 2/3-any, 2/3-one
func (cs *State) handleMsg(mi msgInfo) {
// feels likely
cs.metrics.Locks.With("function", "handlemsg").Add(float64(1))
cs.mtx.Lock()
defer cs.metrics.Locks.With("function", "handlemsg").Add(float64(-1))
defer cs.mtx.Unlock()
var (
added bool
@@ -892,8 +901,10 @@ func (cs *State) handleMsg(mi msgInfo) {
// of RoundState and only locking when switching out State's copy of
// RoundState with the updated copy or by emitting RoundState events in
// more places for routines depending on it to listen for.
cs.metrics.Locks.With("function", "handlemsg_int").Add(float64(1))
cs.mtx.Unlock()
cs.metrics.Locks.With("function", "handlemsg_int").Add(float64(-1))
cs.mtx.Lock()
if added && cs.ProposalBlockParts.IsComplete() {
cs.handleCompleteProposal(msg.Height)
@@ -917,7 +928,9 @@ func (cs *State) handleMsg(mi msgInfo) {
// if the vote gives us a 2/3-any or 2/3-one, we transition
added, err = cs.tryAddVote(msg.Vote, peerID)
if added {
cs.metrics.Locks.With("function", "statsMsgQueue").Add(float64(1))
cs.statsMsgQueue <- mi
cs.metrics.Locks.With("function", "statsMsgQueue").Add(float64(-1))
}
// if err == ErrAddingVote {
@@ -962,7 +975,10 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
}
// the timeout will now cause a state transition
// also possible
cs.metrics.Locks.With("function", "handleTimeout").Add(float64(1))
cs.mtx.Lock()
defer cs.metrics.Locks.With("function", "handleTimeout").Add(float64(-1))
defer cs.mtx.Unlock()
switch ti.Step {
@@ -1850,6 +1866,8 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
//-----------------------------------------------------------------------------
func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
cs.metrics.Locks.With("function", "setProposal").Add(float64(1))
defer cs.metrics.Locks.With("function", "setProposal").Add(float64(-1))
// Already have one
// TODO: possibly catch double proposals
if cs.Proposal != nil {
@@ -1893,6 +1911,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) {
cs.metrics.Locks.With("function", "addProposalBlockPart").Add(float64(1))
defer cs.metrics.Locks.With("function", "addProposalBlockPart").Add(float64(-1))
height, round, part := msg.Height, msg.Round, msg.Part
// Blocks might be reused, so round mismatch is OK
@@ -1963,6 +1983,8 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
}
func (cs *State) handleCompleteProposal(blockHeight int64) {
cs.metrics.Locks.With("function", "handleCompleteProposal").Add(float64(1))
defer cs.metrics.Locks.With("function", "handleCompleteProposal").Add(float64(-1))
// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
@@ -1999,6 +2021,8 @@ func (cs *State) handleCompleteProposal(blockHeight int64) {
// Attempt to add the vote. if its a duplicate signature, dupeout the validator
func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) {
cs.metrics.Locks.With("function", "tryAddVote").Add(float64(1))
defer cs.metrics.Locks.With("function", "tryAddVote").Add(float64(-1))
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@@ -2046,6 +2070,8 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error)
}
func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
cs.metrics.Locks.With("function", "addvote").Add(float64(1))
defer cs.metrics.Locks.With("function", "addvote").Add(float64(-1))
cs.Logger.Debug(
"adding vote",
"vote_height", vote.Height,
@@ -2099,10 +2125,15 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
return
}
cs.metrics.Locks.With("function", "addvote").Add(float64(1))
if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
cs.metrics.Locks.With("function", "publisheventvote").Add(float64(-1))
return added, err
}
cs.metrics.Locks.With("function", "publisheventvote").Add(float64(-1))
cs.metrics.Locks.With("function", "fireeventvote").Add(float64(1))
cs.evsw.FireEvent(types.EventVoteValue, vote)
cs.metrics.Locks.With("function", "fireeventvote").Add(float64(-1))
switch vote.Type {
case tmproto.PrevoteType:

View File

@@ -243,8 +243,7 @@ func TestStateBadProposal(t *testing.T) {
ensureProposal(proposalCh, height, round, blockID)
// wait for prevote
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
ensurePrevoteMatch(t, voteCh, height, round, nil)
// add bad prevote from vs2 and wait for it
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
@@ -308,8 +307,7 @@ func TestStateOversizedBlock(t *testing.T) {
// and then should send nil prevote and precommit regardless of whether other validators prevote and
// precommit on it
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
ensurePrevoteMatch(t, voteCh, height, round, nil)
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
ensurePrevote(voteCh, height, round)
ensurePrecommit(voteCh, height, round)
@@ -352,8 +350,7 @@ func TestStateFullRound1(t *testing.T) {
ensureNewProposal(propCh, height, round)
propBlockHash := cs.GetRoundState().ProposalBlock.Hash()
ensurePrevote(voteCh, height, round) // wait for prevote
validatePrevote(t, cs, round, vss[0], propBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
ensurePrecommit(voteCh, height, round) // wait for precommit
@@ -376,8 +373,8 @@ func TestStateFullRoundNil(t *testing.T) {
cs.enterPrevote(height, round)
cs.startRoutines(4)
ensurePrevote(voteCh, height, round) // prevote
ensurePrecommit(voteCh, height, round) // precommit
ensurePrevoteMatch(t, voteCh, height, round, nil) // prevote
ensurePrecommitMatch(t, voteCh, height, round, nil) // precommit
// should prevote and precommit nil
validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil)
@@ -502,10 +499,8 @@ func TestStateLockNoPOL(t *testing.T) {
panic("Expected proposal block to be nil")
}
// wait to finish prevote
ensurePrevote(voteCh, height, round)
// we should have prevoted our locked block
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
// wait to finish prevote and ensure we have prevoted our locked block
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator
signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
@@ -548,8 +543,7 @@ func TestStateLockNoPOL(t *testing.T) {
rs.LockedBlock))
}
ensurePrevote(voteCh, height, round) // prevote
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
ensurePrevote(voteCh, height, round)
@@ -594,9 +588,8 @@ func TestStateLockNoPOL(t *testing.T) {
}
ensureNewProposal(proposalCh, height, round)
ensurePrevote(voteCh, height, round) // prevote
// prevote for locked block (not proposal)
validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash())
ensurePrevoteMatch(t, voteCh, height, round, cs1.LockedBlock.Hash())
// prevote for proposed block
signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
@@ -704,8 +697,7 @@ func TestStateLockPOLRelock(t *testing.T) {
ensureNewProposal(proposalCh, height, round)
// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], theBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
// now lets add prevotes from everyone else for the new block
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
@@ -757,8 +749,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
theBlockHash := rs.ProposalBlock.Hash()
theBlockParts := rs.ProposalBlockParts.Header()
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], theBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
@@ -796,8 +787,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
ensureNewProposal(proposalCh, height, round)
// go to prevote, prevote for locked block (not proposal)
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], lockedBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, lockedBlockHash)
// now lets add prevotes from everyone else for nil (a polka!)
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
@@ -888,8 +878,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
// now we're on a new round but v1 misses the proposal
// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], firstBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, firstBlockHash)
// now lets add prevotes from everyone else for the new block
signAddVotes(config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4)
@@ -933,9 +922,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
t.Fatal(err)
}
ensurePrevote(voteCh, height, round)
// we are no longer locked to the first block so we should be able to prevote
validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, thirdPropBlockHash)
signAddVotes(config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4)
@@ -975,8 +962,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
rs := cs1.GetRoundState()
propBlock := rs.ProposalBlock
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlock.Hash())
ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash())
// the others sign a polka but we don't see it
prevotes := signVotes(config, tmproto.PrevoteType,
@@ -1022,8 +1008,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))
// go to prevote, prevote for proposal block
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
// now we see the others prevote for it, so we should lock on it
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
@@ -1049,10 +1034,8 @@ func TestStateLockPOLSafety1(t *testing.T) {
// timeout of propose
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
// finish prevote
ensurePrevote(voteCh, height, round)
// we should prevote what we're locked on
validatePrevote(t, cs1, round, vss[0], propBlockHash)
// finish prevote and vote for the block we're locked on
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
@@ -1119,8 +1102,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
}
ensureNewProposal(proposalCh, height, round)
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash1)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash1)
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
@@ -1162,9 +1144,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
ensureNewProposal(proposalCh, height, round)
ensureNoNewUnlock(unlockCh)
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash1)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash1)
}
// 4 vals.
@@ -1201,8 +1181,7 @@ func TestProposeValidBlock(t *testing.T) {
propBlock := rs.ProposalBlock
propBlockHash := propBlock.Hash()
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
// the others sign a polka
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)
@@ -1225,8 +1204,7 @@ func TestProposeValidBlock(t *testing.T) {
// timeout of propose
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
@@ -1294,8 +1272,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
propBlockHash := propBlock.Hash()
propBlockParts := propBlock.MakePartSet(partSize)
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], propBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, propBlockHash)
// vs2 send prevote for propBlock
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2)
@@ -1358,8 +1335,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
ensurePrevoteMatch(t, voteCh, height, round, nil)
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockHash := propBlock.Hash()
@@ -1445,8 +1421,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
ensurePrevoteMatch(t, voteCh, height, round, nil)
}
// 4 vals, 3 Precommits for nil from the higher round.
@@ -1515,8 +1490,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], nil)
ensurePrevoteMatch(t, voteCh, height, round, nil)
}
// What we want:
@@ -1645,8 +1619,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
theBlockHash := rs.ProposalBlock.Hash()
theBlockParts := rs.ProposalBlockParts.Header()
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], theBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
@@ -1708,8 +1681,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
theBlockHash := rs.ProposalBlock.Hash()
theBlockParts := rs.ProposalBlockParts.Header()
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], theBlockHash)
ensurePrevoteMatch(t, voteCh, height, round, theBlockHash)
signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
@@ -1881,8 +1853,7 @@ func TestStateHalt1(t *testing.T) {
*/
// go to prevote, prevote for locked block
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash())
// now we receive the precommit from the previous round
addVotes(cs1, precommit4)

View File

@@ -119,9 +119,9 @@ func (t *timeoutTicker) timeoutRoutine() {
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Debug("Internal state machine timeout scheduled", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Debug("Internal state machine timeout elapsed ", "duration", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine

View File

@@ -1,7 +1,7 @@
/*
Package evidence handles all evidence storage and gossiping from detection to block proposal.
For the different types of evidence refer to the `evidence.go` file in the types package
or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.
or https://github.com/tendermint/tendermint/blob/v0.35.x/spec/consensus/light-client/accountability.md.
Gossiping

View File

@@ -57,3 +57,18 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
return r0
}
type mockConstructorTestingTNewBlockStore interface {
mock.TestingT
Cleanup(func())
}
// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore {
mock := &BlockStore{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -0,0 +1,155 @@
// Package confix applies changes to a Tendermint TOML configuration file, to
// update configurations created with an older version of Tendermint to a
// compatible format for a newer version.
package confix
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"github.com/creachadair/atomicfile"
"github.com/creachadair/tomledit"
"github.com/creachadair/tomledit/transform"
"github.com/spf13/viper"
"github.com/tendermint/tendermint/config"
)
// Upgrade reads the configuration file at configPath and applies any
// transformations necessary to upgrade it to the current version. If this
// succeeds, the transformed output is written to outputPath. As a special
// case, if outputPath == "" the output is written to stdout.
//
// It is safe if outputPath == inputPath. If a regular file outputPath already
// exists, it is overwritten. In case of error, the output is not written.
//
// Upgrade is a convenience wrapper for calls to LoadConfig, ApplyFixes, and
// CheckValid. If the caller requires more control over the behavior of the
// upgrade, call those functions directly.
func Upgrade(ctx context.Context, configPath, outputPath string) error {
if configPath == "" {
return errors.New("empty input configuration path")
}
doc, err := LoadConfig(configPath)
if err != nil {
return fmt.Errorf("loading config: %v", err)
}
if err := ApplyFixes(ctx, doc); err != nil {
return fmt.Errorf("updating %q: %v", configPath, err)
}
var buf bytes.Buffer
if err := tomledit.Format(&buf, doc); err != nil {
return fmt.Errorf("formatting config: %v", err)
}
// Verify that Tendermint can parse the results after our edits.
if err := CheckValid(buf.Bytes()); err != nil {
return fmt.Errorf("updated config is invalid: %v", err)
}
if outputPath == "" {
_, err = os.Stdout.Write(buf.Bytes())
} else {
err = atomicfile.WriteData(outputPath, buf.Bytes(), 0600)
}
return err
}
// ApplyFixes transforms doc and reports whether it succeeded.
func ApplyFixes(ctx context.Context, doc *tomledit.Document) error {
// Check what version of Tendermint might have created this config file, as
// a safety check for the updates we are about to make.
tmVersion := GuessConfigVersion(doc)
if tmVersion == vUnknown {
return errors.New("cannot tell what Tendermint version created this config")
} else if tmVersion < v34 || tmVersion > v36 {
// TODO(creachadair): Add in rewrites for older versions. This will
// require some digging to discover what the changes were. The upgrade
// instructions do not give specifics.
return fmt.Errorf("unable to update version %s config", tmVersion)
}
return plan.Apply(ctx, doc)
}
// LoadConfig loads and parses the TOML document from path.
func LoadConfig(path string) (*tomledit.Document, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return tomledit.Parse(f)
}
const (
vUnknown = ""
v32 = "v0.32"
v33 = "v0.33"
v34 = "v0.34"
v35 = "v0.35"
v36 = "v0.36"
)
// GuessConfigVersion attempts to figure out which version of Tendermint
// created the specified config document. It returns "" if the creating version
// cannot be determined, otherwise a string of the form "vX.YY".
func GuessConfigVersion(doc *tomledit.Document) string {
hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil
hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only
if hasDisableWS && !hasUseLegacy {
return v36
}
hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35
hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34
if hasBlockSync && hasStateSync {
return v35
} else if hasStateSync {
return v34
}
hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33
hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33
if hasIndexKeys && !hasIndexTags {
return v33
}
hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32
if hasIndexTags && hasFastSync {
return v32
}
// Something older, probably.
return vUnknown
}
// CheckValid checks whether the specified config appears to be a valid
// Tendermint config file. This emulates how the node loads the config.
func CheckValid(data []byte) error {
v := viper.New()
v.SetConfigType("toml")
if err := v.ReadConfig(bytes.NewReader(data)); err != nil {
return fmt.Errorf("reading config: %w", err)
}
var cfg config.Config
if err := v.Unmarshal(&cfg); err != nil {
return fmt.Errorf("decoding config: %w", err)
}
return cfg.ValidateBasic()
}
// WithLogWriter returns a child of ctx with a logger attached that sends
// output to w. This is a convenience wrapper for transform.WithLogWriter.
func WithLogWriter(ctx context.Context, w io.Writer) context.Context {
return transform.WithLogWriter(ctx, w)
}
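
A minimal sketch of driving this package, using the functions defined above (`WithLogWriter`, `LoadConfig`, `GuessConfigVersion`, `Upgrade`). Since the package lives under `internal/`, it can only be imported from inside the repository; the file paths here are illustrative.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/tendermint/tendermint/internal/libs/confix"
)

func main() {
	// Log each transformation step applied by the plan to stderr.
	ctx := confix.WithLogWriter(context.Background(), os.Stderr)

	// Report which Tendermint version appears to have written the file.
	doc, err := confix.LoadConfig("config.toml")
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}
	log.Printf("config looks like it was created by Tendermint %q", confix.GuessConfigVersion(doc))

	// Upgrade config.toml in place; output is only written if the result
	// still parses and validates as a Tendermint config.
	if err := confix.Upgrade(ctx, "config.toml", "config.toml"); err != nil {
		log.Fatalf("upgrading config: %v", err)
	}
}
```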

View File

@@ -0,0 +1,99 @@
package confix_test
import (
"bytes"
"context"
"strings"
"testing"
"github.com/creachadair/tomledit"
"github.com/google/go-cmp/cmp"
"github.com/tendermint/tendermint/internal/libs/confix"
)
func mustParseConfig(t *testing.T, path string) *tomledit.Document {
doc, err := confix.LoadConfig(path)
if err != nil {
t.Fatalf("Loading config: %v", err)
}
return doc
}
func TestGuessConfigVersion(t *testing.T) {
tests := []struct {
path, want string
}{
{"testdata/non-config.toml", ""},
{"testdata/v30-config.toml", ""},
{"testdata/v31-config.toml", ""},
{"testdata/v32-config.toml", "v0.32"},
{"testdata/v33-config.toml", "v0.33"},
{"testdata/v34-config.toml", "v0.34"},
{"testdata/v35-config.toml", "v0.35"},
{"testdata/v36-config.toml", "v0.36"},
}
for _, test := range tests {
t.Run(test.path, func(t *testing.T) {
got := confix.GuessConfigVersion(mustParseConfig(t, test.path))
if got != test.want {
t.Errorf("Wrong version: got %q, want %q", got, test.want)
}
})
}
}
func TestApplyFixes(t *testing.T) {
ctx := context.Background()
t.Run("Unknown", func(t *testing.T) {
err := confix.ApplyFixes(ctx, mustParseConfig(t, "testdata/v31-config.toml"))
if err == nil || !strings.Contains(err.Error(), "cannot tell what Tendermint version") {
t.Error("ApplyFixes succeeded, but should have failed for an unknown version")
}
})
t.Run("TooOld", func(t *testing.T) {
err := confix.ApplyFixes(ctx, mustParseConfig(t, "testdata/v33-config.toml"))
if err == nil || !strings.Contains(err.Error(), "unable to update version v0.33 config") {
t.Errorf("ApplyFixes: got %v, want version error", err)
}
})
t.Run("OK", func(t *testing.T) {
doc := mustParseConfig(t, "testdata/v34-config.toml")
if err := confix.ApplyFixes(ctx, doc); err != nil {
t.Fatalf("ApplyFixes: unexpected error: %v", err)
}
t.Run("Fixpoint", func(t *testing.T) {
// Verify that reapplying fixes to the same config succeeds, and does not
// make any additional changes.
var before bytes.Buffer
if err := tomledit.Format(&before, doc); err != nil {
t.Fatalf("Formatting document: %v", err)
}
if err := confix.CheckValid(before.Bytes()); err != nil {
t.Fatalf("Validating output: %v", err)
}
want := before.String()
// Re-parse the output from the first round of transformations.
doc2, err := tomledit.Parse(&before)
if err != nil {
t.Fatalf("Parsing fixed output: %v", err)
}
if err := confix.ApplyFixes(ctx, doc2); err != nil {
t.Fatalf("ApplyFixes: unexpected error: %v", err)
}
var after bytes.Buffer
if err := tomledit.Format(&after, doc2); err != nil {
t.Fatalf("Formatting document: %v", err)
}
got := after.String()
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("Reapplied fixes changed something: (-want, +got)\n%s", diff)
}
})
})
}

View File

@@ -0,0 +1,227 @@
package confix
import (
"context"
"errors"
"fmt"
"strings"
"github.com/creachadair/tomledit"
"github.com/creachadair/tomledit/parser"
"github.com/creachadair/tomledit/transform"
)
// The plan is the sequence of transformation steps that should be applied, in
// the given order, to convert a configuration file to be compatible with the
// current version of the config grammar.
//
// Transformation steps are specific to the target config version. For this
// reason, you must exercise caution when backporting changes to this script
// into older releases.
var plan = transform.Plan{
{
// Since https://github.com/tendermint/tendermint/pull/5777.
Desc: "Rename everything from snake_case to kebab-case",
T: transform.SnakeToKebab(),
},
{
// Since https://github.com/tendermint/tendermint/pull/6896.
Desc: "Rename [fastsync] to [blocksync]",
T: transform.Rename(parser.Key{"fastsync"}, parser.Key{"blocksync"}),
ErrorOK: true,
},
{
// Since https://github.com/tendermint/tendermint/pull/7159.
Desc: "Move top-level fast_sync key to blocksync.enable",
T: transform.MoveKey(
parser.Key{"fast-sync"},
parser.Key{"blocksync"},
parser.Key{"enable"},
),
ErrorOK: true,
},
{
// Since https://github.com/tendermint/tendermint/pull/6241.
Desc: `Add top-level mode setting (default "full")`,
T: transform.EnsureKey(nil, &parser.KeyValue{
Block: parser.Comments{"Mode of Node: full | validator | seed"},
Name: parser.Key{"mode"},
Value: parser.MustValue(`"full"`),
}),
ErrorOK: true,
},
{
// Since https://github.com/tendermint/tendermint/pull/6396.
Desc: "Remove vestigial mempool.wal-dir setting",
T: transform.Remove(parser.Key{"mempool", "wal-dir"}),
ErrorOK: true,
},
{
// Added in https://github.com/tendermint/tendermint/pull/6466.
Desc: `Add mempool.version default to "v1"`,
T: transform.EnsureKey(parser.Key{"mempool"}, &parser.KeyValue{
Block: parser.Comments{`Mempool version to use`},
Name: parser.Key{"version"},
Value: parser.MustValue(`"v1"`),
}),
ErrorOK: true,
},
{
// Since https://github.com/tendermint/tendermint/pull/6323.
Desc: "Add new [p2p] queue-type setting",
T: transform.EnsureKey(parser.Key{"p2p"}, &parser.KeyValue{
Block: parser.Comments{"Select the p2p internal queue"},
Name: parser.Key{"queue-type"},
Value: parser.MustValue(`"priority"`),
}),
ErrorOK: true,
},
{
// Since https://github.com/tendermint/tendermint/pull/6353.
Desc: "Add [p2p] connection count and rate limit settings",
T: transform.Func(func(_ context.Context, doc *tomledit.Document) error {
tab := transform.FindTable(doc, "p2p")
if tab == nil {
return errors.New("p2p table not found")
}
transform.InsertMapping(tab.Section, &parser.KeyValue{
Block: parser.Comments{"Maximum number of connections (inbound and outbound)."},
Name: parser.Key{"max-connections"},
Value: parser.MustValue("64"),
}, false)
transform.InsertMapping(tab.Section, &parser.KeyValue{
Block: parser.Comments{
"Rate limits the number of incoming connection attempts per IP address.",
},
Name: parser.Key{"max-incoming-connection-attempts"},
Value: parser.MustValue("100"),
}, false)
return nil
}),
},
{
// Added "chunk-fetchers" https://github.com/tendermint/tendermint/pull/6566.
// This value was backported into v0.34.11 (modulo casing).
// Renamed to "fetchers" https://github.com/tendermint/tendermint/pull/6587.
Desc: "Rename statesync.chunk-fetchers to statesync.fetchers",
T: transform.Func(func(ctx context.Context, doc *tomledit.Document) error {
// If the key already exists, rename it preserving its value.
if found := doc.First("statesync", "chunk-fetchers"); found != nil {
found.KeyValue.Name = parser.Key{"fetchers"}
return nil
}
// Otherwise, add it.
return transform.EnsureKey(parser.Key{"statesync"}, &parser.KeyValue{
Block: parser.Comments{
"The number of concurrent chunk and block fetchers to run (default: 4).",
},
Name: parser.Key{"fetchers"},
Value: parser.MustValue("4"),
})(ctx, doc)
}),
},
{
// Since https://github.com/tendermint/tendermint/pull/6807.
// Backported into v0.34.13 (modulo casing).
Desc: "Add statesync.use-p2p setting",
T: transform.EnsureKey(parser.Key{"statesync"}, &parser.KeyValue{
Block: parser.Comments{
"# State sync uses light client verification to verify state. This can be done either through the",
"# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer",
"# will be used.",
},
Name: parser.Key{"use-p2p"},
Value: parser.MustValue("false"),
}),
},
{
// v1 removed: https://github.com/tendermint/tendermint/pull/5728
// v2 deprecated: https://github.com/tendermint/tendermint/pull/6730
Desc: `Set blocksync.version to "v0"`,
T: transform.Func(func(_ context.Context, doc *tomledit.Document) error {
v := doc.First("blocksync", "version")
if v == nil {
return nil // nothing to do
} else if !v.IsMapping() {
// This shouldn't happen, but is easier to debug than a panic.
return fmt.Errorf("blocksync.version is weird: %v", v)
}
v.Value.X = parser.MustValue(`"v0"`).X
return nil
}),
},
{
// Since https://github.com/tendermint/tendermint/pull/6462.
Desc: "Move priv-validator settings under [priv-validator]",
T: transform.Func(func(_ context.Context, doc *tomledit.Document) error {
const pvPrefix = "priv-validator-"
var found []*tomledit.Entry
doc.Global.Scan(func(key parser.Key, e *tomledit.Entry) bool {
if len(key) == 1 && strings.HasPrefix(key[0], pvPrefix) {
found = append(found, e)
}
return true
})
if len(found) == 0 {
return nil // nothing to do
}
// Now that we know we have work to do, find the target table.
var sec *tomledit.Section
if dst := transform.FindTable(doc, "priv-validator"); dst == nil {
// If the table doesn't exist, create it. Old config files
// probably will not have it, so plug in the comment too.
sec = &tomledit.Section{
Heading: &parser.Heading{
Block: parser.Comments{
"#######################################################",
"### Priv Validator Configuration ###",
"#######################################################",
},
Name: parser.Key{"priv-validator"},
},
}
doc.Sections = append(doc.Sections, sec)
} else {
sec = dst.Section
}
for _, e := range found {
e.Remove()
e.Name = parser.Key{strings.TrimPrefix(e.Name[0], pvPrefix)}
sec.Items = append(sec.Items, e.KeyValue)
}
return nil
}),
},
{
// Since https://github.com/tendermint/tendermint/pull/6411.
Desc: "Convert tx-index.indexer from a string to a list of strings",
T: transform.Func(func(ctx context.Context, doc *tomledit.Document) error {
idx := doc.First("tx-index", "indexer")
if idx == nil {
// No previous indexer setting: Default to ["null"] per #8222.
return transform.EnsureKey(parser.Key{"tx-index"}, &parser.KeyValue{
Block: parser.Comments{"The backend database list to back the indexer."},
Name: parser.Key{"indexer"},
Value: parser.MustValue(`["null"]`),
})(ctx, doc)
}
// Versions prior to v0.35 had a string value here, v0.35 and onward
// use an array of strings.
switch idx.KeyValue.Value.X.(type) {
case parser.Array:
// OK, this is already up-to-date.
return nil
case parser.Token:
// Wrap the value in a single-element array.
idx.KeyValue.Value.X = parser.Array{idx.KeyValue.Value}
return nil
}
return fmt.Errorf("unrecognized value: %v", idx.KeyValue)
}),
},
}
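
To illustrate the pattern, a hypothetical extra entry would look like the sketch below. It assumes the plan's element type is `transform.Step`, as the literals above suggest; `rpc.example-feature` is not a real Tendermint setting and is used only to show the shape of a step.

```go
package confix

import (
	"github.com/creachadair/tomledit/parser"
	"github.com/creachadair/tomledit/transform"
)

// exampleStep is a hypothetical entry following the same pattern as the plan
// above; "example-feature" is not a real Tendermint config key.
var exampleStep = transform.Step{
	Desc: "Add rpc.example-feature default",
	T: transform.EnsureKey(parser.Key{"rpc"}, &parser.KeyValue{
		Block: parser.Comments{"Enable the (hypothetical) example feature."},
		Name:  parser.Key{"example-feature"},
		Value: parser.MustValue("false"),
	}),
	ErrorOK: true,
}
```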

internal/libs/confix/testdata/README.md
View File

@@ -0,0 +1,52 @@
# Test data for `confix` and `condiff`
The files in this directory are stock Tendermint configuration files generated
by the last point release of each version series from v0.26 to present, along
with diffs between consecutive versions.
## Config Samples
The files named `vXX-config.toml` were generated by checking out and building
the corresponding version of Tendermint v0.xx.y and initializing a new node in
an empty home directory. The resulting `config.toml` file was copied here.
The exact build instructions vary a bit, but a general repro looks like:
```shell
# This example uses v0.31, substitute the version of your choice.
# Note that the branch names and tags may differ.
# Versions prior to v0.26 may not build.
git checkout v0.31.9
git clean -fdx
# Versions prior to v0.32 do not have Go module files.
# Those that do may need some dependencies manually updated.
go mod init github.com/tendermint/tendermint
go mod tidy
go get golang.org/x/sys
# Once you sort out the dependencies, this should usually work.
make build
# Confirm you got the version you expected, and generate the file.
./build/tendermint --home=tmhome version
./build/tendermint --home=tmhome init
# Copy the file out.
cp ./tmhome/config/config.toml v31-config.toml
```
## Version Diffs
The files named `diff-XX-YY.txt` were generated by using the `condiff` tool on
the config samples for versions v0.XX and v0.YY:
```shell
go run ./scripts/condiff -desnake vXX-config.toml vYY-config.toml > diff-XX-YY.txt
```
The `baseline.txt` was computed in the same way, but using an empty starting
file so that we capture all the settings in the target:
```shell
go run ./scripts/condiff -desnake /dev/null v26-config.toml > baseline.txt
```

View File

@@ -0,0 +1,73 @@
+M abci
+M db-backend
+M db-dir
+M fast-sync
+M filter-peers
+M genesis-file
+M log-format
+M log-level
+M moniker
+M node-key-file
+M priv-validator-file
+M priv-validator-laddr
+M prof-laddr
+M proxy-app
+S consensus
+M consensus.wal-file
+M consensus.timeout-propose
+M consensus.timeout-propose-delta
+M consensus.timeout-prevote
+M consensus.timeout-prevote-delta
+M consensus.timeout-precommit
+M consensus.timeout-precommit-delta
+M consensus.timeout-commit
+M consensus.skip-timeout-commit
+M consensus.create-empty-blocks
+M consensus.create-empty-blocks-interval
+M consensus.peer-gossip-sleep-duration
+M consensus.peer-query-maj23-sleep-duration
+M consensus.blocktime-iota
+S instrumentation
+M instrumentation.prometheus
+M instrumentation.prometheus-listen-addr
+M instrumentation.max-open-connections
+M instrumentation.namespace
+S mempool
+M mempool.recheck
+M mempool.broadcast
+M mempool.wal-dir
+M mempool.size
+M mempool.cache-size
+S p2p
+M p2p.laddr
+M p2p.external-address
+M p2p.seeds
+M p2p.persistent-peers
+M p2p.upnp
+M p2p.addr-book-file
+M p2p.addr-book-strict
+M p2p.max-num-inbound-peers
+M p2p.max-num-outbound-peers
+M p2p.flush-throttle-timeout
+M p2p.max-packet-msg-payload-size
+M p2p.send-rate
+M p2p.recv-rate
+M p2p.pex
+M p2p.seed-mode
+M p2p.private-peer-ids
+M p2p.allow-duplicate-ip
+M p2p.handshake-timeout
+M p2p.dial-timeout
+S rpc
+M rpc.laddr
+M rpc.cors-allowed-origins
+M rpc.cors-allowed-methods
+M rpc.cors-allowed-headers
+M rpc.grpc-laddr
+M rpc.grpc-max-open-connections
+M rpc.unsafe
+M rpc.max-open-connections
+S tx-index
+M tx-index.indexer
+M tx-index.index-tags
+M tx-index.index-all-tags

View File

@@ -0,0 +1,3 @@
-M priv-validator-file
+M priv-validator-key-file
+M priv-validator-state-file

View File

@@ -0,0 +1,7 @@
-M consensus.blocktime-iota
+M mempool.max-txs-bytes
+M rpc.max-subscription-clients
+M rpc.max-subscriptions-per-client
+M rpc.timeout-broadcast-tx-commit
+M rpc.tls-cert-file
+M rpc.tls-key-file

View File

@@ -0,0 +1,5 @@
+S fastsync
+M fastsync.version
+M mempool.max-tx-bytes
+M rpc.max-body-bytes
+M rpc.max-header-bytes

View File

@@ -0,0 +1,6 @@
+M p2p.persistent-peers-max-dial-period
+M p2p.unconditional-peer-ids
+M tx-index.index-all-keys
-M tx-index.index-all-tags
+M tx-index.index-keys
-M tx-index.index-tags

View File

@@ -0,0 +1,20 @@
-M prof-laddr
+M consensus.double-sign-check-height
+M mempool.keep-invalid-txs-in-cache
+M mempool.max-batch-bytes
+M rpc.experimental-close-on-slow-client
+M rpc.experimental-subscription-buffer-size
+M rpc.experimental-websocket-write-buffer-size
+M rpc.pprof-laddr
+S statesync
+M statesync.enable
+M statesync.rpc-servers
+M statesync.trust-height
+M statesync.trust-hash
+M statesync.trust-period
+M statesync.discovery-time
+M statesync.temp-dir
+M statesync.chunk-request-timeout
+M statesync.chunk-fetchers
-M tx-index.index-all-keys
-M tx-index.index-keys

View File

@@ -0,0 +1,28 @@
-M fast-sync
+M mode
-M priv-validator-key-file
-M priv-validator-laddr
-M priv-validator-state-file
+S blocksync
+M blocksync.enable
+M blocksync.version
-S fastsync
-M fastsync.version
-M mempool.wal-dir
+M p2p.bootstrap-peers
+M p2p.max-connections
+M p2p.max-incoming-connection-attempts
+M p2p.max-outgoing-connections
+M p2p.queue-type
-M p2p.seed-mode
+M p2p.use-legacy
+S priv-validator
+M priv-validator.key-file
+M priv-validator.state-file
+M priv-validator.laddr
+M priv-validator.client-certificate-file
+M priv-validator.client-key-file
+M priv-validator.root-ca-file
-M statesync.chunk-fetchers
+M statesync.fetchers
+M statesync.use-p2p

View File

@@ -0,0 +1,29 @@
-S blocksync
-M blocksync.enable
-M blocksync.version
-M consensus.skip-timeout-commit
-M consensus.timeout-commit
-M consensus.timeout-precommit
-M consensus.timeout-precommit-delta
-M consensus.timeout-prevote
-M consensus.timeout-prevote-delta
-M consensus.timeout-propose
-M consensus.timeout-propose-delta
-M mempool.recheck
-M mempool.version
-M p2p.addr-book-file
-M p2p.addr-book-strict
-M p2p.max-num-inbound-peers
-M p2p.max-num-outbound-peers
-M p2p.persistent-peers-max-dial-period
-M p2p.seeds
-M p2p.unconditional-peer-ids
-M p2p.use-legacy
+M rpc.event-log-max-items
+M rpc.event-log-window-size
-M rpc.experimental-close-on-slow-client
+M rpc.experimental-disable-websocket
-M rpc.experimental-subscription-buffer-size
-M rpc.experimental-websocket-write-buffer-size
-M rpc.grpc-laddr
-M rpc.grpc-max-open-connections

View File

@@ -0,0 +1,6 @@
# This is not a Tendermint config file.
[ test ]
key = 'value'
# Nothing to see here, move along.

View File

@@ -0,0 +1,249 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = "[]"
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = "[HEAD GET POST]"
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = "[Origin Accept Content-Type X-Requested-With X-Server-Time]"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = true
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# size of the mempool
size = 5000
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
blocktime_iota = "1s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"
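One detail worth noting when comparing this fixture with the ones that follow: the file above encodes list-valued options as quoted strings, while the later fixtures use proper TOML arrays. Both spellings appear verbatim in these files, for example:

cors_allowed_origins = "[]"
cors_allowed_methods = "[HEAD GET POST]"

versus

cors_allowed_origins = []
cors_allowed_methods = ["HEAD", "GET", "POST", ]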


@@ -0,0 +1,249 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "config/priv_validator.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = true
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# size of the mempool
size = 5000
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
blocktime_iota = "1s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,252 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# size of the mempool
size = 5000
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
blocktime_iota = "1s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,252 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# size of the mempool
size = 5000
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
blocktime_iota = "1s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,252 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: leveldb | memdb | cleveldb
db_backend = "leveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# size of the mempool
size = 5000
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
blocktime_iota = "1s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,292 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use-cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"
# The name of a file containing the certificate that is used to create the HTTPS server.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_cert_file = ""
# The name of a file containing the matching private key that is used to create the HTTPS server.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_key_file = ""
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,319 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
# relative to the home directory (e.g. "data"). The home directory is
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use-cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://127.0.0.1:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"
# Maximum size of request body, in bytes
max_body_bytes = 1000000
# Maximum size of request header, in bytes
max_header_bytes = 1048576
# The path to a file containing the certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_cert_file = ""
# The path to a file containing the matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_key_file = ""
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
max_tx_bytes = 1048576
##### fast sync configuration options #####
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v1" - refactor of v0 version for better testability
version = "v0"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags takes
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,335 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
# relative to the home directory (e.g. "data"). The home directory is
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: goleveldb | cleveldb | boltdb | rocksdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
# * rocksdb (uses github.com/tecbot/gorocksdb)
# - EXPERIMENTAL
# - requires gcc
# - use rocksdb build tag (go build -tags rocksdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://127.0.0.1:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
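# A hypothetical example (placeholder origin) allowing requests from a single web frontend:
# cors_allowed_origins = ["https://wallet.example.com"]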
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"
# Maximum size of request body, in bytes
max_body_bytes = 1000000
# Maximum size of request header, in bytes
max_header_bytes = 1048576
# The path to a file containing the certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_cert_file = ""
# The path to a file containing the matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_key_file = ""
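# A hypothetical example with placeholder file names (relative to the config directory):
# tls_cert_file = "server.crt"
# tls_key_file = "server.key"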
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
unconditional_peer_ids = ""
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
persistent_peers_max_dial_period = "0s"
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
max_tx_bytes = 1048576
##### fast sync configuration options #####
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v1" - refactor of v0 version for better testability
# 3) "v2" - refactor of v1 version for better usability
version = "v0"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of compositeKeys to index (by default the only key is "tx.hash")
# Remember that Event has the following structure: type.key
# type: [
# key: value,
# ...
# ]
#
# You can also index transactions by height by adding "tx.height" key here.
#
# It's recommended to index only a subset of keys due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_keys = ""
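# A hypothetical example ("transfer.amount" stands in for an app-defined event key):
# index_keys = "tx.height,transfer.amount"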
# When set to true, tells indexer to index all compositeKeys (predefined keys:
# "tx.hash", "tx.height" and all keys from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexKeys takes
# precedence over IndexAllKeys (i.e. when given both, IndexKeys will be
# indexed).
index_all_keys = false
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,455 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
# relative to the home directory (e.g. "data"). The home directory is
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.
#######################################################################
### Main Base Config Options ###
#######################################################################
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
# * rocksdb (uses github.com/tecbot/gorocksdb)
# - EXPERIMENTAL
# - requires gcc
# - use rocksdb build tag (go build -tags rocksdb)
# * badgerdb (uses github.com/dgraph-io/badger)
# - EXPERIMENTAL
# - use badgerdb build tag (go build -tags badgerdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "info"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
#######################################################################
### Advanced Configuration Options ###
#######################################################################
#######################################################
### RPC Server Configuration Options ###
#######################################################
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://127.0.0.1:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5
# Experimental parameter to specify the maximum number of events a node will
# buffer, per subscription, before returning an error and closing the
# subscription. Must be set to at least 100, but higher values will accommodate
# higher event throughput rates (and will use more memory).
experimental_subscription_buffer_size = 200
# Experimental parameter to specify the maximum number of RPC responses that
# can be buffered per WebSocket client. If clients cannot read from the
# WebSocket endpoint fast enough, they will be disconnected, so increasing this
# parameter may reduce the chances of them being disconnected (but will cause
# the node to use more memory).
#
# Must be at least the same as "experimental_subscription_buffer_size",
# otherwise connections could be dropped unnecessarily. This value should
# ideally be somewhat higher than "experimental_subscription_buffer_size" to
# accommodate non-subscription-related RPC responses.
experimental_websocket_write_buffer_size = 200
# If a WebSocket client cannot read fast enough, at present we may
# silently drop events instead of generating an error or disconnecting the
# client.
#
# Enabling this experimental parameter will cause the WebSocket connection to
# be closed instead if it cannot read fast enough, allowing for greater
# predictability in subscription behaviour.
experimental_close_on_slow_client = false
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"
# Maximum size of request body, in bytes
max_body_bytes = 1000000
# Maximum size of request header, in bytes
max_header_bytes = 1048576
# The path to a file containing certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_cert_file = ""
# The path to a file containing matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_key_file = ""
# pprof listen address (https://golang.org/pkg/net/http/pprof)
pprof_laddr = ""
#######################################################
### P2P Configuration Options ###
#######################################################
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
external_address = ""
# Comma separated list of seed nodes to connect to
seeds = ""
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
unconditional_peer_ids = ""
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
persistent_peers_max_dial_period = "0s"
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which the node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
#######################################################
### Mempool Configuration Options ###
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - (default) FIFO mempool.
# 2) "v1" - prioritized mempool.
version = "v0"
recheck = true
broadcast = true
wal_dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000
# Do not remove invalid transactions from the cache (default: false)
# Set to true if it's not possible for any invalid transaction to become valid
# again in the future.
keep-invalid-txs-in-cache = false
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
max_tx_bytes = 1048576
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max_batch_bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
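# A hypothetical example: evict a transaction once it has been in the mempool for
# 10 minutes or 100 blocks, whichever is reached first:
# ttl-duration = "10m"
# ttl-num-blocks = 100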
#######################################################
### State Sync Configuration Options ###
#######################################################
[statesync]
# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine
# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in
# the network to take and serve state machine snapshots. State sync is not attempted if the node
# has any local state (LastBlockHeight > 0). The node will have a truncated block history,
# starting from the height of the snapshot.
enable = false
# RPC servers (comma-separated) for light client verification of the synced state machine and
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
# header hash obtained from a trusted source, and a period during which validators can be trusted.
#
# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
rpc_servers = ""
trust_height = 0
trust_hash = ""
trust_period = "168h0m0s"
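# A hypothetical example with placeholder servers; the height and hash must come
# from a trusted source:
# rpc_servers = "rpc-1.example.com:26657,rpc-2.example.com:26657"
# trust_height = 4570000
# trust_hash = "<block-hash-at-trust-height>"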
# Time to spend discovering snapshots before initiating a restore.
discovery_time = "15s"
# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
# Will create a new, randomly named directory within, and remove it when done.
temp_dir = ""
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 1 minute).
chunk_request_timeout = "10s"
# The number of concurrent chunk fetchers to run (default: 1).
chunk_fetchers = "4"
#######################################################
### Fast Sync Configuration Options ###
#######################################################
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v1" - refactor of v0 version for better testability
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
#######################################################
### Consensus Configuration Options ###
#######################################################
[consensus]
wal_file = "data/cs.wal/wal"
# How long we wait for a proposal block before prevoting nil
timeout_propose = "3s"
# How much timeout_propose increases with each round
timeout_propose_delta = "500ms"
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
timeout_prevote = "1s"
# How much the timeout_prevote increases with each round
timeout_prevote_delta = "500ms"
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
timeout_precommit = "1s"
# How much the timeout_precommit increases with each round
timeout_precommit_delta = "500ms"
# How long we wait after committing a block, before starting on the new
# height (this gives us a chance to receive some more precommits, even
# though we already have +2/3).
timeout_commit = "1s"
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
# When non-zero, the node will panic upon restart
# if the same consensus key was used to sign {double_sign_check_height} last blocks.
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
double_sign_check_height = 0
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
[tx_index]
# What indexer to use for transactions
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = "kv"
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = ""
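# A hypothetical example with placeholder credentials and database name:
# psql-conn = "postgresql://tmuser:secret@127.0.0.1:5432/tendermint?sslmode=disable"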
#######################################################
### Instrumentation Configuration Options ###
#######################################################
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,533 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
# relative to the home directory (e.g. "data"). The home directory is
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.
#######################################################################
### Main Base Config Options ###
#######################################################################
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# Mode of Node: full | validator | seed
# * validator node
# - all reactors
# - with priv_validator_key.json, priv_validator_state.json
# * full node
# - all reactors
# - No priv_validator_key.json, priv_validator_state.json
# * seed node
# - only P2P, PEX Reactor
# - No priv_validator_key.json, priv_validator_state.json
mode = "validator"
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
# * rocksdb (uses github.com/tecbot/gorocksdb)
# - EXPERIMENTAL
# - requires gcc
# - use rocksdb build tag (go build -tags rocksdb)
# * badgerdb (uses github.com/dgraph-io/badger)
# - EXPERIMENTAL
# - use badgerdb build tag (go build -tags badgerdb)
db-backend = "goleveldb"
# Database directory
db-dir = "data"
# Output level for logging, including package level options
log-level = "info"
# Output format: 'plain' (colored text) or 'json'
log-format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "config/genesis.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter-peers = false
#######################################################
### Priv Validator Configuration ###
#######################################################
[priv-validator]
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
key-file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
state-file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# When the listenAddr is prefixed with grpc instead of tcp, it will use the gRPC client
laddr = ""
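# A hypothetical example with a placeholder port (per the note above, a grpc prefix
# selects the gRPC client):
# laddr = "tcp://0.0.0.0:26659"    # or: laddr = "grpc://0.0.0.0:26659"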
# Path to the client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = ""
# Client key generated while creating certificates for secure connection
client-key-file = ""
# Path to the Root Certificate Authority used to sign both client and server certificates
root-ca-file = ""
#######################################################################
### Advanced Configuration Options ###
#######################################################################
#######################################################
### RPC Server Configuration Options ###
#######################################################
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://127.0.0.1:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors-allowed-origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors-allowed-methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# NOTE: gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# NOTE: gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = 900
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc-max-open-connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max-open-connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max-subscription-clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = 5
# Experimental parameter to specify the maximum number of events a node will
# buffer, per subscription, before returning an error and closing the
# subscription. Must be set to at least 100, but higher values will accommodate
# higher event throughput rates (and will use more memory).
experimental-subscription-buffer-size = 200
# Experimental parameter to specify the maximum number of RPC responses that
# can be buffered per WebSocket client. If clients cannot read from the
# WebSocket endpoint fast enough, they will be disconnected, so increasing this
# parameter may reduce the chances of them being disconnected (but will cause
# the node to use more memory).
#
# Must be at least the same as "experimental-subscription-buffer-size",
# otherwise connections could be dropped unnecessarily. This value should
# ideally be somewhat higher than "experimental-subscription-buffer-size" to
# accommodate non-subscription-related RPC responses.
experimental-websocket-write-buffer-size = 200
# If a WebSocket client cannot read fast enough, at present we may
# silently drop events instead of generating an error or disconnecting the
# client.
#
# Enabling this experimental parameter will cause the WebSocket connection to
# be closed instead if it cannot read fast enough, allowing for greater
# predictability in subscription behavior.
experimental-close-on-slow-client = false
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout-broadcast-tx-commit = "10s"
# Maximum size of request body, in bytes
max-body-bytes = 1000000
# Maximum size of request header, in bytes
max-header-bytes = 1048576
# The path to a file containing certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-cert-file = ""
# The path to a file containing matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-key-file = ""
# pprof listen address (https://golang.org/pkg/net/http/pprof)
pprof-laddr = ""
#######################################################
### P2P Configuration Options ###
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = false
# Select the p2p internal queue.
# Options are: "fifo", "simple-priority", "priority", and "wdrr"
# with the default being "priority".
queue-type = "priority"
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
external-address = ""
# Comma separated list of seed nodes to connect to
# We only use these if we can't connect to peers in the addrbook
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
seeds = ""
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap-peers = ""
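# A hypothetical example with placeholder node IDs and hosts, using the same
# nodeID@host:port form as persistent-peers:
# bootstrap-peers = "<node-id-1>@203.0.113.21:26656,<node-id-2>@203.0.113.22:26656"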
# Comma separated list of nodes to keep persistent connections to
persistent-peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
# TODO: Remove once p2p refactor is complete in favor of peer store.
addr-book-file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr-book-strict = true
# Maximum number of inbound peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-inbound-peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-outbound-peers = 10
# Maximum number of connections (inbound and outbound).
max-connections = 64
# Maximum number of connections reserved for outgoing
# connections. Must be less than max-connections
max-outgoing-connections = 12
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = 100
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
# TODO: Remove once p2p refactor is complete.
# ref: https://github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = ""
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
persistent-peers-max-dial-period = "0s"
# Time to wait before flushing messages out on the connection
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
flush-throttle-timeout = "100ms"
# Maximum size of a message packet payload, in bytes
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
max-packet-msg-payload-size = 1400
# Rate at which packets can be sent, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
send-rate = 5120000
# Rate at which packets can be received, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
recv-rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow-duplicate-ip = false
# Peer connection configuration.
handshake-timeout = "20s"
dial-timeout = "3s"
#######################################################
### Mempool Configuration Options ###
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "v1"
recheck = true
broadcast = true
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max-txs-bytes=5MB, mempool will only accept 5 transactions).
max-txs-bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache-size = 10000
# Do not remove invalid transactions from the cache (default: false)
# Set to true if it's not possible for any invalid transaction to become valid
# again in the future.
keep-invalid-txs-in-cache = false
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
max-tx-bytes = 1048576
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
#######################################################
### State Sync Configuration Options ###
#######################################################
[statesync]
# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine
# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in
# the network to take and serve state machine snapshots. State sync is not attempted if the node
# has any local state (LastBlockHeight > 0). The node will have a truncated block history,
# starting from the height of the snapshot.
enable = false
# State sync uses light client verification to verify state. This can be done either through the
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
# will be used.
use-p2p = false
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
# for example: "host.example.com:2125"
rpc-servers = ""
# The hash and height of a trusted block. Must be within the trust-period.
trust-height = 0
trust-hash = ""
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# period should suffice.
trust-period = "168h0m0s"
# Time to spend discovering snapshots before initiating a restore.
discovery-time = "15s"
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
# The synchronizer will create a new, randomly named directory within this directory
# and remove it when the sync is complete.
temp-dir = ""
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "15s"
# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "4"
#######################################################
### Block Sync Configuration Options ###
#######################################################
[blocksync]
# If this node is many blocks behind the tip of the chain, BlockSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
enable = true
# Block Sync version to use:
# 1) "v0" (default) - the standard Block Sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "v0"
#######################################################
### Consensus Configuration Options ###
#######################################################
[consensus]
wal-file = "data/cs.wal/wal"
# How long we wait for a proposal block before prevoting nil
timeout-propose = "3s"
# How much timeout-propose increases with each round
timeout-propose-delta = "500ms"
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
timeout-prevote = "1s"
# How much the timeout-prevote increases with each round
timeout-prevote-delta = "500ms"
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
timeout-precommit = "1s"
# How much the timeout-precommit increases with each round
timeout-precommit-delta = "500ms"
# How long we wait after committing a block, before starting on the new
# height (this gives us a chance to receive some more precommits, even
# though we already have +2/3).
timeout-commit = "1s"
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
# When non-zero, the node will panic upon restart
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
double-sign-check-height = 0
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip-timeout-commit = false
# EmptyBlocks mode and possible interval between empty blocks
create-empty-blocks = true
create-empty-blocks-interval = "0s"
# Reactor sleep duration parameters
peer-gossip-sleep-duration = "100ms"
peer-query-maj23-sleep-duration = "2s"
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
[tx-index]
# The list of database backends to back the indexer.
# If the list contains "null" or "", no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = ["kv"]
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = ""
#######################################################
### Instrumentation Configuration Options ###
#######################################################
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus-listen-addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max-open-connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -0,0 +1,485 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
# relative to the home directory (e.g. "data"). The home directory is
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.
#######################################################################
### Main Base Config Options ###
#######################################################################
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "localhost"
# Mode of Node: full | validator | seed
# * validator node
# - all reactors
# - with priv_validator_key.json, priv_validator_state.json
# * full node
# - all reactors
# - No priv_validator_key.json, priv_validator_state.json
# * seed node
# - only P2P, PEX Reactor
# - No priv_validator_key.json, priv_validator_state.json
mode = "validator"
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
# * rocksdb (uses github.com/tecbot/gorocksdb)
# - EXPERIMENTAL
# - requires gcc
# - use rocksdb build tag (go build -tags rocksdb)
# * badgerdb (uses github.com/dgraph-io/badger)
# - EXPERIMENTAL
# - use badgerdb build tag (go build -tags badgerdb)
db-backend = "goleveldb"
# Database directory
db-dir = "data"
# Output level for logging, including package level options
log-level = "info"
# Output format: 'plain' (colored text) or 'json'
log-format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "config/genesis.json"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter-peers = false
#######################################################
### Priv Validator Configuration ###
#######################################################
[priv-validator]
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
key-file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
state-file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# When the listenAddr is prefixed with grpc instead of tcp, it will use the gRPC client
laddr = ""
# Path to the client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = ""
# Client key generated while creating certificates for secure connection
client-key-file = ""
# Path to the Root Certificate Authority used to sign both client and server certificates
root-ca-file = ""
#######################################################################
### Advanced Configuration Options ###
#######################################################################
#######################################################
### RPC Server Configuration Options ###
#######################################################
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://127.0.0.1:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors-allowed-origins = []
# A list of methods the client is allowed to use with cross-domain requests
cors-allowed-methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max-open-connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max-subscription-clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using a Local RPC client and /broadcast_tx_commit, set this
# to the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = 5
# If true, disable the websocket interface to the RPC service. This has
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
# methods for event subscription.
#
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
experimental-disable-websocket = false
# The time window size for the event log. All events up to this long before
# the latest (up to EventLogMaxItems) will be available for subscribers to
# fetch via the /events method. If 0 (the default) the event log and the
# /events RPC method are disabled.
event-log-window-size = "0s"
# The maximum number of events that may be retained by the event log. If
# this value is 0, no upper limit is set. Otherwise, items in excess of
# this number will be discarded from the event log.
#
# Warning: This setting is a safety valve. Setting it too low may cause
# subscribers to miss events. Try to choose a value higher than the
# maximum worst-case expected event load within the chosen window size in
# ordinary operation.
#
# For example, if the window size is 10 minutes and the node typically
# averages 1000 events per ten minutes, but with occasional known spikes of
# up to 2000, choose a value > 2000.
event-log-max-items = 0
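# Example (illustrative values, extending the scenario above): for a ten-minute
# window with occasional spikes of up to 2000 events, one might set:
#   event-log-window-size = "10m"
#   event-log-max-items = 2500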
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout-broadcast-tx-commit = "10s"
# Maximum size of request body, in bytes
max-body-bytes = 1000000
# Maximum size of request header, in bytes
max-header-bytes = 1048576
# The path to a file containing the certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-cert-file = ""
# The path to a file containing the matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to Tendermint's config directory.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-key-file = ""
# pprof listen address (https://golang.org/pkg/net/http/pprof)
pprof-laddr = ""
#######################################################
### P2P Configuration Options ###
#######################################################
[p2p]
# Select the p2p internal queue.
# Options are: "fifo", "simple-priority", and "priority",
# with the default being "priority".
queue-type = "simple-priority"
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. IP and port are required
# example: 159.89.10.97:26656
external-address = ""
# Comma separated list of seed nodes to connect to
# We only use these if we can't connect to peers in the address book
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
seeds = ""
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap-peers = ""
# Comma separated list of nodes to keep persistent connections to
persistent-peers = ""
# UPNP port forwarding
upnp = false
# Maximum number of connections (inbound and outbound).
max-connections = 64
# Maximum number of connections reserved for outgoing
# connections. Must be less than max-connections
max-outgoing-connections = 12
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = 100
# Set true to enable the peer-exchange reactor
pex = true
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = ""
# Toggle to disable the guard against peers connecting from the same IP.
allow-duplicate-ip = false
# Peer connection configuration.
handshake-timeout = "20s"
dial-timeout = "3s"
# Time to wait before flushing messages out on the connection
# TODO: Remove once MConnConnection is removed.
flush-throttle-timeout = "100ms"
# Maximum size of a message packet payload, in bytes
# TODO: Remove once MConnConnection is removed.
max-packet-msg-payload-size = 1400
# Rate at which packets can be sent, in bytes/second
# TODO: Remove once MConnConnection is removed.
send-rate = 5120000
# Rate at which packets can be received, in bytes/second
# TODO: Remove once MConnConnection is removed.
recv-rate = 5120000
#######################################################
### Mempool Configuration Option ###
#######################################################
[mempool]
recheck = true
broadcast = true
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max-txs-bytes=5MB, mempool will only accept 5 transactions).
max-txs-bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache-size = 10000
# Do not remove invalid transactions from the cache (default: false)
# Set to true if it's not possible for any invalid transaction to become valid
# again in the future.
keep-invalid-txs-in-cache = false
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
max-tx-bytes = 1048576
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note: if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note: if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
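# Example (illustrative values): with the combination below, a transaction is
# evicted once it has been in the mempool for 10 minutes or for 100 blocks,
# whichever is reached first:
#   ttl-duration = "10m"
#   ttl-num-blocks = 100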
#######################################################
### State Sync Configuration Options ###
#######################################################
[statesync]
# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine
# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in
# the network to take and serve state machine snapshots. State sync is not attempted if the node
# has any local state (LastBlockHeight > 0). The node will have a truncated block history,
# starting from the height of the snapshot.
enable = false
# State sync uses light client verification to verify state. This can be done either through the
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
# will be used.
use-p2p = false
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
# for example: "host.example.com:2125"
rpc-servers = ""
# The hash and height of a trusted block. Must be within the trust-period.
trust-height = 0
trust-hash = ""
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# period should suffice.
trust-period = "168h0m0s"
# Time to spend discovering snapshots before initiating a restore.
discovery-time = "15s"
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
# The synchronizer will create a new, randomly named directory within this directory
# and remove it when the sync is complete.
temp-dir = ""
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "15s"
# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "4"
#######################################################
### Consensus Configuration Options ###
#######################################################
[consensus]
wal-file = "data/cs.wal/wal"
# How many blocks to look back for the node's consensus votes before joining consensus.
# When non-zero, the node will panic on restart
# if the same consensus key was used to sign any of the last {double-sign-check-height} blocks.
# In that case, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid the panic.
double-sign-check-height = 0
# EmptyBlocks mode and possible interval between empty blocks
create-empty-blocks = true
create-empty-blocks-interval = "0s"
# Reactor sleep duration parameters
peer-gossip-sleep-duration = "100ms"
peer-query-maj23-sleep-duration = "2s"
### Unsafe Timeout Overrides ###
# These fields provide temporary overrides for the Timeout consensus parameters.
# Use of these parameters is strongly discouraged. Using these parameters may have serious
# liveness implications for the validator and for the chain.
#
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
# For additional information, see ADR-74:
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
# This field provides an unsafe override of the Propose timeout consensus parameter.
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-override = 0s
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
# This field configures how much the propose timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-delta-override = 0s
# This field provides an unsafe override of the Vote timeout consensus parameter.
# This field configures how long the consensus engine will wait after
# receiving +2/3 votes in a round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-override = 0s
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
# This field configures how much the vote timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-delta-override = 0s
# This field provides an unsafe override of the Commit timeout consensus parameter.
# This field configures how long the consensus engine will wait after receiving
# +2/3 precommits before beginning the next height.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-commit-timeout-override = 0s
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
# This field configures if the consensus engine will wait for the full Commit timeout
# before proceeding to the next height.
# If this field is set to true, the consensus engine will proceed to the next height
# as soon as the node has gathered votes from all of the validators on the network.
# unsafe-bypass-commit-timeout-override =
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
[tx-index]
# The list of database backends used by the indexer.
# If the list contains "null" or "", no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null" (default) - no indexer services.
# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = ["null"]
# The PostgreSQL connection configuration. The connection string format is:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = ""
#######################################################
### Instrumentation Configuration Options ###
#######################################################
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus-listen-addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max-open-connections = 3
# Instrumentation namespace
namespace = "tendermint"


@@ -22,6 +22,10 @@ type TxCache interface {
// Remove removes the given raw transaction from the cache.
Remove(tx types.Tx)
+// Has reports whether tx is present in the cache. Checking for presence is
+// not treated as an access of the value.
+Has(tx types.Tx) bool
}
var _ TxCache = (*LRUTxCache)(nil)
@@ -97,6 +101,14 @@ func (c *LRUTxCache) Remove(tx types.Tx) {
}
}
+func (c *LRUTxCache) Has(tx types.Tx) bool {
+c.mtx.Lock()
+defer c.mtx.Unlock()
+_, ok := c.cacheMap[tx.Key()]
+return ok
+}
// NopTxCache defines a no-op raw transaction cache.
type NopTxCache struct{}
@@ -105,3 +117,4 @@ var _ TxCache = (*NopTxCache)(nil)
func (NopTxCache) Reset() {}
func (NopTxCache) Push(types.Tx) bool { return true }
func (NopTxCache) Remove(types.Tx) {}
+func (NopTxCache) Has(types.Tx) bool { return false }
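
The Has accessor added here only answers a membership question; as the interface comment notes, checking for presence is deliberately not treated as an access, so it never refreshes an entry's position in the LRU order. A minimal standalone sketch of a cache with the same Push/Has split (illustrative only: string keys instead of types.Tx, and not the tendermint LRUTxCache):

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// lruCache is a tiny LRU set: Push counts as an access and refreshes recency,
// while Has only reports membership and leaves the recency order untouched.
type lruCache struct {
	mtx  sync.Mutex
	size int
	m    map[string]*list.Element
	l    *list.List // front = most recently used
}

func newLRUCache(size int) *lruCache {
	return &lruCache{size: size, m: make(map[string]*list.Element), l: list.New()}
}

// Push adds key, evicting the least recently used entry when the cache is full.
// It returns false if the key was already present.
func (c *lruCache) Push(key string) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if e, ok := c.m[key]; ok {
		c.l.MoveToFront(e) // an access: refresh recency
		return false
	}
	if c.l.Len() >= c.size {
		oldest := c.l.Back()
		c.l.Remove(oldest)
		delete(c.m, oldest.Value.(string))
	}
	c.m[key] = c.l.PushFront(key)
	return true
}

// Has reports membership without touching the recency order.
func (c *lruCache) Has(key string) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	_, ok := c.m[key]
	return ok
}

func main() {
	c := newLRUCache(2)
	fmt.Println(c.Push("a"), c.Push("a")) // true false: second push is a duplicate
	fmt.Println(c.Has("a"), c.Has("b"))   // true false
}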


@@ -242,17 +242,13 @@ func (mem *CListMempool) CheckTx(
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(tx.Key()); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
-_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
+memTx.senders.LoadOrStore(txInfo.SenderID, true)
// TODO: consider punishing peer for dups,
// its non-trivial since invalid txs can become valid,
// but they can spam the same tx with little cost to them atm.
-if loaded {
-return types.ErrTxInCache
-}
}
+mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash())
-return nil
+return types.ErrTxInCache
}
if ctx == nil {
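
With this change a duplicate submission surfaces as types.ErrTxInCache instead of a silent nil, so callers can tell "already seen" apart from a genuine failure. A self-contained sketch of that sentinel-error pattern (hypothetical checkTx helper and error value, not the tendermint API):

package main

import (
	"errors"
	"fmt"
)

// errTxInCache plays the role of types.ErrTxInCache: a duplicate submission is
// reported to the caller instead of being silently accepted.
var errTxInCache = errors.New("tx already exists in cache")

// checkTx is a stand-in for a mempool admission check: the first submission of
// a tx is accepted, any repeat returns the sentinel error.
func checkTx(seen map[string]bool, tx string) error {
	if seen[tx] {
		return errTxInCache
	}
	seen[tx] = true
	return nil
}

func main() {
	seen := map[string]bool{}
	for _, tx := range []string{"aa", "aa"} {
		err := checkTx(seen, tx)
		switch {
		case errors.Is(err, errTxInCache):
			// Duplicate gossip is expected and benign; callers such as the
			// reactor change further down simply skip it.
			fmt.Println("duplicate, skipping:", tx)
		case err != nil:
			fmt.Println("checktx failed:", err)
		default:
			fmt.Println("accepted:", tx)
		}
	}
}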


@@ -200,7 +200,7 @@ func TestMempoolUpdate(t *testing.T) {
err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
-require.NoError(t, err)
+assert.Error(t, err)
}
// 2. Removes valid txs from the mempool
@@ -248,13 +248,13 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
for _, tx := range txs {
reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})
// SetDone allows the ReqRes to process its callback synchronously.
// This simulates the Response being ready for the client immediately.
reqRes.SetDone()
mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
require.NoError(t, err)
// ensure that the callback that the mempool sets on the ReqRes is run.
reqRes.InvokeCallback()
}
// Calling update to remove the first transaction from the mempool.
@@ -305,11 +305,15 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
// a must be added to the cache
err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{})
-require.NoError(t, err)
+if assert.Error(t, err) {
+assert.Equal(t, types.ErrTxInCache, err)
+}
// b must remain in the cache
err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{})
-require.NoError(t, err)
+if assert.Error(t, err) {
+assert.Equal(t, types.ErrTxInCache, err)
+}
}
// 2. An invalid transaction must remain in the cache
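
The tests above now assert the duplicate path explicitly instead of expecting a nil error. A minimal, self-contained illustration of the same testify assertion pattern (hypothetical checkTx helper and error value, not the mempool under test):

package dupcheck

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var errTxInCache = errors.New("tx already exists in cache")

// checkTx accepts a tx once and reports errTxInCache on any re-submission.
func checkTx(seen map[string]bool, tx string) error {
	if seen[tx] {
		return errTxInCache
	}
	seen[tx] = true
	return nil
}

func TestDuplicateIsReported(t *testing.T) {
	seen := map[string]bool{}

	// The first submission must succeed.
	require.NoError(t, checkTx(seen, "a"))

	// A repeat must fail with the sentinel error, mirroring the
	// `if assert.Error(...) { assert.Equal(...) }` pattern used above.
	err := checkTx(seen, "a")
	if assert.Error(t, err) {
		assert.Equal(t, errTxInCache, err)
	}
}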


@@ -6,7 +6,6 @@ import (
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
@@ -24,13 +23,6 @@ var (
_ p2p.Wrapper = (*protomem.Message)(nil)
)
-// PeerManager defines the interface contract required for getting necessary
-// peer information. This should eventually be replaced with a message-oriented
-// approach utilizing the p2p stack.
-type PeerManager interface {
-GetHeight(types.NodeID) int64
-}
// Reactor implements a service that contains mempool of txs that are broadcasted
// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping
// txs to the peers you received it from.
@@ -41,11 +33,6 @@ type Reactor struct {
mempool *CListMempool
ids *mempool.MempoolIDs
-// XXX: Currently, this is the only way to get information about a peer. Ideally,
-// we rely on message-oriented communication to get necessary peer data.
-// ref: https://github.com/tendermint/tendermint/issues/5670
-peerMgr PeerManager
mempoolCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}
@@ -62,7 +49,6 @@ type Reactor struct {
func NewReactor(
logger log.Logger,
cfg *config.MempoolConfig,
-peerMgr PeerManager,
mp *CListMempool,
mempoolCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
@@ -70,7 +56,6 @@ func NewReactor(
r := &Reactor{
cfg: cfg,
-peerMgr: peerMgr,
mempool: mp,
ids: mempool.NewMempoolIDs(),
mempoolCh: mempoolCh,
@@ -171,6 +156,15 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {
for _, tx := range protoTxs {
if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
+if errors.Is(err, types.ErrTxInCache) {
+// if the tx is in the cache,
+// then we've been gossiped a
+// Tx that we've already
+// got. Gossip should be
+// smarter, but it's not a
+// problem.
+continue
+}
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
}
}
@@ -355,15 +349,6 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
memTx := next.Value.(*mempoolTx)
-if r.peerMgr != nil {
-height := r.peerMgr.GetHeight(peerID)
-if height > 0 && height < memTx.Height()-1 {
-// allow for a lag of one block
-time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
-continue
-}
-}
// NOTE: Transaction batching was disabled due to:
// https://github.com/tendermint/tendermint/issues/5796


@@ -70,7 +70,6 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint)
rts.reactors[nodeID] = NewReactor(
rts.logger.With("nodeID", nodeID),
config,
-rts.network.Nodes[nodeID].PeerManager,
mempool,
rts.mempoolChnnels[nodeID],
rts.peerUpdates[nodeID],

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.