Compare commits


222 Commits

Author SHA1 Message Date
William Banfield
9997fa6928 test: install abci-cli when running make tests_integrations 2021-08-17 11:27:33 -04:00
Tess Rinearson
3dff227c5b changelog: prepare for v0.34.12 (#6831) 2021-08-17 16:18:15 +02:00
Tess Rinearson
e290bd624f changelog_pending: add missing entry (#6830) 2021-08-17 16:05:36 +02:00
mergify[bot]
0366c2b688 rpc: log update (backport #6825) (#6826) 2021-08-14 09:54:02 -04:00
mergify[bot]
6fde228e9d state/privval: vote timestamp fix (backport #6748) (#6783) 2021-07-30 17:48:49 +02:00
mergify[bot]
b69ac23fd2 light: add case to catch cancelled contexts within the detector (backport #6701) (#6720) 2021-07-14 15:26:03 +02:00
mergify[bot]
da9eefd111 rpc: add chunked rpc interface (backport #6445) (#6717)
* rpc: add chunked rpc interface (#6445)

(cherry picked from commit d9134063e7)

# Conflicts:
#	light/proxy/routes.go
#	node/node.go
#	rpc/core/net.go
#	rpc/core/routes.go

* fix conflicts

Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: marbar3778 <marbar3778@yahoo.com>
2021-07-14 09:22:53 +00:00
Callum Waters
2c2f511f24 light: correctly handle contexts (backport -> v0.34.x) (#6685) 2021-07-09 14:30:33 +02:00
Callum Waters
8b84c7c168 e2e: disable app tests for light client (#6672) 2021-07-07 20:06:55 +02:00
mergify[bot]
0712063ec8 config: add example on external_address (backport #6621) (#6624) 2021-06-30 15:52:19 +02:00
Callum Waters
c2908ef785 release: prepare changelog for v0.34.11 (#6597) 2021-06-18 11:44:39 +02:00
Callum Waters
d515bbcf1d statesync: increase chunk priority and robustness (#6582) 2021-06-18 09:59:52 +02:00
mergify[bot]
be8c9833ca state sync: tune request timeout and chunkers (backport #6566) (#6581)
* state sync: tune request timeout and chunkers (#6566)

(cherry picked from commit 7d961b55b2)

# Conflicts:
#	CHANGELOG_PENDING.md
#	config/config.go
#	internal/statesync/reactor.go
#	internal/statesync/reactor_test.go
#	node/node.go
#	statesync/syncer.go

* fix build

* fix config

* fix config

Co-authored-by: Aleksandr Bezobchuk <alexanderbez@users.noreply.github.com>
Co-authored-by: Aleksandr Bezobchuk <aleks.bezobchuk@gmail.com>
2021-06-15 15:10:16 -04:00
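The tuning above surfaces as state sync configuration knobs. A minimal sketch of setting them from Go, assuming the `ChunkRequestTimeout` and `ChunkFetchers` fields this backport adds to the v0.34.x `StateSyncConfig` (the values are illustrative, not recommendations):

```go
package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()

	// Give a peer more time to respond before the chunk is re-requested
	// from someone else.
	c.StateSync.ChunkRequestTimeout = 15 * time.Second

	// Fetch several chunks concurrently for robustness against slow peers.
	c.StateSync.ChunkFetchers = 4

	fmt.Println(c.StateSync.ChunkRequestTimeout, c.StateSync.ChunkFetchers)
}
```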
mergify[bot]
358b1f23c0 p2p/conn: check for channel id overflow before processing receive msg (backport #6522) (#6528)
* p2p/conn: check for channel id overflow before processing receive msg (#6522)

Per the tendermint spec, each Channel has a globally unique byte id, which
maps to uint8 in Go. However, the proto PacketMsg.ChannelID field is declared
as int32, and when receiving a packet we cast it to a byte without checking
for possible overflow. As a result, a malformed packet with an invalid
channel id could be sent successfully.

To fix this, we add a check for possible overflow and return an invalid
channel id error.

Fixed #6521

(cherry picked from commit 1f46a4c90e)
2021-06-04 20:20:36 -04:00
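A minimal sketch of the guard this commit describes; the names (`channelIDFromWire`, `ErrInvalidChannelID`) are hypothetical, while the real check lives in p2p/conn:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var ErrInvalidChannelID = errors.New("invalid channel id")

// channelIDFromWire rejects wire values outside 0..255 before the cast,
// which would previously truncate silently.
func channelIDFromWire(wireID int32) (byte, error) {
	if wireID < 0 || wireID > math.MaxUint8 {
		return 0, ErrInvalidChannelID
	}
	return byte(wireID), nil
}

func main() {
	if _, err := channelIDFromWire(0x100); err != nil {
		fmt.Println(err) // invalid channel id
	}
}
```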
Marko
c376b44f1c Backport: #6494 (#6506)
* version: revert version through ldflag only (#6494)

Add the version back to the version package, but allow it to be overridden via an ldflag.

Reason:

Many users are not setting the ldflag, causing issues with tooling that relies on it (cosmjs)

closes #6488

cc @webmaster128

* revert variable rename

* Update CHANGELOG_PENDING.md
2021-05-31 21:15:12 +00:00
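The ldflag-override pattern this commit restores is plain Go: a package-level default that the linker can replace at build time. A self-contained sketch (the variable name and version string here are illustrative):

```go
package main

import "fmt"

// Version is the compiled-in default. A release build can override it:
//
//	go build -ldflags "-X main.Version=v0.34.11"
//
// If the ldflag is omitted, tooling still sees a sensible default instead
// of an empty string.
var Version = "v0.34.11"

func main() {
	fmt.Println("version:", Version)
}
```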
Callum Waters
8dd8a4e8ea libs/os: avoid CopyFile truncating destination before checking if regular file (backport: #6428) (#6436) 2021-05-10 13:24:33 +02:00
mergify[bot]
353e3a3243 evidence: fix bug with hashes (backport #6375) (#6381) 2021-04-22 15:05:56 +02:00
Tess Rinearson
a9b4fac610 .github: make core team codeowners (#6384) 2021-04-21 13:38:07 -07:00
mergify[bot]
1614e12035 statesync: improve e2e test outcomes (backport #6378) (#6380)
(cherry picked from commit d36a5905a6)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-04-21 12:30:17 -04:00
Tess Rinearson
68eceda0b5 changelog: update for 0.34.10 (#6357) 2021-04-14 13:46:14 -07:00
Callum Waters
b878326396 e2e: relax timeouts (#6356)
* remove duplicate light error

* quieten handling of txs that already exist in the mempool

* notch back e2e timeouts
2021-04-14 19:53:54 +02:00
mergify[bot]
693e11c6c6 e2e: tx load to use broadcast sync instead of commit (backport #6347) (#6352) 2021-04-14 10:09:49 +02:00
mergify[bot]
6cc3e23a95 light: handle too high errors correctly (backport #6346) (#6351) 2021-04-13 14:46:54 +02:00
Callum Waters
a9ac63510d p2p: fix using custom channels (#6339) 2021-04-13 14:05:36 +02:00
mergify[bot]
bd968aba1f build(deps): Bump google.golang.org/grpc from 1.36.1 to 1.37.0 (bp #6330) (#6335) 2021-04-09 12:20:20 +02:00
Tess Rinearson
e54fdb6204 changelog: prepare changelog for 0.34.9 release (#6333) 2021-04-08 10:05:23 -07:00
Callum Waters
7869f5ec1d light/evidence: handle FLA backport (#6331) 2021-04-08 09:49:25 -07:00
mergify[bot]
af35ca9cf4 state: fix block event indexing reserved key check (#6314) (#6315) 2021-04-05 08:42:17 -04:00
Gustavo Chaín
c9966cd6be p2p: Fix "Unknown Channel" bug on CustomReactors (#6297) 2021-03-30 09:35:00 -04:00
mergify[bot]
6c0c27320c change index block log to info (#6290) (#6294)
## Description

Change log from error to info for indexing blocks

(cherry picked from commit 32ee737d42)

Co-authored-by: Marko <marbar3778@yahoo.com>
2021-03-29 13:57:57 +00:00
mergify[bot]
b7a4d5e7ba fix: jsonrpc url parsing and dial function (#6264) (#6288)
This PR fixes how the jsonrpc client parses the URL, and how the dial function connects to the RPC endpoint.

Closes: https://github.com/tendermint/tendermint/issues/6260

(cherry picked from commit 9ecfcc93a6)

Co-authored-by: Frojdi Dymylja <33157909+fdymylja@users.noreply.github.com>
2021-03-29 11:05:03 +00:00
mergify[bot]
0682337de2 logging: shorten precommit log message (#6270) (#6274)
This is an attempt to clean up the logging message as requested in #6269.

(cherry picked from commit 3f9066b290)

Co-authored-by: Sam Kleinman <garen@tychoish.com>
2021-03-25 16:19:50 -04:00
mergify[bot]
b00cac9368 rpc: index block events to support block event queries (bp #6226) (#6261) 2021-03-22 15:01:25 -04:00
mergify[bot]
b2f01448be e2e: integrate light clients (bp #6196)
integrate light clients (#6196)
fix e2e app test (#6223)
fix light client generator (#6236)
2021-03-18 13:02:05 +01:00
mergify[bot]
4e25703d58 rpc/jsonrpc/server: return an error in WriteRPCResponseHTTP(Error) (bp #6204) (#6230)
* rpc/jsonrpc/server: return an error in WriteRPCResponseHTTP(Error) (#6204)

instead of panicking
Closes #5529

(cherry picked from commit 00b9524168)

# Conflicts:
#	CHANGELOG_PENDING.md
#	rpc/jsonrpc/server/http_json_handler.go
#	rpc/jsonrpc/server/http_server.go
#	rpc/jsonrpc/server/http_server_test.go
#	rpc/jsonrpc/server/http_uri_handler.go

* resolve conflicts

* fix linting

* fix conflict

Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
Co-authored-by: Marko Baricevic <marbar3778@yahoo.com>
2021-03-17 14:55:05 +00:00
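A sketch of the shape of this change, using a simplified stand-in for the rpc/jsonrpc/server helper rather than its real signature: marshal and write failures are returned to the caller instead of panicking.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// writeRPCResponseHTTP is a simplified stand-in for the server helper.
func writeRPCResponseHTTP(w http.ResponseWriter, res interface{}) error {
	body, err := json.Marshal(res)
	if err != nil {
		return fmt.Errorf("json marshal: %w", err) // previously: panic
	}
	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(body)
	return err
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := writeRPCResponseHTTP(w, map[string]string{"result": "ok"}); err != nil {
			fmt.Println("write response:", err) // log instead of crashing
		}
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```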
mergify[bot]
d004a584f8 use error.Is to check for nondeterministic vote error type (#6237) (#6239)
(cherry picked from commit bf8cce83db)

Co-authored-by: Callum Waters <cmwaters19@gmail.com>
2021-03-15 11:20:33 +01:00
mergify[bot]
11523b1302 note: add nondeterministic note to events (#6220) (#6225)
## Description

Since events are not hashed into the header, they can be non-deterministic, and changing an event is not consensus-breaking. Will update docs in the spec

(cherry picked from commit 884d4d5252)

Co-authored-by: Marko <marbar3778@yahoo.com>
2021-03-09 16:39:19 +04:00
mergify[bot]
8bb85856d0 e2e: add benchmarking functionality (bp #6210) (#6216) 2021-03-05 15:30:18 +01:00
mergify[bot]
b9cdd0e28e indexer: remove info log (#6194)
Co-authored-by: Aleksandr Bezobchuk <alexanderbez@users.noreply.github.com>
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-03-04 14:47:42 +00:00
mergify[bot]
1b5697a41d mempool/rpc: log grooming (bp #6201) (#6203) 2021-03-04 09:04:13 -05:00
mergify[bot]
a047a4a70f logs: cleanup (#6198)
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-03-04 10:42:19 +00:00
mergify[bot]
52b1d90f56 rpc/jsonrpc: Unmarshal RPCRequest correctly (bp #6191) (#6193)
* rpc/jsonrpc: Unmarshal RPCRequest correctly (#6191)

i.e. without a double pointer. With a double pointer, it was possible to
submit a `null` value, which would crash the server.

```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x189ddc0]

goroutine 1 [running]:
github.com/tendermint/tendermint/rpc/jsonrpc/types.(*RPCRequest).UnmarshalJSON(0xc0000147e0, 0xc00029f201, 0x4, 0x1ff, 0x883baa0, 0xc0000147e0)
        /Users/anton/go/src/github.com/tendermint/tendermint/rpc/jsonrpc/types/types.go:70 +0x100
encoding/json.(*decodeState).literalStore(0xc000216bb0, 0xc00029f201, 0x4, 0x1ff, 0x1998800, 0xc0000147e0, 0x199, 0xc000231700, 0x10e0a5e, 0x197)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:860 +0x30ce
encoding/json.(*decodeState).value(0xc000216bb0, 0x1998800, 0xc0000147e0, 0x199, 0x1998800, 0xc0000147e0)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:384 +0x40c
encoding/json.(*decodeState).array(0xc000216bb0, 0x18df040, 0xc0001be540, 0x16, 0xc000216bd8, 0x10e405b)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:558 +0x365
encoding/json.(*decodeState).value(0xc000216bb0, 0x18df040, 0xc0001be540, 0x16, 0x16, 0x6e)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:360 +0x22f
encoding/json.(*decodeState).unmarshal(0xc000216bb0, 0x18df040, 0xc0001be540, 0xc000216bd8, 0x0)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:180 +0x2c9
encoding/json.Unmarshal(0xc00029f200, 0x6, 0x200, 0x18df040, 0xc0001be540, 0x0, 0x0)
        /usr/local/Cellar/go/1.16/libexec/src/encoding/json/decode.go:107 +0x15d
```

(cherry picked from commit fe4e97afe0)

# Conflicts:
#	CHANGELOG_PENDING.md

* fix conflict

Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
2021-03-02 14:46:48 +04:00
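A minimal reproduction of the bug class, using a stand-in `Request` type rather than the real `RPCRequest`: unmarshaling `null` into a double pointer leaves the inner pointer nil without an error, so an unconditional dereference panics.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Request struct{ Method string }

func main() {
	// Buggy shape: unmarshal into **Request. JSON `null` sets req to nil
	// and returns no error.
	var req *Request
	if err := json.Unmarshal([]byte(`null`), &req); err != nil {
		fmt.Println("unmarshal:", err)
	}
	if req == nil {
		// The fix: unmarshal into the value (single pointer), or nil-check
		// before use, instead of dereferencing unconditionally.
		fmt.Println("got null request; rejecting instead of dereferencing")
		return
	}
	fmt.Println(req.Method)
}
```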
mergify[bot]
28bebe3ddb docs/tutorials: fix sample code #6186
Co-authored-by: winor <12413150+winor30@users.noreply.github.com>
Co-authored-by: Marko Baricevic <marbar3778@yahoo.com>
2021-03-01 08:41:49 +00:00
Tess Rinearson
dea73e08b3 changelog: update for 0.34.8 (#6181) 2021-02-25 12:30:29 +01:00
mergify[bot]
28ce355656 libs/log: [JSON format] include timestamp (bp #6174) (#6179)
Closes #6146
2021-02-25 11:27:49 +04:00
mergify[bot]
55ae781efa logging: print string instead of callback (#6178)
## Description

Fixes a marshaling error in the SDK.

closes https://github.com/cosmos/cosmos-sdk/issues/8578

The output stays the same; we avoid passing the callback because the SDK uses typed logging.

Co-authored-by: Marko <marbar3778@yahoo.com>
2021-02-24 19:08:05 +00:00
mergify[bot]
0191a22636 state executor: groom logs (bp #6152) (#6172) 2021-02-24 09:50:46 -05:00
Tess Rinearson
9d9b947b02 goreleaser: reintroduce arm64 build instructions 2021-02-23 11:20:19 +01:00
Tess Rinearson
c6e0d20d4b Revert "Revert "tooling: remove tools/Makefile (bp #6102) (#6106)""
This reverts commit afd07096a7.

I had believed that this tooling change could have been what broke our
GoReleaser flow; I now know that it was a result of changes in Go 1.16
and an update to GoReleaser! GoReleaser has now been updated again
and our flow should be un-broken.
2021-02-23 11:20:06 +01:00
Tess Rinearson
efd9d07257 changelog: fix changelog pending version numbering (#6149) 2021-02-19 14:51:18 +01:00
mergify[bot]
a0f376127d consensus: more log grooming (bp #6140) (#6143) 2021-02-18 14:23:12 -05:00
mergify[bot]
8d3c36ccc3 abci: Fix ReCheckTx for Socket Client (bp #6124) (#6125) 2021-02-18 08:36:05 -05:00
Tess Rinearson
15eb2c2211 .goreleaser: remove arm64 build instructions and bump changelog again (#6131) 2021-02-18 03:04:16 +01:00
Tess Rinearson
e4d2893ff6 changelog: bump to v0.34.6 2021-02-18 02:36:01 +01:00
Tess Rinearson
afd07096a7 Revert "tooling: remove tools/Makefile (bp #6102) (#6106)"
This reverts commit 1b2174a0da.
2021-02-18 02:36:01 +01:00
Tess Rinearson
340071d81b changelog: update for 0.34.5 (#6129) 2021-02-18 02:09:16 +01:00
Tess Rinearson
53d40e1092 consensus: remove privValidator from log call (#6128) 2021-02-18 01:47:55 +01:00
Aleksandr Bezobchuk
bedb00d252 consensus: Groom Logs (#5917)
Executed a local network using simapp and looked for logs that seemed superfluous. This isn't by any means an exhaustive grooming, but it should drastically improve the legibility of the logs.

ref: #5912
2021-02-17 10:05:13 +00:00
mergify[bot]
1030072dd0 changelog: update 0.34.3 changelog with details on security vuln (bp #6108) (#6110)
* changelog: update 0.34.3 changelog with details on security vuln (#6108)

Closes #6095.

(cherry picked from commit df0b868415)

# Conflicts:
#	CHANGELOG.md

* solve conflicts

Co-authored-by: Tess Rinearson <tess.rinearson@gmail.com>
Co-authored-by: Marko Baricevic <marbar3778@yahoo.com>
2021-02-15 14:51:54 +01:00
mergify[bot]
1b2174a0da tooling: remove tools/Makefile (bp #6102) (#6106)
## Description

We use Docker for all protobuf-related items, which makes it unnecessary to provide a way to download the tooling.

ref #6103

Co-authored-by: Tess Rinearson <tess.rinearson@gmail.com>
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-02-12 10:09:29 +00:00
Tess Rinearson
6bac9d9f43 makefile: remove call to tools (#6104) 2021-02-11 22:31:17 +01:00
Tess Rinearson
5efbbab789 changelog: improve with suggestions from @melekes (#6097) 2021-02-11 20:47:43 +01:00
Tess Rinearson
4a0fab041b changelog: update for v0.34.4 (#6096) 2021-02-11 19:13:40 +01:00
Callum Waters
5ee2ada942 .github: remove erik as reviewer from dependapot (#6076) 2021-02-11 17:29:52 +01:00
Callum Waters
fbf2c3815d check block store base is non negative before sending block meta or commits (#6042) 2021-02-11 17:29:52 +01:00
dependabot[bot]
cc57a560e7 build(deps-dev): Bump watchpack from 2.1.0 to 2.1.1 in /docs (#6063)
Bumps [watchpack](https://github.com/webpack/watchpack) from 2.1.0 to 2.1.1.
Release notes (v2.1.1): fix warnings with ENOENT when symlinks are resolved by watchpack.

Commits: f1b5e2d (release 2.1.1), cbfc11a (merge webpack/watchpack#188), 7684df0 (fix: adds ENOENT for non-windows errors). Full diff: https://github.com/webpack/watchpack/compare/v2.1.0...v2.1.1
2021-02-11 17:18:45 +01:00
Erik Grinaker
950c9f71b5 CODEOWNERS: remove erikgrinaker (#6057) 2021-02-11 17:18:45 +01:00
dependabot[bot]
90a2c33285 build(deps): Bump actions/cache from v2.1.3 to v2.1.4 (#6055)
Bumps [actions/cache](https://github.com/actions/cache) from v2.1.3 to v2.1.4.
- [Release notes](https://github.com/actions/cache/releases)
- [Commits](https://github.com/actions/cache/compare/v2.1.3...26968a09c0ea4f3e233fdddbafd1166051a095f6)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-02-11 17:18:45 +01:00
Anton Kaliaev
093dcfc8a0 goreleaser: downcase archive and binary names (#6029)
before:

```
Tendermint_0.34.3_darwin_amd64.tar.gz

-rw-r--r--  0 runner docker 192329 Jan 19 19:30 CHANGELOG.md
-rw-r--r--  0 runner docker    321 Jan 19 19:30 CHANGELOG_PENDING.md
-rw-r--r--  0 runner docker  11382 Jan 19 19:30 LICENSE
-rw-r--r--  0 runner docker   8165 Jan 19 19:30 README.md
-rwxr-xr-x  0 runner docker 23224320 Jan 19 19:30 tendermint
```

after:

```
tendermint_0.34.3_darwin_amd64.tar.gz

-rw-r--r--  0 runner docker 192329 Jan 19 19:30 CHANGELOG.md
-rw-r--r--  0 runner docker    321 Jan 19 19:30 CHANGELOG_PENDING.md
-rw-r--r--  0 runner docker  11382 Jan 19 19:30 LICENSE
-rw-r--r--  0 runner docker   8165 Jan 19 19:30 README.md
-rwxr-xr-x  0 runner docker 23224320 Jan 19 19:30 tendermint
```
2021-02-11 17:09:10 +01:00
Anton Kaliaev
72851a12d3 libs/log: format []byte as hexadecimal string (uppercased) (#5960)
Closes: #5806

Co-authored-by: Lanie Hei <heixx011@umn.edu>
2021-02-11 17:02:38 +01:00
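The described formatting boils down to Go's `%X` verb: uppercase hexadecimal with no separators. A two-line demonstration:

```go
package main

import "fmt"

func main() {
	b := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Printf("%X\n", b) // DEADBEEF
}
```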
dependabot[bot]
07979d88d0 build(deps): Bump github.com/tendermint/tm-db from 0.6.3 to 0.6.4 (#6073)
Bumps [github.com/tendermint/tm-db](https://github.com/tendermint/tm-db) from 0.6.3 to 0.6.4.
Release notes (v0.6.4): https://github.com/tendermint/tm-db/blob/v0.6.4/CHANGELOG.md#064

Changelog (0.6.4, 2021-02-09): bump protobuf to 1.3.2 and grpc to 1.35.0.

Commits: 6f9a08c (update changelog for v0.6.4, #150), 4de5f6b (CODEOWNERS: remove erikgrinaker, #148), 9f5cde0 (bump google.golang.org/grpc from 1.33.2 to 1.35.0, #143), c606a78 (bump github.com/stretchr/testify from 1.6.1 to 1.7.0, #142), 0438145 (bump github.com/gogo/protobuf from 1.3.1 to 1.3.2, #140), f2b292d (testing: docker deployment, #144), 3157a92 (changelog: update with 0.5.2 release, #138). Full diff: https://github.com/tendermint/tm-db/compare/v0.6.3...v0.6.4
2021-02-11 16:56:50 +01:00
Marko Baricevic
12eac92738 docs: fix typo in state sync example (#5989) 2021-02-11 15:08:23 +00:00
Aleksandr Bezobchuk
73375b0912 backport v0.34.x: 6000 & 6001 2021-02-11 09:50:18 -05:00
Marko
e3a79d4e2e tests: fix make test (#5966)
## Description
 
- bump deadlock dep to master
  - fixes `make test` since we now use `deadlock.Once`

Closes: #XXX
2021-02-11 14:44:19 +01:00
Marko
fa3287c012 maverick: reduce some duplication (#6052)
- Reduce duplication in messages and metrics.
- Merge WAL interfaces, to push the developer to make changes in both places.
2021-02-11 14:44:19 +01:00
Marko
cb7c9564a4 docker: dont login when in PR (#5961) 2021-02-11 14:44:19 +01:00
odidev
9df5fcf1f1 docker: release Linux/ARM64 image (#5925)
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-02-11 14:44:19 +01:00
Anton Kaliaev
d575f8a38f fix build 2021-02-11 16:10:28 +04:00
Anton Kaliaev
1e355b6b56 .github: use job ID (not step ID) inside if condition (#6060)
https://stackoverflow.com/a/66073112/820520
2021-02-11 16:10:28 +04:00
Anton Kaliaev
108073077b .github: fix fuzz-nightly job (#5965)
`outputs` is a property of the job, not of an individual step.
2021-02-11 16:10:28 +04:00
Anton Kaliaev
8b48d23084 terminate go-fuzz gracefully (w/ SIGINT) (#5973)
and preserve exit code.

```
2021/01/26 03:34:49 workers: 2, corpus: 4 (8m28s ago), crashers: 0, restarts: 1/9976, execs: 11013732 (21596/sec), cover: 121, uptime: 8m30s
make: *** [fuzz-mempool] Terminated
Makefile:5: recipe for target 'fuzz-mempool' failed
Error: Process completed with exit code 124.
```

https://github.com/tendermint/tendermint/runs/1766661614

`continue-on-error` should make GH ignore any error codes.
2021-02-11 16:10:28 +04:00
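The actual fix lives in the GitHub Actions workflow, but the "SIGINT, then read the exit code" idea can be sketched in plain Go (the child command and timeout below are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60") // stand-in for the fuzzer process
	if err := cmd.Start(); err != nil {
		fmt.Println("start:", err)
		os.Exit(1)
	}

	// Once the time budget is spent, ask the process to stop gracefully
	// with SIGINT rather than killing it outright.
	timer := time.AfterFunc(2*time.Second, func() {
		_ = cmd.Process.Signal(os.Interrupt)
	})
	defer timer.Stop()

	err := cmd.Wait()
	if exitErr, ok := err.(*exec.ExitError); ok {
		// ExitCode() is the child's own code if it handled SIGINT and
		// exited; it is -1 if the signal killed the process directly.
		fmt.Println("exit code:", exitErr.ExitCode())
	}
}
```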
Anton Kaliaev
c3d2f68c05 .github: archive crashers and fix set-crashers-count step (#5992) 2021-02-11 16:10:28 +04:00
Anton Kaliaev
0f58a8470a .github: rename crashers output (fuzz-nightly-test) (#5993) 2021-02-11 16:10:28 +04:00
Anton Kaliaev
197b746f8d test/fuzz: move fuzz tests into this repo (#5918)
Co-authored-by: Emmanuel T Odeke <emmanuel@orijtech.com>

Closes #5907

- add init-corpus to blockchain reactor
- remove validator-set FromBytes test: now that we have proto, we don't need to test it! bye amino
- simplify mempool test (do we want to test a remote ABCI app?)
- do not recreate mux on every crash in jsonrpc test
- update p2p pex reactor test
- remove p2p/listener test: the API has changed, plus I did not understand what it tested anyway
- update secretconnection test
- add readme and makefile
- list inputs in readme
- add nightly workflow
- remove blockchain fuzz test: EncodeMsg / DecodeMsg no longer exist
2021-02-11 16:10:28 +04:00
Marko Baricevic
06623202f0 Update metrics.md (#5930) 2021-02-11 10:55:29 +00:00
Marko
a3a9398971 proto: docker deployment (#5931) 2021-02-11 10:55:29 +00:00
Marko
7b7d6e1f98 docs: change v0.33 version (#5950)
- change version for v0.33.x

Closes: #XXX
2021-02-11 10:55:29 +00:00
Erik Grinaker
98be3f2aab Makefile: always pull image in proto-gen-docker. (#5953)
The `proto-gen-docker` target didn't pull an updated Docker image, and would use a local image if present, which could be outdated and produce wrong results.
2021-02-11 10:55:29 +00:00
Tess Rinearson
3e41bb57d6 .github/workflows: cleanup yaml for e2e nightlies (#6049) 2021-02-11 11:43:19 +01:00
Tess Rinearson
6252b63e53 .github/workflows: fix whitespace in e2e config file (#6043) 2021-02-11 11:43:19 +01:00
Tess Rinearson
591e55b301 .github/workflows: separate e2e workflows for 0.34.x and master (#6041)
Co-authored-by: Erik Grinaker <erik@interchain.berlin>
Co-authored-by: Marko <marbar3778@yahoo.com>
2021-02-11 11:43:19 +01:00
Erik Grinaker
0028ac38ed test/e2e: increase validator tolerances (#6037) 2021-02-11 11:43:19 +01:00
Tess Rinearson
57aed01639 .github/workflows: try different e2e nightly test set (#6036) 2021-02-11 11:43:19 +01:00
Erik Grinaker
8788673a3e test/e2e: increase sign/propose tolerances (#6033)
E2E tests often fail because validators miss signing or proposing blocks. Often this is because e.g. there's a lot of disruption in the network or it takes a long time to start up all the nodes.

This changes the test criteria to only check for 3 signed/proposed blocks, rather than a fraction of the expected blocks. This should be enough to catch most issues, apart from performance problems causing nodes to miss signing/proposing, but we may want separate tests for those sorts of things.
2021-02-11 11:43:19 +01:00
Tess Rinearson
f009a1a731 Revert "e2e: releases nightly (#5906)" (#6031)
This reverts commit 64961e2267, to see if it will make the workflow dispatch trigger reappear and fix our Slack notification link.
2021-02-11 11:43:19 +01:00
Anton Kaliaev
33fb03fcc8 test/e2e: enable pprof server to help debugging failures (#6003) 2021-02-11 11:43:19 +01:00
Marko
eb09376ba0 e2e: releases nightly (#5906) 2021-02-11 11:43:19 +01:00
Anton Kaliaev
f48b154751 evidence: terminate broadcastEvidenceRoutine when peer is stopped (#6068) 2021-02-09 11:36:36 +04:00
Callum Waters
2dd5cbfb5c light: remove witnesses in order of decreasing index (#6065) 2021-02-08 17:36:21 +01:00
Callum Waters
3c22ed8320 light: fix panic with RPC calls to commit and validator when height is nil (#6040) 2021-02-04 15:17:34 +01:00
Anton Kaliaev
7f02d8971c light/provider/http: fix Validators (#6024)
Closes #6010
2021-02-04 13:28:59 +04:00
Callum Waters
b021ad5b7a test: don't use foo-bar.net in TestHTTPClientMakeHTTPDialer (#5997) (#6047)
This test relied on connecting to the external site `foo-bar.net`, and (predictably) the site went down and broke all of our CI runs. This changes it to use local HTTP servers instead.

Co-authored-by: Erik Grinaker <erik@interchain.berlin>
2021-02-04 13:11:07 +04:00
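The replacement pattern is Go's `net/http/httptest`: a throwaway local server gives the test a deterministic endpoint, so no external site can break CI. A minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Local server stands in for the external hostname the test used to dial.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL) // local, deterministic endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // ok
}
```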
Cyrus Goh
f89eca427a docs: bump package-lock.json of v0.34.x (#5952) 2021-01-22 20:45:04 +00:00
Marko
0213e544e0 docs: package-lock.json fix (#5948) 2021-01-22 19:03:31 +00:00
Tess Rinearson
6b2ab0f0e1 changelog: update for 0.34.3 (#5926) 2021-01-19 16:12:47 +01:00
Callum
a2a6852ab9 use correct source of evidence time
Conflicting votes are now sent to the evidence pool to form duplicate-vote evidence only once
the height of the evidence is complete and the block time is finalized.
2021-01-19 16:00:02 +01:00
Tess Rinearson
7ea4dc52ed readme: add security mailing list (#5916)
No one knows we have this mailing list 🙈
2021-01-19 12:58:35 +01:00
dependabot[bot]
d969a5ed1b build(deps): Bump vuepress-theme-cosmos from 1.0.179 to 1.0.180 in /docs (#5915)
Bumps [vuepress-theme-cosmos](https://github.com/cosmos/vuepress-theme-cosmos) from 1.0.179 to 1.0.180.
Commits: see the full diff at https://github.com/cosmos/vuepress-theme-cosmos/commits
2021-01-19 12:58:35 +01:00
Tess Rinearson
0def3a964a config: fix misspellings (#5914)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2021-01-19 12:58:35 +01:00
Marko
54338a52fa proto: bump gogoproto (1.3.2) (#5886)
- bump gogoproto (1.3.2)
- regenerate proto files

Closes: #XXX
2021-01-19 12:41:35 +01:00
Tess Rinearson
bf45df0b2b mod: go mod tidy 2021-01-19 12:17:29 +01:00
Tess Rinearson
46fa6e666c .github/codeowners: add alexanderbez (#5913)
* .github/codeowners: add alexanderbez

* Update .github/CODEOWNERS

Co-authored-by: Marko <marbar3778@yahoo.com>

Co-authored-by: Marko <marbar3778@yahoo.com>
2021-01-19 12:17:29 +01:00
dependabot[bot]
a18e3de3ac build(deps): Bump google.golang.org/grpc from 1.34.0 to 1.35.0 (#5902)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.34.0 to 1.35.0.
Release 1.35.0 behavior changes: roundrobin strips attributes from addresses (grpc/grpc-go#4024); balancer sets RPC metadata in address attributes instead of the Metadata field (grpc/grpc-go#4041).

New features: unix-abstract scheme support (grpc/grpc-go#4079, thanks @resec); xds experimental RouteAction timeout support (grpc/grpc-go#4116); xds experimental circuit-breaking support (grpc/grpc-go#4050).

Bug fixes: xds `server_features` should be a child of `xds_servers` and not a sibling (grpc/grpc-go#4087); xds NACKs more invalid RDS responses (grpc/grpc-go#4120).

Release 1.34.1: xds client updated v3 type for http connection manager (grpc/grpc-go#4137); lrs uses JSON for locality's String representation (grpc/grpc-go#4135); eds/lrs handles nil when LRS is disabled (grpc/grpc-go#4086); client fixes "unix" scheme handling for some corner cases (grpc/grpc-go#4021).

Full diff: https://github.com/grpc/grpc-go/compare/v1.34.0...v1.35.0
2021-01-19 12:17:29 +01:00
dependabot[bot]
e8d35597df build(deps): Bump github.com/stretchr/testify from 1.6.1 to 1.7.0 (#5897)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.6.1 to 1.7.0.
Release notes (v1.7.0): minor feature improvements and bug fixes.

Commits: acba37e (only use repeatability if no repeatability left), eb8c41e (add more tests to mock package), a5830c5 (extract method to evaluate closest match), 1962448 (use Repeatability as tie-breaker for closest match), 92707c0 (fix the link to not point to assert only), 05dd0b2 (update the readme to point to pkg.dev), c26b7f3 (update assertions.go), 8fb4b24 (fix spew circular-data breakage from recent golang/protobuf changes), dc8af72 (add generated code for positive/negative assertion), 1544508 (add assert positive/negative). Full diff: https://github.com/stretchr/testify/compare/v1.6.1...v1.7.0
2021-01-19 12:17:29 +01:00
Erik Grinaker
bdbe4a7cd7 test/e2e: disable abci/grpc and blockchain/v2 due to flake (#5854)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2021-01-12 18:24:44 +01:00
Callum Waters
6a7a431ba5 remove misbehaviors from e2e generator (#5629) 2021-01-12 18:24:44 +01:00
Tess Rinearson
23c8a7a93d changelog: prepare 0.34.2 release (#5894) 2021-01-12 17:42:26 +01:00
Callum Waters
cf3a720988 state sync: correctly set last consensus params height (#5889) 2021-01-12 16:44:42 +01:00
Callum Waters
ad552b2bb1 evidence: buffer evidence from consensus (#5890) 2021-01-12 16:44:42 +01:00
Erik Grinaker
5d63765990 os: simplify EnsureDir() (#5871)
#5852 fixed an issue with error propagation in `os.EnsureDir()`. However, this function is basically identical to `os.MkdirAll()`, and can be replaced entirely with a call to it. We keep the function for backwards compatibility.
2021-01-06 17:27:51 +01:00
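A sketch of the simplification: `EnsureDir` reduces to a thin wrapper over `os.MkdirAll`, which already creates missing parents, propagates IO errors, and fails if the path exists as a non-directory.

```go
package main

import (
	"fmt"
	"os"
)

// EnsureDir is kept only for backwards compatibility; it is now just
// os.MkdirAll, which is a no-op if dir already exists as a directory.
func EnsureDir(dir string, mode os.FileMode) error {
	return os.MkdirAll(dir, mode)
}

func main() {
	if err := EnsureDir("/tmp/example-dir", 0700); err != nil {
		fmt.Println("ensure dir:", err)
	}
}
```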
Erik Grinaker
3185bb8b22 blockchain/v0: stop tickers on poolRoutine exit (#5860)
Fixes #5841.
2021-01-06 17:27:51 +01:00
Erik Grinaker
2eba38051a blockchain/v2: fix missing mutex unlock (#5862)
Fixes #5843.
2021-01-06 17:27:51 +01:00
Erik Grinaker
15eed81f12 test/consensus: improve WaitGroup handling in Byzantine tests (#5861)
Fixes #5845.
2021-01-06 17:27:51 +01:00
Erik Grinaker
fca7c6449a libs/os: EnsureDir now returns IO errors and checks file type (#5852)
Fixes #5839.
2021-01-06 17:27:51 +01:00
Erik Grinaker
c2b5f8bc4a abci/grpc: fix invalid mutex handling in StopForError() (#5849)
Fixes #5840.
2021-01-06 17:27:51 +01:00
Erik Grinaker
4246000a8c tools/tm-signer-harness: fix listener leak in newTestHarnessListener() (#5850)
Fixes #5837.
2021-01-06 17:27:51 +01:00
Tess Rinearson
2924d41f8b changelog: update changelog for v0.34.1 (#5872) 2021-01-06 16:32:43 +01:00
Erik Grinaker
13833cba9e p2p: fix MConnection inbound traffic statistics and rate limiting (#5868) (#5870)
Fixes #5866. Inbound traffic monitoring (and by extension inbound rate limiting) was inadvertently removed in 660e72a.
2021-01-06 16:10:28 +01:00
Tess Rinearson
17ce2ccc92 CHANGELOG: prepare 0.34.1-rc1 (#5832) 2020-12-23 18:45:04 +01:00
Anton Kaliaev
b1328db07f modify Reactor priorities (#5826) (#5830)
blockchain/vX reactor priority was decreased because during normal operation
(i.e. when the node is not fast syncing) the blockchain priority can't be
the same as the consensus reactor priority. Otherwise, it's theoretically possible to
slow down consensus by constantly requesting blocks from the node.

NOTE: ideally the blockchain/vX reactor priority would be dynamic, e.g. when
the node is fast syncing the priority is 10 (max), but once it's done
fast syncing the priority gets decreased to 5 (only to serve blocks
for other nodes). That's not possible now, so I decided to
focus on normal operation (priority = 5).

evidence and consensus critical messages are more important than
the mempool ones, hence priorities are bumped by 1 (from 5 to 6).

statesync reactor priority was changed from 1 to 5 to be the same as
blockchain/vX priority.

Refs https://github.com/tendermint/tendermint/issues/5816
2020-12-23 18:05:14 +01:00
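Priorities are declared per channel in each reactor's channel descriptors. A hedged sketch using the v0.34.x `p2p/conn.ChannelDescriptor` type (the ID and priority values below are illustrative):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p/conn"
)

func main() {
	// Each reactor advertises its channels with a priority; the multiplexed
	// connection uses these to decide which pending messages to send first.
	ch := &conn.ChannelDescriptor{
		ID:       0x40, // illustrative channel ID
		Priority: 5,    // e.g. blockchain reactor during normal operation
	}
	fmt.Printf("channel %#x priority %d\n", ch.ID, ch.Priority)
}
```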
Marko
829a9e1de7 docs/tutorials: specify 0.34 (#5823)
# Description

Specify 0.34 for tutorials.

Closes: #5735
2020-12-21 09:39:07 -08:00
Anton Kaliaev
dc101f2eff mempool: disable MaxBatchBytes (#5800)
@p4u from vocdoni.io reported that the mempool might behave incorrectly under
high load. The consequences can range from pauses between blocks to peers
disconnecting from this node.

My current theory is that the flowrate lib we're using for flow control
(multiplexing over a single TCP connection) was not designed with large blobs
(a 1MB batch of txs) in mind.

I tried decreasing the mempool reactor priority, but that did not
have any visible effect. What actually worked was adding a time.Sleep
into mempool.Reactor#broadcastTxRoutine after each successful send,
i.e. manual flow control of a sort.

As a temporary remedy (until the mempool package
is refactored), max-batch-bytes was disabled. Transactions will be sent
one by one, without batching.

Closes #5796
2020-12-21 20:29:31 +04:00
Anton Kaliaev
dc90cf60d5 mempool: introduce KeepInvalidTxsInCache config option (#5813)
When set to true, an invalid transaction will be kept in the cache (this may help some applications to protect against spam).

NOTE: this is a temporary config option. A more correct solution would be to add a TTL to each transaction (i.e. CheckTx may return a TTL in ResponseCheckTx).

Closes: #5751
2020-12-21 20:29:14 +04:00
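A sketch of enabling the option from Go, assuming the `KeepInvalidTxsInCache` field this commit adds to the v0.34.x mempool config (toml key `keep-invalid-txs-in-cache`):

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()

	// Invalid txs stay in the cache, so re-submitted spam is rejected
	// cheaply without being re-checked against the application.
	c.Mempool.KeepInvalidTxsInCache = true

	fmt.Println("keep-invalid-txs-in-cache:", c.Mempool.KeepInvalidTxsInCache)
}
```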
Callum Waters
9f0d71e81f cmd: hyphen-case cli v0.34.1 (#5786) 2020-12-11 13:22:09 +01:00
Anton Kaliaev
7f06371915 evidence: omit bytes field (#5745)
Follow-up to https://github.com/tendermint/tendermint/pull/5743
2020-12-04 12:18:14 +01:00
Tess Rinearson
2a4fd3804c blockchain/v1: omit incoming message bytes from log 2020-12-04 12:18:14 +01:00
Tess Rinearson
0d9606e1b4 reactors: omit incoming message bytes from reactor logs (#5743)
After a reactor has failed to parse an incoming message, it shouldn't output the "bad" data into the logs, as that data is unfiltered and could contain anything. (We also don't think this information is helpful to have in the logs anyway.)
2020-12-04 12:18:14 +01:00
Erik Grinaker
ce144a1d71 test: fix TestByzantinePrevoteEquivocation flake (#5710)
This fixes spurious `TestByzantinePrevoteEquivocation` failures by extending the block range and time spent waiting for evidence. I've seen many runs where the evidence isn't committed until e.g. height 27. Haven't looked into _why_ this happens, but as long as the evidence is committed eventually and the test doesn't spuriously fail I'm (mostly) happy. WDYT @cmwaters?
2020-11-30 11:30:40 +01:00
Marko
6c0d4070c2 ci: build for 32 bit, libs: fix overflow (#5700) 2020-11-30 11:00:35 +01:00
Erik Grinaker
15b70373cc crypto: fix infinite recursion in Secp256k1 string formatting (#5707) (#5709)
This caused stack overflow panics in E2E tests, e.g.:

```
2020-11-24T02:37:17.6085640Z validator04    | runtime: goroutine stack exceeds 1000000000-byte limit
2020-11-24T02:37:17.6087818Z validator04    | runtime: sp=0xc0234b23c0 stack=[0xc0234b2000, 0xc0434b2000]
2020-11-24T02:37:17.6088920Z validator04    | fatal error: stack overflow
2020-11-24T02:37:17.6089776Z validator04    |
2020-11-24T02:37:17.6090569Z validator04    | runtime stack:
2020-11-24T02:37:17.6091677Z validator04    | runtime.throw(0x12dc476, 0xe)
2020-11-24T02:37:17.6093123Z validator04    | 	/usr/local/go/src/runtime/panic.go:1116 +0x72
2020-11-24T02:37:17.6094320Z validator04    | runtime.newstack()
2020-11-24T02:37:17.6095374Z validator04    | 	/usr/local/go/src/runtime/stack.go:1067 +0x78d
2020-11-24T02:37:17.6096381Z validator04    | runtime.morestack()
2020-11-24T02:37:17.6097657Z validator04    | 	/usr/local/go/src/runtime/asm_amd64.s:449 +0x8f
2020-11-24T02:37:17.6098505Z validator04    |
2020-11-24T02:37:17.6099328Z validator04    | goroutine 88 [running]:
2020-11-24T02:37:17.6100470Z validator04    | runtime.heapBitsSetType(0xc009565380, 0x20, 0x18, 0x1137e00)
2020-11-24T02:37:17.6101961Z validator04    | 	/usr/local/go/src/runtime/mbitmap.go:911 +0xaa5 fp=0xc0234b23d0 sp=0xc0234b23c8 pc=0x432625
2020-11-24T02:37:17.6103906Z validator04    | runtime.mallocgc(0x20, 0x1137e00, 0x117b601, 0x11e9240)
2020-11-24T02:37:17.6105179Z validator04    | 	/usr/local/go/src/runtime/malloc.go:1090 +0x5a5 fp=0xc0234b2470 sp=0xc0234b23d0 pc=0x428b25
2020-11-24T02:37:17.6106540Z validator04    | runtime.convTslice(0xc002743710, 0x21, 0x21, 0xc0234b24e8)
2020-11-24T02:37:17.6107861Z validator04    | 	/usr/local/go/src/runtime/iface.go:385 +0x59 fp=0xc0234b24a0 sp=0xc0234b2470 pc=0x426379
2020-11-24T02:37:17.6109315Z validator04    | github.com/tendermint/tendermint/crypto/secp256k1.PubKey.String(...)
2020-11-24T02:37:17.6151692Z validator04    | 	/src/tendermint/crypto/secp256k1/secp256k1.go:161
2020-11-24T02:37:17.6153872Z validator04    | github.com/tendermint/tendermint/crypto/secp256k1.(*PubKey).String(0xc009565360, 0x11e9240, 0xc009565360)
2020-11-24T02:37:17.6157421Z validator04    | 	<autogenerated>:1 +0x65 fp=0xc0234b24f8 sp=0xc0234b24a0 pc=0x656965
2020-11-24T02:37:17.6159134Z validator04    | fmt.(*pp).handleMethods(0xc00956c680, 0x58, 0xc0234b2801)
2020-11-24T02:37:17.6161462Z validator04    | 	/usr/local/go/src/fmt/print.go:630 +0x30a fp=0xc0234b2768 sp=0xc0234b24f8 pc=0x518b8a
[...]
2020-11-24T02:37:17.6649685Z validator04    | 	/usr/local/go/src/fmt/print.go:630 +0x30a fp=0xc0234b7f48 sp=0xc0234b7cd8 pc=0x518b8a
2020-11-24T02:37:17.6651177Z validator04    | created by github.com/tendermint/tendermint/node.startStateSync
2020-11-24T02:37:17.6652521Z validator04    | 	/src/tendermint/node/node.go:587 +0x150

```
2020-11-24 14:01:53 +01:00
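The bug class is easy to reproduce: a `String()` method that formats its own receiver with a verb that consults `Stringer` (`%v`, `%s`, `%x`, `%X`) recurses until the stack overflows. A minimal sketch with a stand-in type; the fix formats the underlying bytes instead:

```go
package main

import "fmt"

type PubKey []byte

// Buggy variant (do not do this): formatting p re-invokes p.String(),
// recursing forever.
//
//	func (p PubKey) String() string { return fmt.Sprintf("PubKey{%X}", p) }

// Fixed variant: convert to a type without a String method first, so the
// fmt package formats raw bytes instead of calling back into String().
func (p PubKey) String() string {
	return fmt.Sprintf("PubKey{%X}", []byte(p))
}

func main() {
	fmt.Println(PubKey{0xde, 0xad}) // PubKey{DEAD}
}
```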
Tess Rinearson
182fa32851 .goreleaser: build for windows 2020-11-19 18:52:34 +01:00
Tess Rinearson
fe94825985 changelog: squash changelog from 0.34 RCs into one (#5687)
"Squashes" the changelog from RCs 2-6 into one changelog message for 0.34.0, and adds the changelog pending.
2020-11-19 18:43:04 +01:00
Tess Rinearson
386a44cd02 .goreleaser: don't build linux/arm 2020-11-19 18:32:32 +01:00
Marko
0f29b1631e fix docker deployment (#5647) 2020-11-19 18:03:24 +01:00
Tess Rinearson
b80d4d8ff0 relase_notes: add release notes for v0.34.0 2020-11-19 17:41:41 +01:00
Tess Rinearson
b5b53bfc0d upgrading: update 0.34 instructions with updates since RC4 (#5686) 2020-11-18 19:16:05 +01:00
Callum Waters
4ed0fddc37 light: make fraction parts uint64, ensuring that it is always positive (#5655) 2020-11-18 15:49:31 +01:00
Marko
23bc2f690c ci: remove add-path (#5674) 2020-11-18 15:21:52 +01:00
Marko
bea7673c1c e2e: use ed25519 for secretConn (remote signer) (#5678)
## Description

Hardcode ed25519 to dialTCPFn in e2e tests. 

I will backport `DefaultRequestHandler` fixes

This will be replaced when grpc is implemented.
2020-11-18 15:21:52 +01:00
Marko
26493bbbd8 test/e2e: fix secp failures (#5649) 2020-11-18 15:21:52 +01:00
Aleksandr Bezobchuk
53463b3fef rpc: fix content-type header 2020-11-16 10:54:34 -05:00
Anton Kaliaev
e0cf94f5b0 privval: reset pingTimer to avoid sending unnecessary pings (#5642) (#5668)
Refs #5550
2020-11-16 18:10:49 +04:00
Anton Kaliaev
047b5ea85e bump go version to 1.15 (#5639) (#5667) 2020-11-16 17:58:55 +04:00
Anton Kaliaev
9567477d55 privval: increase read/write timeout to 5s and calculate ping interval based on it (#5638) (#5666)

Partially closes #5550
2020-11-16 17:49:56 +04:00
Erik Grinaker
637d76254d go.mod: upgrade iavl and deps (#5657)
Bumps IAVL, which pulled in some other upgrades as well. I think they should be fine though.
2020-11-13 14:11:08 +01:00
dependabot[bot]
a447c507e4 build(deps): Bump github.com/tendermint/tm-db from 0.6.2 to 0.6.3
Bumps [github.com/tendermint/tm-db](https://github.com/tendermint/tm-db) from 0.6.2 to 0.6.3.
- [Release notes](https://github.com/tendermint/tm-db/releases)
- [Changelog](https://github.com/tendermint/tm-db/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tendermint/tm-db/compare/v0.6.2...v0.6.3)

Signed-off-by: dependabot[bot] <support@github.com>
2020-11-13 14:11:08 +01:00
dependabot[bot]
24d13479fe build(deps): Bump google.golang.org/grpc from 1.33.1 to 1.33.2 (#5635)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.33.1 to 1.33.2.
Release 1.33.2: protobuf updates all generated code to google.golang.org/protobuf (grpc/grpc-go#3932); xdsclient populates error details for NACK (grpc/grpc-go#3975); internal/credentials fixes a bug and adds a SPIFFEIDFromCert helper (grpc/grpc-go#3929).

Commits: 56d6328 (github: remove advancedtls examples test), 6396e4b (vet: ignore proto deprecation warnings), 0afe9d2 (github: add GitHub Actions workflow for tests; support in vet.sh, grpc/grpc-go#4005), 8a0ca33 (change version to 1.33.2), c1989b5 (protobuf: update all generated code, grpc/grpc-go#3932), b205df6 (xdsclient: populate error details for NACK, grpc/grpc-go#3975), 75e2768 (internal/credentials fix), 17493ac (change version to 1.33.2-dev). Full diff: https://github.com/grpc/grpc-go/compare/v1.33.1...v1.33.2
2020-11-13 14:11:08 +01:00
dependabot[bot]
9c32ad4a02 build(deps): Bump google.golang.org/grpc from 1.32.0 to 1.33.1 (#5544)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.32.0 to 1.33.1.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.32.0...v1.33.1)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Erik Grinaker <erik@interchain.berlin>
2020-11-13 14:11:08 +01:00
dependabot[bot]
de0bef5db5 build(deps): Bump github.com/spf13/cobra from 1.1.0 to 1.1.1 (#5526)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.1.0 to 1.1.1.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/spf13/cobra/releases">github.com/spf13/cobra's releases</a>.</em></p>
<blockquote>
<h2>v1.1.1</h2>
<ul>
<li><strong>Fix:</strong> yaml.v2 2.3.0 contained an unintended breaking change. This release reverts to yaml.v2 v2.2.8, which has recent critical CVE fixes but does not have the breaking changes. See <a href="https://github-redirect.dependabot.com/spf13/cobra/pull/1259">spf13/cobra#1259</a> for context.</li>
<li><strong>Fix:</strong> correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See <a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1049">spf13/cobra#1049</a> for context.</li>
</ul>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="86f8bfd7fe"><code>86f8bfd</code></a> fix manpage building with new go-md2man (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1255">#1255</a>)</li>
<li><a href="f32f4ef15b"><code>f32f4ef</code></a> Don't use yaml.v2 2.3.0 which has a breaking change (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1259">#1259</a>)</li>
<li>See full diff in <a href="https://github.com/spf13/cobra/compare/v1.1.0...v1.1.1">compare view</a></li>
</ul>
</details>
<br />


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.1.0&new-version=1.1.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)

2020-11-13 14:11:08 +01:00
dependabot[bot]
0a4432baf5 build(deps): Bump github.com/prometheus/client_golang from 1.7.1 to 1.8.0 (#5515)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.7.1 to 1.8.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/prometheus/client_golang/releases">github.com/prometheus/client_golang's releases</a>.</em></p>
<blockquote>
<h2>1.8.0 / 2020-10-15</h2>
<ul>
<li>[CHANGE] API client: Use <code>time.Time</code> rather than <code>string</code> for timestamps in <code>RuntimeinfoResult</code>. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/777">#777</a></li>
<li>[FEATURE] Export <code>MetricVec</code> to facilitate implementation of vectors of custom <code>Metric</code> types. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/803">#803</a></li>
<li>[FEATURE] API client: Support <code>/status/tsdb</code> endpoint. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/773">#773</a></li>
<li>[ENHANCEMENT] API client: Enable GET fallback on status code 501. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/802">#802</a></li>
<li>[ENHANCEMENT] Remove <code>Metric</code> references after reslicing to free up more memory. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/784">#784</a></li>
</ul>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md">github.com/prometheus/client_golang's changelog</a>.</em></p>
<blockquote>
<h2>1.8.0 / 2020-10-15</h2>
<ul>
<li>[CHANGE] API client: Use <code>time.Time</code> rather than <code>string</code> for timestamps in <code>RuntimeinfoResult</code>. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/777">#777</a></li>
<li>[FEATURE] Export <code>MetricVec</code> to facilitate implementation of vectors of custom <code>Metric</code> types. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/803">#803</a></li>
<li>[FEATURE] API client: Support <code>/status/tsdb</code> endpoint. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/773">#773</a></li>
<li>[ENHANCEMENT] API client: Enable GET fallback on status code 501. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/802">#802</a></li>
<li>[ENHANCEMENT] Remove <code>Metric</code> references after reslicing to free up more memory. <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/784">#784</a></li>
</ul>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="47cfdc9bb8"><code>47cfdc9</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/806">#806</a> from prometheus/beorn7/release</li>
<li><a href="67f573aafe"><code>67f573a</code></a> Cut v1.8.0</li>
<li><a href="ded2474420"><code>ded2474</code></a> Update dependencies</li>
<li><a href="3d1759b4c6"><code>3d1759b</code></a> Run check for unused/missing Go packages only against latest Go version</li>
<li><a href="e6ea98bdda"><code>e6ea98b</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/803">#803</a> from prometheus/beorn7/vec</li>
<li><a href="85aa957f63"><code>85aa957</code></a> Export MetricVec (again)</li>
<li><a href="6007b2b5ca"><code>6007b2b</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/802">#802</a> from prometheus/beorn7/fallback</li>
<li><a href="64b4a9cf9d"><code>64b4a9c</code></a> API client: Enable fallback on status code 501, too</li>
<li><a href="65c5578b2d"><code>65c5578</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/prometheus/client_golang/issues/800">#800</a> from prometheus/beorn7/doc</li>
<li><a href="b54b73c7b1"><code>b54b73c</code></a> Remove spurious commas from links to the docs site</li>
<li>Additional commits viewable in <a href="https://github.com/prometheus/client_golang/compare/v1.7.1...v1.8.0">compare view</a></li>
</ul>
</details>
<br />

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/prometheus/client_golang&package-manager=go_modules&previous-version=1.7.1&new-version=1.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)

2020-11-13 14:11:08 +01:00
dependabot[bot]
0bdc76a78c build(deps): Bump github.com/spf13/cobra from 1.0.0 to 1.1.0 (#5505)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.0.0 to 1.1.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/spf13/cobra/releases">github.com/spf13/cobra's releases</a>.</em></p>
<blockquote>
<h2>v1.1.0</h2>
<h2>Notable Changes</h2>
<ul>
<li>Extend Go completions and revamp zsh comp (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1070">#1070</a>)</li>
<li>Add completion for help command (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1136">#1136</a>)</li>
<li>Complete subcommands when TraverseChildren is set (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1171">#1171</a>)</li>
<li>Fix stderr printing functions (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/894">#894</a>)</li>
<li>fix: fish output redirection (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1247">#1247</a>)</li>
</ul>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/spf13/cobra/blob/master/CHANGELOG.md">github.com/spf13/cobra's changelog</a>.</em></p>
<blockquote>
<h1>Cobra Changelog</h1>
<h2>Pending</h2>
<ul>
<li>Fix man page doc generation - no auto generated tag when <code>cmd.DisableAutoGenTag = true</code> <a href="https://github.com/jpmcb">@jpmcb</a></li>
</ul>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="142dfb15a8"><code>142dfb1</code></a> Add example for making persistent flags required (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1135">#1135</a>)</li>
<li><a href="723d0c36fc"><code>723d0c3</code></a> Add tendermint and cosmos-sdk to the list of projects using cobra (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/855">#855</a>)</li>
<li><a href="b97b5ead31"><code>b97b5ea</code></a> fix: fish output redirection (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1247">#1247</a>)</li>
<li><a href="f64bfa1e08"><code>f64bfa1</code></a> Fix zsh completion not working on the first time in a shell session (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1237">#1237</a>)</li>
<li><a href="40d34bca1b"><code>40d34bc</code></a> Fix stderr printing functions (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/894">#894</a>)</li>
<li><a href="0bc8bfbe59"><code>0bc8bfb</code></a> Remove secondary go mod to prevent broken <code>go get</code> (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1233">#1233</a>)</li>
<li><a href="7f8e83d936"><code>7f8e83d</code></a> Modifying &quot;snake-case&quot; to &quot;kebab-case&quot; for clarity. (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1196">#1196</a>)</li>
<li><a href="8a39cb2614"><code>8a39cb2</code></a> Bug fix in README (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1199">#1199</a>)</li>
<li><a href="2a8d0f327d"><code>2a8d0f3</code></a> Adding Kool to list of projects using cobra (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1224">#1224</a>)</li>
<li><a href="6c06523c96"><code>6c06523</code></a> add arduino-cli to projects using cobra (<a href="https://github-redirect.dependabot.com/spf13/cobra/issues/1117">#1117</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/spf13/cobra/compare/v1.0.0...v1.1.0">compare view</a></li>
</ul>
</details>
<br />


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.0.0&new-version=1.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)

2020-11-13 14:11:08 +01:00
dependabot[bot]
a2addecb3d build(deps): Bump github.com/golang/protobuf from 1.4.2 to 1.4.3 (#5506)
Bumps [github.com/golang/protobuf](https://github.com/golang/protobuf) from 1.4.2 to 1.4.3.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/golang/protobuf/releases">github.com/golang/protobuf's releases</a>.</em></p>
<blockquote>
<h2>v1.4.3</h2>
<p>Notable changes:</p>
<p>(<a href="https://github-redirect.dependabot.com/golang/protobuf/issues/1221">#1221</a>) jsonpb: Fix marshaling of Duration
(<a href="https://github-redirect.dependabot.com/golang/protobuf/issues/1210">#1210</a>) proto: convert integer to rune before converting to string</p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="4846b58453"><code>4846b58</code></a> jsonpb: Fix marshaling of Duration (<a href="https://github-redirect.dependabot.com/golang/protobuf/issues/1221">#1221</a>)</li>
<li><a href="91c84e0db1"><code>91c84e0</code></a> travis.yml: update tested versions of Go (<a href="https://github-redirect.dependabot.com/golang/protobuf/issues/1211">#1211</a>)</li>
<li><a href="3860b2764f"><code>3860b27</code></a> proto: convert integer to rune before converting to string (<a href="https://github-redirect.dependabot.com/golang/protobuf/issues/1210">#1210</a>)</li>
<li>See full diff in <a href="https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3">compare view</a></li>
</ul>
</details>
<br />
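The rune-conversion fix (#1210) quoted above addresses a classic Go pitfall: converting an integer directly to a string yields the UTF-8 encoding of that code point, not its decimal digits. A tiny, self-contained illustration (not the protobuf code itself):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	n := 65
	fmt.Println(string(rune(n))) // "A": integer-to-string gives the code point
	fmt.Println(strconv.Itoa(n)) // "65": use strconv to get the decimal digits
}
```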


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/golang/protobuf&package-manager=go_modules&previous-version=1.4.2&new-version=1.4.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)

2020-11-13 14:11:08 +01:00
Anton Kaliaev
54a0940e40 blockchain/v2: remove peers from the processor (#5607)
after they were pruned by the scheduler

Closes #5513
2020-11-05 16:55:11 +04:00
Anton Kaliaev
25fafb30b5 blockchain/v2: make the removal of an already removed peer a noop (#5553)
also, since multiple StopPeerForError calls may be executed in parallel,
only execute StopPeerForError once

Closes #5541
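
A minimal sketch of the idempotent-removal pattern described above, assuming a map-backed peer set (the type and field names are illustrative, not the actual blockchain/v2 processor code):

```go
package main

import (
	"fmt"
	"sync"
)

type processor struct {
	mtx   sync.Mutex
	peers map[string]struct{}
}

// removePeer deletes the peer if present and reports whether anything
// was actually removed. A second call for the same peer is a no-op, so
// parallel StopPeerForError-style handling can be gated to run once.
func (p *processor) removePeer(id string) bool {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if _, ok := p.peers[id]; !ok {
		return false // already removed: nothing to do
	}
	delete(p.peers, id)
	return true
}

func main() {
	p := &processor{peers: map[string]struct{}{"peer1": {}}}
	fmt.Println(p.removePeer("peer1")) // true: removed now
	fmt.Println(p.removePeer("peer1")) // false: already gone, no-op
}
```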
2020-11-05 14:48:31 +04:00
Erik Grinaker
59f3f63d33 test: fix various E2E test issues (#5576)
* Don't use state sync for nodes starting at initial height.
* Also remove stopped containers when cleaning up.
* Start nodes in order of startAt, mode, name to avoid full nodes starting before their seeds.
* Tweak network waiting to avoid halts caused by validator changes and perturbations.
* Disable most tests for seed nodes, which aren't always able to join consensus.
* Disable `blockchain/v2` due to known bugs.
2020-11-05 11:26:30 +01:00
Callum Waters
9d354c842e evidence: structs can independently form abci evidence (#5610) 2020-11-05 10:38:42 +01:00
Anton Kaliaev
70a62be5c6 light: run detector for sequentially validating light client (#5538) (#5601)
Closes #5445

Backport of #5538
2020-11-02 14:39:50 +04:00
Marko
ad4f54e9b2 privval: make response values non nullable (#5583)
make response values non-nullable in privval

Does this need a changelog for master?

Closes: #5581

cc @tarcieri
2020-10-28 16:44:30 +01:00
Marko
0022779e07 ci: tests (#5577)
- use matrix builds to run multiple test jobs
- upload code coverage once, not four times (produces more accurate codecov reports)
2020-10-28 15:12:54 +01:00
Marko
96dda8810d ci: add goreleaser (#5527)
Co-authored-by: Erik Grinaker <erik@interchain.berlin>
Co-authored-by: Alessio Treglia <alessio@tendermint.com>
2020-10-28 15:12:54 +01:00
Callum Waters
5cfe035362 evidence: don't send committed evidence and ignore inbound evidence that is already committed (#5574) 2020-10-28 09:10:53 +01:00
Callum Waters
4947333e67 evidence: don't gossip consensus evidence too soon (#5528)
and don't return errors on seeing the same evidence twice
2020-10-28 09:10:53 +01:00
Erik Grinaker
8329d12c18 abci/grpc: fix ordering of sync/async callback combinations (#5556)
Fixes #5540, fixes #2965. This is a hack that patches over the problem, but really the whole async handling in gRPC should be redesigned, as should ReqRes callback dispatch.
2020-10-26 22:44:57 +01:00
Erik Grinaker
f093d5837b test: disable E2E misbehaviors due to bugs (#5569)
Disables misbehaviors in E2E testnets due to failures caused by #5554 and #5560. Should be re-enabled once these are fixed.
2020-10-26 20:56:47 +01:00
Erik Grinaker
ceea64ec28 test: fix handling of start height in generated E2E testnets (#5563)
In #5488, the E2E testnet generator was changed to set explicit `StartAt` heights for initial nodes. This broke the runner, which expected all initial nodes to have `StartAt: 0`, as well as validator set scheduling in the generator. Testnet loading now normalizes initial nodes to have `StartAt: 0`.

This also tweaks waiting for misbehavior heights to only use an additional wait if there actually is any misbehavior in the testnet, and to output information when waiting.
2020-10-26 20:56:47 +01:00
Callum Waters
c4f1b2d7db block: fix max commit sig size (#5567) 2020-10-26 11:37:48 +01:00
Erik Grinaker
a0f08686fb github: only notify nightly E2E failures once (#5559) 2020-10-23 16:04:23 +02:00
Callum Waters
dacbfbe1fe test: add evidence e2e tests (#5488) 2020-10-23 16:04:23 +02:00
Erik Grinaker
75879ab1d7 test: tag E2E Docker resources and autoremove them (#5558)
Fixes #5555.
2020-10-23 16:04:23 +02:00
Erik Grinaker
8b4f0dba70 test: run remaining E2E testnets on run-multiple.sh failure (#5557)
Fixes #5542.
2020-10-23 16:04:23 +02:00
Erik Grinaker
2f72f553ac test: enable restart/kill perturbations in E2E tests (#5537)
When #5536 lands, we can re-enable restart/kill perturbations in E2E tests.
2020-10-22 13:36:08 +02:00
Erik Grinaker
d113da01cd test: enable blockchain v2 in E2E testnet generator (#5533)
When #5499 and #5530 land, we can re-enable v2 in the E2E testnet generator (and thus the nightly E2E tests).
2020-10-22 13:36:08 +02:00
Erik Grinaker
b17b28a163 test: enable ABCI gRPC client in E2E testnets (#5521)
Once #5520 lands, we can re-enable gRPC ABCI protocol in the E2E testnets.
2020-10-22 13:36:08 +02:00
Erik Grinaker
6473f0178c test: tweak E2E tests for nightly runs (#5512) 2020-10-22 13:36:08 +02:00
Erik Grinaker
4e2e487c7a test: clean up E2E test volumes using a container (#5509) 2020-10-22 13:36:08 +02:00
Erik Grinaker
8ebb39eed6 github: rename e2e jobs (#5502) 2020-10-22 13:36:08 +02:00
Erik Grinaker
5e6e6315ad github: add nightly E2E testnet action (#5480) 2020-10-22 13:36:08 +02:00
Marko
9379bc92fd fix lint failures with 1.31 (#5489) 2020-10-22 13:36:08 +02:00
dependabot[bot]
51b8d3a153 build(deps): Bump technote-space/get-diff-action from v3 to v4 (#5485)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Marko Baricevic <marbar3778@yahoo.com>
2020-10-22 13:36:08 +02:00
dependabot[bot]
bf42bf0fd5 build(deps): Bump golangci/golangci-lint-action from v2.2.0 to v2.2.1 (#5486)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Marko <marbar3778@yahoo.com>
2020-10-22 13:36:08 +02:00
dependabot[bot]
cbdc089321 build(deps): Bump actions/cache from v2.1.1 to v2.1.2 (#5487)
Bumps [actions/cache](https://github.com/actions/cache) from v2.1.1 to v2.1.2.
- [Release notes](https://github.com/actions/cache/releases)
- [Commits](https://github.com/actions/cache/compare/v2.1.1...d1255ad9362389eac595a9ae406b8e8cb3331f16)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2020-10-22 13:36:08 +02:00
Erik Grinaker
f9bfb40d53 test/e2e: add random testnet generator (#5479)
Closes #5291. Adds a randomized testnet generator. Nightly CI job will be submitted separately. A few of the testnets can be a bit flaky, even after disabling known-faulty behavior and making minor tweaks, and the larger networks may be too resource-intensive to run in CI - this will be optimized separately.
2020-10-22 13:36:08 +02:00
Marko
e7568f9e0c ci/e2e: avoid running job when no go files are touched (#5471) 2020-10-22 13:36:08 +02:00
Erik Grinaker
3a4a6ae9ac test: add E2E test for node peering (#5465)
This restores a test case missing from the old P2P tests removed in #5453: it makes sure that all nodes are able to peer with each other, regardless of how they discover peers.

Fixes #2795, since the default CI testnet uses a combination of (partially meshed) persistent peers and PEX-based seed nodes.
2020-10-22 13:36:08 +02:00
Erik Grinaker
4462e2697c test: remove P2P tests (#5453) 2020-10-22 13:36:08 +02:00
Erik Grinaker
0003aabe65 circleci: remove Gitian reproducible_builds job (#5462) 2020-10-22 13:36:08 +02:00
Erik Grinaker
4b3565fcaa test: add GitHub action for end-to-end tests (#5452)
Partial fix for #5291.
2020-10-22 13:36:08 +02:00
Erik Grinaker
64b0f5b363 test: add basic end-to-end test cases (#5450)
Partial fix for #5291.

This adds a basic set of test cases for core network invariants. Although small, it is sufficient to replace and extend the current set of P2P tests. Further test cases can be added later.
2020-10-22 13:36:08 +02:00
Erik Grinaker
a58454e788 test: add end-to-end testing framework (#5435)
Partial fix for #5291. For details, see [README.md](https://github.com/tendermint/tendermint/blob/erik/e2e-tests/test/e2e/README.md) and [RFC-001](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-end-to-end-testing.md).

This only includes a single test case under `test/e2e/tests/` as a proof of concept; additional test cases will be submitted separately. A randomized testnet generator will also be submitted separately; there are currently just a handful of static testnets under `test/e2e/networks/`. This will eventually replace the current P2P tests and run in CI.
2020-10-22 13:36:08 +02:00
QuantumExplorer
1b733ea28d fix a few typos (#5402) 2020-10-22 13:36:08 +02:00
Marko
41ab199378 blockchain/v1: add noBlockResponse handling (#5401)
## Description

Add simple `NoBlockResponse` handling to blockchain reactor v1. I tested before and after with Erik's E2E tests and was not able to reproduce the inability to sync once the changes were applied.

Closes: #5394
2020-10-22 13:08:12 +02:00
Marko
0f3b49a915 ci: docker remove circleci and add github action (#5551) 2020-10-22 12:39:27 +02:00
Anton Kaliaev
55ff694aa6 light/rpc: fix ABCIQuery (#5375)
Closes #5106
2020-10-22 12:17:53 +02:00
Anton Kaliaev
406dd74220 light: cross-check the very first header (#5429)
Closes #5428
2020-10-22 12:17:53 +02:00
Callum Waters
c374fc010a cli: light home dir should default to where the full node default is (#5392) 2020-10-22 12:17:53 +02:00
Callum Waters
3822ab924e simplify commit and validators rpc calls (#5393) 2020-10-22 12:17:53 +02:00
Erik Grinaker
7c17fa115a consensus: open target WAL as read/write during autorepair (#5536) (#5547)
Fixes #5422. That turned out to be a whole lot easier than expected.

Backport of #5536.
2020-10-21 18:24:38 +02:00
Anton Kaliaev
020edbc11d blockchain/v2: fix panic: processed height X+1 but expected height X (#5530)
Before: the scheduler receives a psBlockProcessed event but does not mark the block as processed, because the peer timed out (or was removed for other reasons) and all of its associated blocks were rescheduled.

After: the scheduler receives a psBlockProcessed event and marks the block as processed in any case (even if the peer that provided the block errors).

Closes #5387
2020-10-21 13:28:41 +04:00
Anton Kaliaev
79d535dd67 blockchain/v2: fix "panic: duplicate block enqueued by processor" (#5499)
When a peer is stopped due to some network issue, the Reactor calls scheduler#handleRemovePeer, which removes the peer from the scheduler. However, the peer stays in the processor, which can lead to a "duplicate block enqueued by processor" panic when the same block is requested by the scheduler again from a different peer. The solution is to return scPeerError, which is propagated to the processor; the processor then cleans up the blocks associated with that peer in purgePeer.

Closes #5513, #5517
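
A rough sketch of the purge-on-peer-error flow described above; the event and field names are illustrative stand-ins rather than the real blockchain/v2 types:

```go
package main

import "fmt"

// scPeerError stands in for the error event the scheduler propagates
// when a peer is removed.
type scPeerError struct{ peerID string }

// processorState tracks which peer supplied each queued block.
type processorState struct {
	queue map[int64]string // height -> providing peer ID
}

// purgePeer drops every queued block from the failed peer, so those
// heights can be re-requested from another peer without the processor
// ever holding two blocks for the same height.
func (p *processorState) purgePeer(ev scPeerError) {
	for height, peerID := range p.queue {
		if peerID == ev.peerID {
			delete(p.queue, height)
		}
	}
}

func main() {
	p := &processorState{queue: map[int64]string{10: "a", 11: "b"}}
	p.purgePeer(scPeerError{peerID: "a"})
	fmt.Println(p.queue) // map[11:b]
}
```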
2020-10-21 13:26:20 +04:00
Erik Grinaker
29ca7de63c abci/grpc: return async responses in order (#5520) (#5531)
Fixes #5439. This is really a workaround for #5519 (unless we require async implementations to return ordered responses, but that kind of defeats the purpose of having an async API).
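
To illustrate the ordering guarantee (a generic FIFO-of-channels pattern, not the actual abci/grpc client code): each request enqueues a result channel, and a single reader drains the queue front-to-back, so responses come out in request order even when the work completes out of order.

```go
package main

import "fmt"

type response struct{ id int }

// deliverInOrder reads one result channel at a time from the FIFO
// queue; it blocks on the oldest outstanding request, so responses are
// emitted in request order regardless of completion order.
func deliverInOrder(queue <-chan chan response, out chan<- response) {
	for ch := range queue {
		out <- <-ch
	}
	close(out)
}

func main() {
	queue := make(chan chan response, 3)
	out := make(chan response)

	// Enqueue three requests in order, then complete them out of order.
	chans := make([]chan response, 3)
	for i := range chans {
		chans[i] = make(chan response, 1)
		queue <- chans[i]
	}
	close(queue)
	chans[2] <- response{id: 2}
	chans[0] <- response{id: 0}
	chans[1] <- response{id: 1}

	go deliverInOrder(queue, out)
	for r := range out {
		fmt.Println("delivered", r.id) // prints 0, 1, 2
	}
}
```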
2020-10-20 10:56:48 +02:00
Marko
6f908eb814 crypto: add in secp256k1 support (#5500)
Secp256k1 was removed in the protobuf migration; this PR adds it back in order to provide this functionality for users (e.g. Band).

Closes: #5495
2020-10-19 10:07:51 +02:00
Erik Grinaker
b3238cdcd9 statesync: check all necessary heights when adding snapshot to pool (#5516) (#5518)
Fixes #5511.
2020-10-16 14:39:56 +02:00
Tess Rinearson
bd1f43d793 changelog: prepare changelog for RC5 (#5494)
* changelog: prepare changelog for RC5

* Update CHANGELOG.md

Co-authored-by: Marko <marbar3778@yahoo.com>

* Update CHANGELOG.md

Co-authored-by: Marko <marbar3778@yahoo.com>

Co-authored-by: Marko <marbar3778@yahoo.com>
2020-10-13 20:00:59 +02:00
Marko
09982ae407 backport block size fixes (#5492)
* mempool: length prefix txs when getting them from mempool (#5483)

* correctly calculate evidence data size (#5482)

* block: use commit sig size instead of vote size (#5490)

* tx: reduce function to one parameter (#5493)
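
A minimal sketch of the length-prefixing mentioned in #5483 above, assuming a uvarint prefix (illustrative only, not the exact Tendermint encoding helper):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// lengthPrefix prepends the tx's length as a uvarint, so concatenated
// txs can be split apart again and their encoded size computed exactly
// when filling a block up to its byte limit.
func lengthPrefix(tx []byte) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(tx)))
	return append(buf[:n], tx...)
}

func main() {
	fmt.Printf("% x\n", lengthPrefix([]byte("hello"))) // 05 68 65 6c 6c 6f
}
```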
2020-10-13 18:07:54 +02:00
Callum Waters
7d5d417dc9 evidence: use bytes instead of quantity to limit size (#5449)(#5476) 2020-10-08 14:38:11 +02:00
Marko
dac18d73a7 fix RPC blockresults return (#5459) (#5463) 2020-10-07 12:01:40 +02:00
Tess Rinearson
383bc5337f changelog: add missing date to v0.33.5 release, fix indentation (#5454) (#5455)
I forgot to add the date when we cut 0.33.5. This fixes that. It also fixes a header indentation issue for 0.33.8.
2020-10-05 12:27:58 +02:00
Erik Grinaker
e74176ad1a privval: fix ping message encoding (#5442)
Fixes #5371.
2020-10-01 17:02:11 +02:00
Callum Waters
52994aa2a9 consensus: check block parts don't exceed maximum block bytes (#5436) 2020-10-01 16:11:54 +02:00
Erik Grinaker
6149f21cd6 privval: allow passing options to NewSignerDialerEndpoint (#5434) (#5437)
Required for #5291 to set timeouts for remote signers.
2020-10-01 16:06:34 +02:00
Erik Grinaker
1a2cc933a0 config: set statesync.rpc_servers when generating config file (#5433) (#5438)
Required for #5291, to generate configuration files with state sync RPC servers.
2020-10-01 15:55:57 +02:00
Anton Kaliaev
e0f686ccac mempool: fix nil pointer dereference (#5412)
Previously, the second call to Next() could return nil, which was the reason for the panic on line 275:

memTx := next.Value.(*mempoolTx)

Closes #5408
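
A self-contained illustration of the guard, assuming a container/list-backed mempool as the quoted line suggests (not the actual mempool code):

```go
package main

import (
	"container/list"
	"fmt"
)

type mempoolTx struct{ tx string }

func main() {
	txs := list.New()
	txs.PushBack(&mempoolTx{tx: "a"})

	next := txs.Front()
	for next != nil {
		memTx := next.Value.(*mempoolTx)
		fmt.Println("processing", memTx.tx)
		// Next() returns nil at the end of the list (or after a
		// concurrent removal), so check it before dereferencing again.
		next = next.Next()
	}
}
```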
2020-09-30 08:56:01 +04:00
441 changed files with 21514 additions and 26740 deletions

.circleci/config.yml (new file)

@@ -0,0 +1,168 @@
version: 2.1
executors:
  golang:
    docker:
      - image: tendermintdev/docker-tendermint-build
    working_directory: /go/src/github.com/tendermint/tendermint
    environment:
      GOBIN: /tmp/bin
  release:
    machine: true
  docs:
    docker:
      - image: tendermintdev/docker-website-deployment
    environment:
      AWS_REGION: us-east-1

commands:
  run_test:
    parameters:
      script_path:
        type: string
    steps:
      - attach_workspace:
          at: /tmp/bin
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          name: "Running test"
          command: |
            bash << parameters.script_path >>

jobs:
  setup_dependencies:
    executor: golang
    steps:
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          command: |
            mkdir -p /tmp/bin
      - run:
          name: Cache go modules
          command: make go-mod-cache
      - run:
          name: tools
          command: make tools
      - run:
          name: "Build binaries"
          command: make install install_abci
      - save_cache:
          name: "Save go modules cache"
          key: go-mod-v1-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      - save_cache:
          name: "Save source code cache"
          key: go-src-v1-{{ .Revision }}
          paths:
            - ".git"
      - persist_to_workspace:
          root: "/tmp/bin"
          paths:
            - "."

  deploy_docs:
    executor: docs
    steps:
      - checkout
      - run:
          name: "Pull versions"
          command: git fetch origin v0.32 v0.33
      - run:
          name: "Build docs"
          command: make build-docs
      - run:
          name: "Sync to S3"
          command: make sync-docs

  prepare_build:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - run:
          name: Get next release number
          command: |
            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
            echo "Last tag: ${LAST_TAG}"
            if [ -z "${LAST_TAG}" ]; then
              export LAST_TAG="${CIRCLE_BRANCH}"
              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
            fi
            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
            echo "Next tag: ${NEXT_TAG}"
            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
      - run:
          name: Build dependencies
          command: make tools
      - persist_to_workspace:
          root: .
          paths:
            - "release-version.source"
      - save_cache:
          key: v2-release-deps-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"

  # # Test RPC implementation against the swagger documented specs
  # contract_tests:
  #   working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
  #   machine:
  #     image: circleci/classic:latest
  #   environment:
  #     GOBIN: /home/circleci/.go_workspace/bin
  #     GOPATH: /home/circleci/.go_workspace/
  #     GOOS: linux
  #     GOARCH: amd64
  #   parallelism: 1
  #   steps:
  #     - checkout
  #     - run:
  #         name: Test RPC endpoints against swagger documentation
  #         command: |
  #           set -x
  #           export PATH=~/.local/bin:$PATH
  #           # install node and dredd
  #           ./scripts/get_nodejs.sh
  #           # build the binaries with a proper version of Go
  #           docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
  #           # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
  #           go get github.com/snikch/goodman/cmd/goodman
  #           make contract-tests

workflows:
  version: 2
  docs:
    jobs:
      - deploy_docs:
          context: tendermint-docs
          filters:
            branches:
              only:
                - master
            tags:
              only:
                - /^v.*/
      - deploy_docs:
          context: tendermint-docs-staging
          filters:
            branches:
              only:
                - docs-staging
      # - contract_tests:
      #     requires:
      #       - setup_dependencies

.github/CODEOWNERS

@@ -1,25 +1,10 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/
# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
# Overrides for tooling packages
.github/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
# Overrides for core Tendermint packages
abci/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @melekes @tessr
light/ @cmwaters @melekes @ebuchman @tessr
# Overrides for docs
*.md @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
* @alexanderbez @cmwaters @ebuchman @marbar3778 @tessr @tychoish


@@ -23,6 +23,5 @@ updates:
reviewers:
- melekes
- tessr
- erikgrinaker
labels:
- T:dependencies


@@ -44,7 +44,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -66,7 +66,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -78,10 +78,6 @@ jobs:
with:
name: "${{ github.sha }}-${{ matrix.part }}"
if: env.GIT_DIFF
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.15
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
@@ -121,7 +117,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v1.2.1
- uses: codecov/codecov-action@v1.0.13
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -1,32 +0,0 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
workflow_dispatch: # allow running workflow manually
push:
branches:
- master
jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.1
with:
persist-credentials: false
fetch-depth: 0
- name: Install and Build 🔧
run: |
apk add rsync
make build-docs
- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output

.github/workflows/e2e-nightly-34x.yml (new file)

@@ -0,0 +1,76 @@
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch
# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!
name: e2e-nightly-34x
on:
  workflow_dispatch: # allow running workflow manually, in theory
  schedule:
    - cron: '0 2 * * *'
jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: '^1.15.4'
      - uses: actions/checkout@v2
        with:
          ref: 'v0.34.x'
      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner
      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 4 -d networks/nightly
      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@e9db0ef
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':skull:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
          SLACK_FOOTER: ''

  e2e-nightly-success: # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':white_check_mark:'
          SLACK_COLOR: good
          SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
          SLACK_FOOTER: ''


@@ -1,20 +1,22 @@
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
# Runs randomly generated E2E testnets nightly on master
# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!
name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'
jobs:
e2e-nightly-test:
e2e-nightly-test-2:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
# todo: expand to multiple versions after 0.35 release
branch: ['master', 'v0.34.x']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
@@ -23,8 +25,6 @@ jobs:
go-version: '1.15'
- uses: actions/checkout@v2
with:
ref: ${{ matrix.branch}}
- name: Build
working-directory: test/e2e
@@ -40,8 +40,8 @@ jobs:
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
e2e-nightly-fail:
needs: e2e-nightly-test
e2e-nightly-fail-2:
needs: e2e-nightly-test-2
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
@@ -53,5 +53,21 @@ jobs:
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed
SLACK_MESSAGE: Nightly E2E tests failed on master
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test-2
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on master
SLACK_FOOTER: ''


@@ -2,7 +2,6 @@ name: e2e
# Runs the CI end-to-end test network on all pushes to master or release branches
# and every pull request, but only if any Go files have been changed.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
push:
branches:
@@ -16,7 +15,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.15'
go-version: '^1.15.4'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:


@@ -44,17 +44,31 @@ jobs:
run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
continue-on-error: true
- name: Archive crashers
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 1
- name: Archive suppressions
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions
retention-days: 1
- name: Set crashers count
working-directory: test/fuzz
run: echo "::set-output name=crashers-count::$(find . -type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
id: set-crashers-count
outputs:
crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }}
crashers-count: ${{ steps.set-crashers-count.outputs.count }}
fuzz-nightly-fail:
needs: fuzz-nightly-test
if: ${{ needs.set-crashers-count.outputs.crashers-count != 0 }}
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers


@@ -7,6 +7,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.7
with:
folder-path: "docs"


@@ -11,7 +11,7 @@ jobs:
golangci:
name: golangci-lint
runs-on: ubuntu-latest
timeout-minutes: 8
timeout-minutes: 4
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
@@ -20,10 +20,10 @@ jobs:
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.3.0
- uses: golangci/golangci-lint-action@v2.5.1
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.31
version: v1.38
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF


@@ -27,5 +27,6 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
MARKDOWN_CONFIG_FILE: .markdownlint.yml
VALIDATE_OPENAPI: true
VALIDATE_YAML: true


@@ -2,7 +2,6 @@ name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"


@@ -2,8 +2,6 @@ name: "Release"
on:
push:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
@@ -18,21 +16,12 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.15'
go-version: '^1.15.4'
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')
- name: Build
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- name: Release
uses: goreleaser/goreleaser-action@v2
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md


@@ -25,7 +25,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -36,7 +36,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -44,7 +44,7 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -57,7 +57,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -65,14 +65,14 @@ jobs:
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -89,7 +89,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -97,14 +97,14 @@ jobs:
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -120,7 +120,7 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
@@ -128,14 +128,14 @@ jobs:
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary

.gitignore

@@ -1,40 +1,51 @@
*.bak
*.iml
*.log
*.swo
*.swp
*/.glide
*/vendor
.DS_Store
*.swo
.bak
.idea/
.revision
.tendermint
.tendermint-lite
.terraform
.vagrant
.vendor-new/
.vscode/
abci-cli
addrbook.json
artifacts/*
*.bak
.DS_Store
build/*
rpc/test/.tendermint
.tendermint
remote_dump
.revision
vendor
.vagrant
test/e2e/build
test/maverick/maverick
test/e2e/networks/*/
test/p2p/data/
test/logs
coverage.txt
docs/.vuepress/dist
docs/_build
docs/dist
docs/.vuepress/dist
*.log
abci-cli
docs/node_modules/
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
remote_dump
rpc/test/.tendermint
scripts/cutWALUntil/cutWALUntil
scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
.idea/
*.iml
.vscode/
libs/pubsub/query/fuzz_test/output
shunit2
.tendermint-lite
addrbook.json
*/vendor
.vendor-new/
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
profile\.out
test/e2e/build
test/e2e/networks/*/
test/logs


@@ -1,11 +1,11 @@
project_name: Tendermint
project_name: tendermint
env:
# Require use of Go modules.
- GO111MODULE=on
builds:
- id: "tendermint"
- id: "Tendermint"
main: ./cmd/tendermint/main.go
ldflags:
- -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
@@ -26,11 +26,3 @@ checksum:
release:
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md

.vscode/settings.json (new file)

@@ -0,0 +1,8 @@
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }
}


@@ -1,21 +1,186 @@
# Changelog
## v0.34.3
## v0.34.12
*January 19, 2021*
Special thanks to external contributors on this release: @JayT106.
This release includes a fix for a high-severity security vulnerability.
More information on this vulnerability will be released on January 26, 2021
and this changelog will be updated.
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
It also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
### FEATURES
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
### IMPROVEMENTS
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
### BUG FIXES
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
## v0.34.11
*June 18, 2021*
This release improves the robustness of statesync; tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- Apps
- [Version] [\#6494](https://github.com/tendermint/tendermint/issues/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
### IMPROVEMENTS
- [statesync] [\#6566](https://github.com/tendermint/tendermint/issues/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/issues/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/issues/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
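As a sketch of what the configurability from [\#6566] above looks like in the `[statesync]` section of `config.toml` (the key names and values below are assumptions for illustration; check the config template generated by your build):

```toml
[statesync]
# Assumed key names for the two new settings:
chunk_fetchers = "4"          # number of concurrent chunk fetchers
chunk_request_timeout = "10s" # how long to wait before re-requesting a chunk
```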
### BUG FIXES
- [evidence] [\#6375](https://github.com/tendermint/tendermint/issues/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10
*April 14, 2021*
This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
## v0.34.9
*April 8, 2021*
This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.
This release also includes a small Go API-breaking change, to reduce panics in the RPC layer.
Special thanks to our external contributors on this release: @gchaincl
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- Go API
- [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)
### FEATURES
- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
### BUG FIXES
- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)
## v0.34.8
*February 25, 2021*
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS
- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
### BUG FIXES
- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fixes a panic condition during callback execution in `ReCheckTx` during high tx load. (@alexanderbez)
## v0.34.7
*February 18, 2021*
This release fixes a downstream security issue which impacts Cosmos SDK
users who are:
* Using Cosmos SDK v0.40.0 or later, AND
* Running validator nodes, AND
* Using the file-based `FilePV` implementation for their consensus keys
Users who fulfill all the above criteria were susceptible to leaking
private key material in the logs. All other users are unaffected.
The root cause was a discrepancy
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.
The introduction of the typed logger meant that previously un-logged fields within
interfaces are now sometimes logged, including the private key material inside the
`FilePV` struct.
Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
use remote signer implementations instead of `FilePV` in production.
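To make the failure mode concrete, here is a toy sketch (the type and field names are invented for illustration, and the fields are exported only to keep the example short) of how a Stringer-respecting logger hides a field that a reflection-based logger dumps:

```go
package main

import (
	"fmt"
	"reflect"
)

// filePV stands in for a validator key wrapper whose String() method
// deliberately omits the private key.
type filePV struct {
	Addr    string
	PrivKey string
}

func (pv filePV) String() string { return "FilePV{" + pv.Addr + "}" }

func main() {
	pv := filePV{Addr: "cafe01", PrivKey: "SECRET"}

	// A logger that stringifies via fmt's %v uses String(): safe.
	fmt.Printf("%v\n", pv) // FilePV{cafe01}

	// A logger that walks struct fields via reflection leaks PrivKey.
	v := reflect.ValueOf(pv)
	for i := 0; i < v.NumField(); i++ {
		fmt.Printf("%s=%v\n", v.Type().Field(i).Name, v.Field(i))
	}
}
```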
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [evidence] [N/A] Use correct source of evidence time (@cmwaters)
- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
## v0.34.6
*February 18, 2021*
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to build tooling problems._
## v0.34.4
*February 11, 2021*
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
## v0.34.3
*January 19, 2021*
This release includes a fix for a high-severity security vulnerability,
a DoS vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
## v0.34.2
@@ -109,14 +274,14 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
- Blockchain Protocol
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -175,7 +340,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
@@ -213,7 +378,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)

View File

@@ -1,6 +1,6 @@
# Unreleased Changes
## vX.X
## v0.34.13
Special thanks to external contributors on this release:
@@ -9,53 +9,18 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES
- CLI/RPC/Config
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- Apps
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- P2P Protocol
- Go API
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- Blockchain Protocol
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
### FEATURES
### IMPROVEMENTS
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter to reduce the bug surface. The parameter is no longer used. (@valardragon)
### BUG FIXES
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)

View File

@@ -106,40 +106,22 @@ specify exactly the dependency you want to update, eg.
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
There are two ways to generate your proto stubs.
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker`
2. Run `make proto-gen` after installing `buf` and `gogoproto`, you can do this by running `make protobuf`.
## Vagrant
### Installation Instructions
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
hacking Tendermint with the commands below.
To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow instructions in README included with the download).
To install `gogoproto`, do the following:
NOTE: In case you installed Vagrant in 2017, you might need to run
`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.
```sh
go get github.com/gogo/protobuf/gogoproto
cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things
make install
```
You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.
### Visual Studio Code
If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:
```json
{
"protoc": {
"options": [
"--proto_path=${workspaceRoot}/proto",
"--proto_path=${workspaceRoot}/third_party/proto"
]
}
}
```

```sh
vagrant up
vagrant ssh
make test
```
## Changelog
@@ -247,36 +229,6 @@ Each PR should have one commit once it lands on `master`; this can be accomplish
#### Major Release
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
- "Squash" changes from the changelog entries for the RCs into a single entry,
and add all changes included in `CHANGELOG_PENDING.md`.
(Squashing includes both combining all entries, as well as removing or simplifying
any intra-RC changes. It may also help to alphabetize the entries by package name.)
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
- `git checkout RCx/vX.X.0`
- `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
- `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
new major release series.
##### Major Release (from `master`)
1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
@@ -284,10 +236,11 @@ If there were no release candidates, and you'd like to cut a major release direc
had release candidates, squash all the RC updates into one
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Make sure all significant breaking changes are covered in `UPGRADING.md`
@@ -295,39 +248,45 @@ If there were no release candidates, and you'd like to cut a major release direc
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the releases changelog.
5. Update the changelog.md file on master with the releases changelog.
6. Delete any RC branches and tags for this release (if applicable)
#### Minor Release (Point Releases)
#### Minor Release
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master.
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
To create a minor release:
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- Bump the ABCI version number, if necessary.
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during minor releases, and only field additions are valid minor changes.)
3. Prepare the release:
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
- run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump P2P and block protocol versions in `version.go`, if necessary
- bump ABCI protocol version in `version.go`, if necessary
- make sure all significant breaking changes are covered in `UPGRADING.md`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
4. Create a release branch `release/vX.X.x` off the release candidate branch:
- `git checkout -b release/vX.X.x`
- `git push -u origin release/vX.X.x`
- Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch.
5. Once the release branch has been approved, make sure to pull it locally, then push a tag.
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.
- Do not merge the release branch into master.
7. Delete the former long lived release candidate branch once the release has been made.
8. Create a new release candidate branch to be used for the next release.
#### Backport Release
1. Start from the existing release branch you want to backport changes to (e.g. v0.30),
and branch to a release/vX.X.X branch locally (e.g. release/v0.30.7); see the sketch after this list.
2. Cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. Follow steps 2 and 3 from [Major Release](#major-release)
4. Push changes to release/vX.X.X branch
5. Open a PR against the existing vX.X branch
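A hypothetical run of these steps, with placeholder branch names and SHAs:
```sh
# backporting a fix onto the (hypothetical) v0.30 line
git checkout v0.30
git checkout -b release/v0.30.7
git cherry-pick <commit-sha>          # repeat for each backported commit
git push -u origin release/v0.30.7
# then open a PR against v0.30
```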
#### Release Candidates
@@ -351,12 +310,86 @@ have distinct names from the tags/release names.)
## Testing
All repos should be hooked up to [CircleCI](https://circleci.com/).
### Unit tests
If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
Unit tests are located in `_test.go` files as directed by [the Go testing
package](https://golang.org/pkg/testing/). If you're adding or removing a
function, please check there's a `TestType_Method` test for it.
Run: `make test`
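For instance, a minimal sketch of the `TestType_Method` convention (the `Validator` type below is a self-contained stand-in, not the real one):
```go
package types

import (
	"errors"
	"testing"
)

// Validator is a hypothetical type under test, defined inline so the
// sketch is self-contained.
type Validator struct{ Power int64 }

func (v Validator) Verify() error {
	if v.Power < 0 {
		return errors.New("negative power")
	}
	return nil
}

// TestValidator_Verify follows the TestType_Method naming convention.
func TestValidator_Verify(t *testing.T) {
	if err := (Validator{Power: 10}).Verify(); err != nil {
		t.Fatalf("expected valid validator, got: %v", err)
	}
	if err := (Validator{Power: -1}).Verify(); err == nil {
		t.Fatal("expected an error for negative power")
	}
}
```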
### Integration tests
Integration tests are also located in `_test.go` files. What differentiates
them is a more complicated setup, which usually involves setting up two or more
components.
Run: `make test_integrations`
### End-to-end tests
End-to-end tests are used to verify a fully integrated Tendermint network.
See [README](./test/e2e/README.md) for details.
Run:
```sh
cd test/e2e && \
make && \
./build/runner -f networks/ci.toml
```
### Maverick
**If you're changing the code in `consensus` package, please make sure to
replicate all the changes in `./test/maverick/consensus`**. Maverick is a
byzantine node used to assert that the validator gets punished for malicious
behavior.
See [README](./test/maverick/README.md) for details.
### Model-based tests (ADVANCED)
*NOTE: if you're just submitting your first PR, you most probably (99.9%)
won't need to touch these.*
For components that have been [formally
verified](https://en.wikipedia.org/wiki/Formal_verification) using
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
tests using a combination of the [Apalache Model
Checker](https://apalache.informal.systems/) and [tendermint-rs testgen
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).
Now, I know there's a lot to take in. If you want to learn more, check out [
this video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
& Igor Konnov.
At the moment, we have model-based tests for the light client, located in the
`./light/mbt` directory.
Run: `cd light/mbt && go test`
### Fuzz tests (ADVANCED)
*NOTE: if you're just submitting your first PR, you most probably (99.9%)
won't need to touch these.*
[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.
Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`
### Jepsen tests (ADVANCED)
*NOTE: if you're just submitting your first PR, you most probably (99.9%)
won't need to touch these.*
[Jepsen](http://jepsen.io/) tests are used to verify the
[linearizability](https://jepsen.io/consistency/models/linearizable) property
of the Tendermint consensus. They are located in a separate repository
-> <https://github.com/tendermint/jepsen>. Please refer to its README for more
information.
### RPC Testing
@@ -369,4 +402,8 @@ make build-linux build-contract-tests-hooks
make contract-tests
```
This command will spin up a network and check every endpoint against what has
been documented.
**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
not supporting complete OpenAPI 3**.

View File

@@ -49,7 +49,7 @@ ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["start"]
CMD ["node"]
# Expose the data directory as a volume since there's mutable state in there
VOLUME [ "$TMHOME" ]

View File

@@ -32,7 +32,7 @@ A quick example of a built-in app and Tendermint core in one container.
```sh
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy-app=kvstore
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
```
## Local cluster

View File

@@ -6,11 +6,11 @@ if [ ! -d "$TMHOME/config" ]; then
tendermint init
sed -i \
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
-e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \
-e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \
-e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \
-e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \
-e 's/^index-all-tags\s*=.*/index-all-tags = true/' \
-e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \
-e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \
-e 's/^index_all_tags\s*=.*/index_all_tags = true/' \
-e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \
-e 's/^prometheus\s*=.*/prometheus = true/' \
"$TMHOME/config/config.toml"

View File

@@ -1,7 +1,5 @@
#!/usr/bin/make -f
PACKAGES=$(shell go list ./...)
BUILDDIR ?= $(CURDIR)/build
OUTPUT?=build/tendermint
BUILD_TAGS?=tendermint
@@ -58,25 +56,20 @@ LD_FLAGS += $(LDFLAGS)
all: check build test install
.PHONY: all
# The below include contains the tools.
include tools/Makefile
include test/Makefile
include tests.mk
###############################################################################
### Build Tendermint ###
###############################################################################
build: $(BUILDDIR)/
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/
build:
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/tendermint/
.PHONY: build
install:
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
.PHONY: install
$(BUILDDIR)/:
mkdir -p $@
###############################################################################
### Protobuf ###
###############################################################################
@@ -85,19 +78,10 @@ proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all
proto-gen:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
@sh scripts/protocgen.sh
.PHONY: proto-gen
proto-gen-docker:
@docker pull -q tendermintdev/docker-build-proto
@echo "Generating Protobuf files"
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
.PHONY: proto-gen-docker
.PHONY: proto-gen
proto-lint:
@$(DOCKER_BUF) check lint --error-format=json
@@ -128,27 +112,6 @@ install_abci:
@go install -mod=readonly ./abci/cmd/...
.PHONY: install_abci
###############################################################################
### Privval Server ###
###############################################################################
build_privval_server:
@go build -mod=readonly -o $(BUILDDIR)/ -i ./cmd/priv_val_server/...
.PHONY: build_privval_server
generate_test_cert:
# generate self-signing certificate authority
@certstrap init --common-name "root CA" --expires "20 years"
# generate server certificate
@certstrap request-cert -cn server -ip 127.0.0.1
# self-sign server certificate with rootCA
@certstrap sign server --CA "root CA"
# generate client certificate
@certstrap request-cert -cn client -ip 127.0.0.1
# self-sign client certificate with rootCA
@certstrap sign client --CA "root CA"
.PHONY: generate_test_cert
###############################################################################
### Distribution ###
###############################################################################
@@ -177,7 +140,7 @@ draw_deps:
get_deps_bin_size:
@# Copy of build recipe with additional flags to perform binary size analysis
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1))
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
.PHONY: get_deps_bin_size
@@ -221,23 +184,31 @@ DESTINATION = ./index.html.md
###############################################################################
### Documentation ###
###############################################################################
# todo remove once tendermint.com DNS is solved
build-docs:
@cd docs && \
while read -r branch path_prefix; do \
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
mkdir -p ~/output/$${path_prefix} ; \
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
cd docs && \
while read p; do \
(git checkout $${p} . && npm install && VUEPRESS_BASE="/$${p}/" npm run build) ; \
mkdir -p ~/output/$${p} ; \
cp -r .vuepress/dist/* ~/output/$${p}/ ; \
cp ~/output/$${p}/index.html ~/output ; \
done < versions ;
.PHONY: build-docs
sync-docs:
cd ~/output && \
echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
.PHONY: sync-docs
###############################################################################
### Docker image ###
###############################################################################
build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
cp $(OUTPUT) DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
rm -rf DOCKER/tendermint
.PHONY: build-docker
@@ -247,7 +218,7 @@ build-docker: build-linux
###############################################################################
# Build linux binary on other platforms
build-linux: tools
build-linux:
GOOS=linux GOARCH=amd64 $(MAKE) build
.PHONY: build-linux
@@ -291,17 +262,3 @@ endif
contract-tests:
dredd
.PHONY: contract-tests
clean:
rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/
build-reproducible:
docker rm latest-build || true
docker run --volume=$(CURDIR):/sources:ro \
--env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \
--env APP=tendermint \
--env COMMIT=$(shell git rev-parse --short=8 HEAD) \
--env VERSION=$(shell git describe --tags) \
--name latest-build cosmossdk/rbuilder:latest
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
.PHONY: build-reproducible

158
PHILOSOPHY.md Normal file
View File

@@ -0,0 +1,158 @@
# Design goals
The design goals for Tendermint (and the SDK and related libraries) are:
* Simplicity and Legibility
* Parallel performance, namely ability to utilize multicore architecture
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, esp in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation
## Justification
Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.
It is too easy to introduce bugs over time by replacing lines of code with
lines that may panic, which is why, ideally, locks should be released by defer
statements.
For example,
```go
func (obj *MyObj) something() {
	obj.mtx.Lock()
	obj.field = other
	obj.mtx.Unlock()
}
```
It is too easy to refactor the codebase in the future to replace `other` with
`other.String()`, for example, and this may introduce a bug that causes a
deadlock. So, as much as reasonably possible, we should use defer
statements, even though they introduce additional overhead.
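The safer shape, sketched with the same hypothetical object:
```go
func (obj *MyObj) something() {
	obj.mtx.Lock()
	defer obj.mtx.Unlock()

	// Even if this assignment later evolves into something that can panic
	// (e.g. other.String() on a nil value), the mutex is still released.
	obj.field = other
}
```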
If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function.
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy to evolve the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection.
## On performance
It doesn't matter whether there are alternative implementations that are 2x or
3x more performant when the software doesn't work, deadlocks, or has bugs that
cannot be debugged. By taking advantage of multicore concurrency, the
Tendermint implementation will be at least within an order of magnitude of
what is theoretically possible. The design philosophy of Tendermint,
and the choice of Go as the implementation language, is meant to make the
Tendermint implementation the standard specification for concurrent BFT software.
By focusing on the message protocols (e.g. ABCI, p2p messages) and on
encapsulation (e.g. the IAVL module, (relatively) independent reactors), we are
both writing a reference implementation to be used as the specification for
future implementations in more optimizable languages like Rust, Java, and C++,
and creating sufficiently performant software. Tendermint Core will
never be as fast as future implementations of the Tendermint Spec, because Go
isn't designed to be as fast as possible. The advantage of using Go is that we
can develop the whole stack of modular components **faster** than in other
languages.
Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).
Instead of optimizing Tendermint performance down to the metal, let's focus on
other matters, namely the ability to push feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.
## On encapsulation
In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.
For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency-safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then they will still perform correctly.
Examples of this design principle can be seen in the types.ValidatorSet struct
and the rand.Rand struct. Each is a single struct declaration that can be used
in both concurrent and non-concurrent logic, and because it is well
encapsulated, it's easy to get the usage of the mutex right.
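A minimal sketch of the pattern (the `Counter` type is illustrative, not an actual Tendermint type):
```go
package main

import (
	"fmt"
	"sync"
)

// Counter is concurrency-safe via a struct-level mutex; callers never
// touch the mutex, and the zero value is ready to use.
type Counter struct {
	mtx sync.Mutex
	n   int64
}

func (c *Counter) Incr() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.n++
}

func (c *Counter) Value() int64 {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	return c.n
}

func main() {
	var (
		c  Counter
		wg sync.WaitGroup
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.Incr() }()
	}
	wg.Wait()
	fmt.Println(c.Value()) // 10, correct under concurrent or sequential use
}
```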
### example: rand.Rand
`The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not`. The reason why the default
package-level source is safe for concurrent use is because it is protected (see
`lockedSource` in <https://golang.org/src/math/rand/rand.go>).
But we shouldn't rely on the global source; we should be creating our own
Rand/Source instances and using them, especially for determinism in testing.
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons outlined in this design philosophy document, we should continue to use
the rand.Rand object, with mutex protection.
Another key aspect of good encapsulation is the choice of exported vs unexported
methods. It should be clear to the reader of the code which methods are
intended to be used in what context, and what safe usage is. Part of this is
solved by hiding methods via unexported names. Another part of this is
naming conventions on the methods (e.g. underscores) with good documentation
and code organization. If there are too many exposed methods and it isn't
clear which methods have what side effects, then there is something wrong with
the design of the abstractions and it should be revisited.
## On concurrency
In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and
the need to handle RPC requests from a large number of consuming subscribers,
it is unavoidable for Tendermint development to require expertise in
concurrency design, especially when it comes to the reactor design and to RPC
request handling.
# Guidelines
Here are some guidelines for designing for (sufficient) performance and concurrency:
* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
needs to get addressed sooner rather than later. Avoid creating too many
goroutines as a patch around incomplete concurrency design, or at least be
aware of the debt and do not add to it. On the other hand, Tendermint
is designed to have a limited number of peers (e.g. 10 or 20), so the creation
of O(C) goroutines per O(P) peers is still O(C\*P=constant).
* Use defer statements to unlock as much as possible. If you need to unlock sooner,
create more modular functions that still make use of defer statements.
# Mantras
* Premature optimization kills
* Readability is paramount
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.

View File

@@ -9,14 +9,14 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.15-blue.svg)](https://github.com/moovweb/gvm)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
[![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
| Branch | Tests | Coverage | Linting |
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| master | ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) |
| Branch | Tests | Coverage | Linting |
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- |
| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) </br> ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) |
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
@@ -36,13 +36,13 @@ However, we are still making breaking changes to the protocol and the APIs and h
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
## Security
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
@@ -50,7 +50,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
## Minimum requirements
| Requirement | Notes |
|-------------|------------------|
| ----------- | ---------------- |
| Go version | Go1.15 or higher |
## Documentation
@@ -96,6 +96,7 @@ CHANGELOG even if they don't lead to MINOR version bumps:
- crypto
- config
- libs
- bech32
- bits
- bytes
- json
@@ -157,14 +158,4 @@ Additional tooling can be found in [/docs/tools](/docs/tools).
- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)
## Join us!
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).
- [Blog](https://blog.cosmos.network/tendermint/home)

View File

@@ -2,43 +2,13 @@
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## Unreleased
### ABCI Changes
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
### Config Changes
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
you have updated all the variables in your `config.toml` file (see the example below).
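For illustration, a few keys under the old and new spellings (a hypothetical excerpt; consult your regenerated `config.toml` for the authoritative list):
```toml
# old (snake_case)           ->  new (hyphen-case)
# fast_sync = true           ->  fast-sync = true
# proxy_app = "kvstore"      ->  proxy-app = "kvstore"
# addr_book_strict = false   ->  addr-book-strict = false
```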
### CLI Changes
* If you had previously used `tendermint gen_node_key` to generate a new node
key, keep in mind that it no longer saves the output to a file. You can use
`tendermint init` or pipe the output of `tendermint gen_node_key` to
`$TMHOME/config/node_key.json`:
```
$ tendermint gen_node_key > $TMHOME/config/node_key.json
```
* CLI commands and flags are all now hyphen-case instead of snake_case.
Make sure to adjust any scripts that call a CLI command with snake_case.
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
Note that Tendermint 0.34 also requires Go 1.15 or higher.
### ABCI Changes
@@ -72,8 +42,6 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.
* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
For more, see "Crypto," below.
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
### P2P Protocol
The default codec is now proto3, not amino. The schema files can be found in the `/proto`
@@ -81,9 +49,12 @@ directory. For more, see "Protobuf," below.
### Blockchain Protocol
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
fields.
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from:
* `BeginBlock#Events`
* Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
GasWanted, GasUsed, Events)` responses
* `BeginBlock#Events`
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962); see the sketch below.
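For a quick sanity check of the new empty-tree value, note that RFC 6962 defines the hash of an empty tree as the hash of the empty string; a standalone sketch (not Tendermint's own Merkle package):
```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// RFC 6962: MTH({}) = SHA-256 of the empty input.
	empty := sha256.Sum256(nil)
	fmt.Printf("%x\n", empty)
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```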
@@ -141,7 +112,7 @@ Tendermint 0.34 includes new and updated consensus parameters.
#### Evidence Parameters
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
### Crypto
@@ -187,7 +158,6 @@ Other user-relevant changes include:
* The `Verifier` was broken up into two pieces:
* Core verification logic (pure `VerifyX` functions)
* `Client` object, which represents the complete light client
* The new light client stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -218,7 +188,7 @@ blockchains, we recommend that you check the chain ID.
### Version
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
Example:
@@ -226,7 +196,7 @@ Example:
go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
```
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
## v0.33.4

66
Vagrantfile vendored Normal file
View File

@@ -0,0 +1,66 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/focal64"
config.vm.provider "virtualbox" do |v|
v.memory = 4096
v.cpus = 2
end
config.vm.provision "shell", inline: <<-SHELL
apt-get update
# install base requirements
apt-get install -y --no-install-recommends wget curl jq zip \
make shellcheck bsdmainutils psmisc
apt-get install -y language-pack-en
# install docker
apt-get install -y --no-install-recommends apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get update
apt-get install -y docker-ce
usermod -aG docker vagrant
# install go
wget -q https://dl.google.com/go/go1.15.linux-amd64.tar.gz
tar -xvf go1.15.linux-amd64.tar.gz
mv go /usr/local
rm -f go1.15.linux-amd64.tar.gz
# install nodejs (for docs)
curl -sL https://deb.nodesource.com/setup_11.x | bash -
apt-get install -y nodejs
# cleanup
apt-get autoremove -y
# set env variables
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
mkdir -p /home/vagrant/go/bin
mkdir -p /home/vagrant/go/src/github.com/tendermint
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
chown -R vagrant:vagrant /home/vagrant/go
chown vagrant:vagrant /home/vagrant/.bash_profile
# get all deps and tools, ready to install/test
su - vagrant -c 'source /home/vagrant/.bash_profile'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
SHELL
end

View File

@@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
A detailed description of the ABCI methods and message types is contained in:
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](../proto/tendermint/abci/types.proto)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
## Protocol Buffers

View File

@@ -1,7 +1,6 @@
package abcicli
import (
"context"
"fmt"
"sync"
@@ -15,53 +14,48 @@ const (
echoRetryIntervalSeconds = 1
)
//go:generate mockery --case underscore --name Client
// Client defines an interface for an ABCI client.
//
// All `Async` methods return a `ReqRes` object and an error.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
//
// NOTE these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes
// and logs.
// Note these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes and logs.
type Client interface {
service.Service
SetResponseCallback(Callback)
Error() error
// Asynchronous requests
FlushAsync(context.Context) (*ReqRes, error)
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
CommitAsync(context.Context) (*ReqRes, error)
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
FlushAsync() *ReqRes
EchoAsync(msg string) *ReqRes
InfoAsync(types.RequestInfo) *ReqRes
SetOptionAsync(types.RequestSetOption) *ReqRes
DeliverTxAsync(types.RequestDeliverTx) *ReqRes
CheckTxAsync(types.RequestCheckTx) *ReqRes
QueryAsync(types.RequestQuery) *ReqRes
CommitAsync() *ReqRes
InitChainAsync(types.RequestInitChain) *ReqRes
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
EndBlockAsync(types.RequestEndBlock) *ReqRes
ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
// Synchronous requests
FlushSync(context.Context) error
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
CommitSync(context.Context) (*types.ResponseCommit, error)
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
FlushSync() error
EchoSync(msg string) (*types.ResponseEcho, error)
InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
CommitSync() (*types.ResponseCommit, error)
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
}
//----------------------------------------
@@ -80,12 +74,8 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
return
}
//----------------------------------------
type Callback func(*types.Request, *types.Response)
//----------------------------------------
type ReqRes struct {
*types.Request
*sync.WaitGroup
@@ -107,34 +97,50 @@ func NewReqRes(req *types.Request) *ReqRes {
}
}
// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
reqRes.mtx.Lock()
// SetCallback sets the callback. If reqRes is already done, it will call the cb
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
// callback is supported.
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
r.mtx.Lock()
if reqRes.done {
reqRes.mtx.Unlock()
cb(reqRes.Response)
if r.done {
r.mtx.Unlock()
cb(r.Response)
return
}
reqRes.cb = cb
reqRes.mtx.Unlock()
r.cb = cb
r.mtx.Unlock()
}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
reqRes.mtx.Lock()
defer reqRes.mtx.Unlock()
return reqRes.cb
// InvokeCallback invokes a thread-safe execution of the configured callback
// if non-nil.
func (r *ReqRes) InvokeCallback() {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.cb != nil {
r.cb(r.Response)
}
}
// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
reqRes.mtx.Lock()
reqRes.done = true
reqRes.mtx.Unlock()
// GetCallback returns the configured callback of the ReqRes object which may be
// nil. Note, it is not safe to concurrently call this in cases where it is
// marked done and SetCallback is called before calling GetCallback as that
// will invoke the callback twice and create a potential race condition.
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
r.mtx.Lock()
defer r.mtx.Unlock()
return r.cb
}
// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
r.done = true
r.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {

View File

@@ -1,29 +0,0 @@
// Package abcicli provides an ABCI implementation in Go.
//
// There are 3 clients available:
// 1. socket (unix or TCP)
// 2. local (in memory)
// 3. gRPC
//
// ## Socket client
//
// async: the client maintains an internal buffer of a fixed size. when the
// buffer becomes full, all Async calls will return an error immediately.
//
// sync: the client blocks on 1) enqueuing the Sync request, 2) enqueuing the
// Flush request, and 3) waiting for the Flush response
//
// ## Local client
//
// async: global mutex is locked during each call (meaning it's not really async!)
// sync: global mutex is locked during each call
//
// ## gRPC client
//
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
// to store responses and later call callbacks (separate goroutine per
// response).
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abcicli
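To ground the sync/async semantics described above, a minimal end-to-end sketch against the context-free API on the right-hand side of this diff (hypothetical wiring; the TCP address is illustrative and error handling is abbreviated):

    package main

    import (
        "log"

        abcicli "github.com/tendermint/tendermint/abci/client"
        "github.com/tendermint/tendermint/abci/types"
    )

    func main() {
        // Socket client; mustConnect=true makes Start fail fast if the
        // ABCI server is unreachable.
        client, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", true)
        if err != nil {
            log.Fatal(err)
        }
        if err := client.Start(); err != nil {
            log.Fatal(err)
        }
        defer client.Stop() // error ignored in this sketch

        // Sync: blocks until the response (and its Flush) comes back.
        res, err := client.EchoSync("hello")
        if err != nil {
            log.Fatal(err)
        }
        log.Println("sync echo:", res.Message)

        // Async: returns a *ReqRes immediately; the response arrives via
        // the callback once ordered through the client's queue.
        reqres := client.EchoAsync("hello")
        reqres.SetCallback(func(r *types.Response) {
            log.Println("async echo:", r.GetEcho().Message)
        })
        if err := client.FlushSync(); err != nil {
            log.Fatal(err)
        }
    }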

View File

@@ -1,12 +1,12 @@
package abcicli
import (
"context"
"fmt"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
@@ -15,7 +15,10 @@ import (
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// A gRPC client.
var _ Client = (*grpcClient)(nil)
// A stripped copy of the remoteClient that makes
// synchronous calls using gRPC.
type grpcClient struct {
service.BaseService
mustConnect bool
@@ -30,18 +33,6 @@ type grpcClient struct {
resCb func(*types.Request, *types.Response) // listens to all callbacks
}
var _ Client = (*grpcClient)(nil)
// NewGRPCClient creates a gRPC client, which will connect to addr upon
// start. Note Client#Start returns an error if the connection is unsuccessful
// and mustConnect is true.
//
// GRPC calls are synchronous, but some callbacks expect to be called
// asynchronously (e.g., the mempool expects to be able to lock to remove bad
// txs from cache). To accommodate, we finish each call in its own goroutine,
// which is expensive, but easy - if you want something better, use the socket
// protocol! Maybe one day, if people really want it, we use grpc streams, but
// hopefully not :D
func NewGRPCClient(addr string, mustConnect bool) Client {
cli := &grpcClient{
addr: addr,
@@ -63,6 +54,10 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}
func (cli *grpcClient) OnStart() error {
if err := cli.BaseService.OnStart(); err != nil {
return err
}
// This processes asynchronous request/response messages and dispatches
// them to callbacks.
go func() {
@@ -125,6 +120,8 @@ RETRY_LOOP:
}
func (cli *grpcClient) OnStop() {
cli.BaseService.OnStop()
if cli.conn != nil {
cli.conn.Close()
}
@@ -163,168 +160,155 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
}
//----------------------------------------
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
// (e.g., the mempool expects to be able to lock to remove bad txs from cache).
// To accommodate, we finish each call in its own goroutine,
// which is expensive, but easy - if you want something better, use the socket protocol!
// Maybe one day, if people really want it, we use grpc streams,
// but hopefully not :D
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
req := types.ToRequestEcho(msg)
res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
func (cli *grpcClient) FlushAsync() *ReqRes {
req := types.ToRequestFlush()
res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
req := types.ToRequestInfo(params)
res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
req := types.ToRequestSetOption(params)
res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}
func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
req := types.ToRequestDeliverTx(params)
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
req := types.ToRequestCheckTx(params)
res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
req := types.ToRequestQuery(params)
res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
func (cli *grpcClient) CommitAsync() *ReqRes {
req := types.ToRequestCommit()
res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
req := types.ToRequestInitChain(params)
res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes {
req := types.ToRequestListSnapshots(params)
res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes {
req := types.ToRequestOfferSnapshot(params)
res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) LoadSnapshotChunkAsync(
ctx context.Context,
params types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes {
req := types.ToRequestLoadSnapshotChunk(params)
res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ApplySnapshotChunkAsync(
ctx context.Context,
params types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes {
req := types.ToRequestApplySnapshotChunk(params)
res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
if err != nil {
return nil, err
cli.StopForError(err)
}
return cli.finishAsyncCall(
ctx,
req,
&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
)
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
}
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
reqres := NewReqRes(req)
reqres.Response = res
select {
case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
return reqres, nil
case <-ctx.Done():
return nil, ctx.Err()
}
cli.chReqRes <- reqres // use channel for async responses, since they must be ordered
return reqres
}
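The ordering guarantee mentioned in the finishAsyncCall comment comes from draining chReqRes in a single goroutine. A simplified sketch of that dispatch loop (assumed shape of the OnStart goroutine above, not a verbatim excerpt; the real routine also handles shutdown):

    for reqres := range cli.chReqRes {
        // Completing strictly in channel order means callbacks observe
        // responses in the order the requests were issued, even though
        // each gRPC call returned independently.
        reqres.SetDone()
        if cb := reqres.GetCallback(); cb != nil {
            cb(reqres.Response)
        }
        if cli.resCb != nil {
            cli.resCb(reqres.Request, reqres.Response) // global listener, if set
        }
    }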
// finishSyncCall waits for an async call to complete. It is necessary to call all
@@ -357,150 +341,79 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
//----------------------------------------
func (cli *grpcClient) FlushSync(ctx context.Context) error {
func (cli *grpcClient) FlushSync() error {
return nil
}
func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqres, err := cli.EchoAsync(ctx, msg)
if err != nil {
return nil, err
}
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.EchoAsync(msg)
// StopForError should already have been called if error is set
return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
}
func (cli *grpcClient) InfoSync(
ctx context.Context,
req types.RequestInfo,
) (*types.ResponseInfo, error) {
reqres, err := cli.InfoAsync(ctx, req)
if err != nil {
return nil, err
}
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.InfoAsync(req)
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}
func (cli *grpcClient) DeliverTxSync(
ctx context.Context,
params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.SetOptionAsync(req)
return reqres.Response.GetSetOption(), cli.Error()
}
reqres, err := cli.DeliverTxAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
reqres := cli.DeliverTxAsync(params)
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTxSync(
ctx context.Context,
params types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
reqres, err := cli.CheckTxAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
reqres := cli.CheckTxAsync(params)
return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
}
func (cli *grpcClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
reqres, err := cli.QueryAsync(ctx, req)
if err != nil {
return nil, err
}
func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.QueryAsync(req)
return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
}
func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
reqres, err := cli.CommitAsync(ctx)
if err != nil {
return nil, err
}
func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.CommitAsync()
return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
}
func (cli *grpcClient) InitChainSync(
ctx context.Context,
params types.RequestInitChain,
) (*types.ResponseInitChain, error) {
reqres, err := cli.InitChainAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.InitChainAsync(params)
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlockSync(
ctx context.Context,
params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.BeginBlockAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.BeginBlockAsync(params)
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlockSync(
ctx context.Context,
params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.EndBlockAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.EndBlockAsync(params)
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}
func (cli *grpcClient) ListSnapshotsSync(
ctx context.Context,
params types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
reqres, err := cli.ListSnapshotsAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
reqres := cli.ListSnapshotsAsync(params)
return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
}
func (cli *grpcClient) OfferSnapshotSync(
ctx context.Context,
params types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
reqres, err := cli.OfferSnapshotAsync(ctx, params)
if err != nil {
return nil, err
}
func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
reqres := cli.OfferSnapshotAsync(params)
return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
}
func (cli *grpcClient) LoadSnapshotChunkSync(
ctx context.Context,
params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
if err != nil {
return nil, err
}
reqres := cli.LoadSnapshotChunkAsync(params)
return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
}
func (cli *grpcClient) ApplySnapshotChunkSync(
ctx context.Context,
params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
if err != nil {
return nil, err
}
reqres := cli.ApplySnapshotChunkAsync(params)
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

View File

@@ -1,13 +1,13 @@
package abcicli
import (
"context"
types "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
var _ Client = (*localClient)(nil)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -20,12 +20,6 @@ type localClient struct {
Callback
}
var _ Client = (*localClient)(nil)
// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
if mtx == nil {
mtx = new(tmsync.Mutex)
@@ -49,22 +43,22 @@ func (app *localClient) Error() error {
return nil
}
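The shared-mutex design called out in the NOTE above can be illustrated with a short sketch (hypothetical wiring, not part of this diff; kvstore, from github.com/tendermint/tendermint/abci/example/kvstore, is just an example types.Application):

    mtx := new(tmsync.Mutex)
    app := kvstore.NewApplication()
    client := abcicli.NewLocalClient(mtx, app)

    // Every Sync/Async method locks mtx around the direct application
    // call, so the app never sees concurrent ABCI invocations, and the
    // caller can take the same mtx to serialize its own state access.
    res, err := client.InfoSync(types.RequestInfo{Version: "0.34.x"})
    // use res/err as usual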
func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
func (app *localClient) FlushAsync() *ReqRes {
// Do nothing
return newLocalReqRes(types.ToRequestFlush(), nil), nil
return newLocalReqRes(types.ToRequestFlush(), nil)
}
func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
func (app *localClient) EchoAsync(msg string) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
), nil
)
}
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -72,10 +66,21 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
), nil
)
}
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
return app.callback(
types.ToRequestSetOption(req),
types.ToResponseSetOption(res),
)
}
func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -83,10 +88,10 @@ func (app *localClient) DeliverTxAsync(ctx context.Context, params types.Request
return app.callback(
types.ToRequestDeliverTx(params),
types.ToResponseDeliverTx(res),
), nil
)
}
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -94,10 +99,10 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
return app.callback(
types.ToRequestCheckTx(req),
types.ToResponseCheckTx(res),
), nil
)
}
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -105,10 +110,10 @@ func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery)
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
), nil
)
}
func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
func (app *localClient) CommitAsync() *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -116,10 +121,10 @@ func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
), nil
)
}
func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -127,10 +132,10 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
return app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
), nil
)
}
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -138,10 +143,10 @@ func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBe
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
), nil
)
}
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -149,10 +154,10 @@ func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndB
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
), nil
)
}
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -160,10 +165,10 @@ func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.Reques
return app.callback(
types.ToRequestListSnapshots(req),
types.ToResponseListSnapshots(res),
), nil
)
}
func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -171,13 +176,10 @@ func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.Reques
return app.callback(
types.ToRequestOfferSnapshot(req),
types.ToResponseOfferSnapshot(res),
), nil
)
}
func (app *localClient) LoadSnapshotChunkAsync(
ctx context.Context,
req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -185,13 +187,10 @@ func (app *localClient) LoadSnapshotChunkAsync(
return app.callback(
types.ToRequestLoadSnapshotChunk(req),
types.ToResponseLoadSnapshotChunk(res),
), nil
)
}
func (app *localClient) ApplySnapshotChunkAsync(
ctx context.Context,
req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -199,20 +198,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
return app.callback(
types.ToRequestApplySnapshotChunk(req),
types.ToResponseApplySnapshotChunk(res),
), nil
)
}
//-------------------------------------------------------
func (app *localClient) FlushSync(ctx context.Context) error {
func (app *localClient) FlushSync() error {
return nil
}
func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -220,11 +219,15 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
return &res, nil
}
func (app *localClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
return &res, nil
}
func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -232,10 +235,7 @@ func (app *localClient) DeliverTxSync(
return &res, nil
}
func (app *localClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -243,10 +243,7 @@ func (app *localClient) CheckTxSync(
return &res, nil
}
func (app *localClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -254,7 +251,7 @@ func (app *localClient) QuerySync(
return &res, nil
}
func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -262,11 +259,7 @@ func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit,
return &res, nil
}
func (app *localClient) InitChainSync(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -274,11 +267,7 @@ func (app *localClient) InitChainSync(
return &res, nil
}
func (app *localClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -286,11 +275,7 @@ func (app *localClient) BeginBlockSync(
return &res, nil
}
func (app *localClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -298,11 +283,7 @@ func (app *localClient) EndBlockSync(
return &res, nil
}
func (app *localClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -310,11 +291,7 @@ func (app *localClient) ListSnapshotsSync(
return &res, nil
}
func (app *localClient) OfferSnapshotSync(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -323,9 +300,7 @@ func (app *localClient) OfferSnapshotSync(
}
func (app *localClient) LoadSnapshotChunkSync(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -334,9 +309,7 @@ func (app *localClient) LoadSnapshotChunkSync(
}
func (app *localClient) ApplySnapshotChunkSync(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()

View File

@@ -1,12 +1,9 @@
// Code generated by mockery v2.3.0. DO NOT EDIT.
// Code generated by mockery v1.1.1. DO NOT EDIT.
package mocks
import (
context "context"
abcicli "github.com/tendermint/tendermint/abci/client"
log "github.com/tendermint/tendermint/libs/log"
mock "github.com/stretchr/testify/mock"
@@ -19,36 +16,29 @@ type Client struct {
mock.Mock
}
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0
func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
// ApplySnapshotChunkSync provides a mock function with given fields: _a0
func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseApplySnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
@@ -56,8 +46,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -65,36 +55,29 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
return r0, r1
}
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// BeginBlockAsync provides a mock function with given fields: _a0
func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0, _a1)
// BeginBlockSync provides a mock function with given fields: _a0
func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseBeginBlock)
@@ -102,8 +85,8 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -111,36 +94,29 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
return r0, r1
}
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// CheckTxAsync provides a mock function with given fields: _a0
func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// CheckTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0, _a1)
// CheckTxSync provides a mock function with given fields: _a0
func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCheckTx)
@@ -148,30 +124,7 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CommitAsync provides a mock function with given fields: _a0
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
@@ -180,13 +133,29 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
return r0, r1
}
// CommitSync provides a mock function with given fields: _a0
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
ret := _m.Called(_a0)
// CommitAsync provides a mock function with given fields:
func (_m *Client) CommitAsync() *abcicli.ReqRes {
ret := _m.Called()
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// CommitSync provides a mock function with given fields:
func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
ret := _m.Called()
var r0 *types.ResponseCommit
if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
r0 = rf(_a0)
if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCommit)
@@ -194,7 +163,46 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0
func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// DeliverTxSync provides a mock function with given fields: _a0
func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
@@ -203,82 +211,29 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// EchoAsync provides a mock function with given fields: msg
func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes {
ret := _m.Called(msg)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok {
r0 = rf(msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
ret := _m.Called(ctx, msg)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
r0 = rf(ctx, msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, msg)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoSync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
ret := _m.Called(ctx, msg)
// EchoSync provides a mock function with given fields: msg
func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
ret := _m.Called(msg)
var r0 *types.ResponseEcho
if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
r0 = rf(ctx, msg)
if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok {
r0 = rf(msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEcho)
@@ -286,8 +241,8 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, msg)
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(msg)
} else {
r1 = ret.Error(1)
}
@@ -295,36 +250,29 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
return r0, r1
}
// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// EndBlockAsync provides a mock function with given fields: _a0
func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0, _a1)
// EndBlockSync provides a mock function with given fields: _a0
func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseEndBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEndBlock)
@@ -332,8 +280,8 @@ func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -355,12 +303,42 @@ func (_m *Client) Error() error {
return r0
}
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
// FlushAsync provides a mock function with given fields:
func (_m *Client) FlushAsync() *abcicli.ReqRes {
ret := _m.Called()
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// FlushSync provides a mock function with given fields:
func (_m *Client) FlushSync() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// InfoAsync provides a mock function with given fields: _a0
func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
@@ -368,8 +346,24 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
}
}
return r0
}
// InfoSync provides a mock function with given fields: _a0
func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseInfo
if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
@@ -378,96 +372,29 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
return r0, r1
}
// FlushSync provides a mock function with given fields: _a0
func (_m *Client) FlushSync(_a0 context.Context) error {
// InitChainAsync provides a mock function with given fields: _a0
func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// InfoSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseInfo
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// InitChainAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// InitChainSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
ret := _m.Called(_a0, _a1)
// InitChainSync provides a mock function with given fields: _a0
func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseInitChain
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseInitChain)
@@ -475,8 +402,8 @@ func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -498,36 +425,29 @@ func (_m *Client) IsRunning() bool {
return r0
}
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// ListSnapshotsAsync provides a mock function with given fields: _a0
func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
ret := _m.Called(_a0, _a1)
// ListSnapshotsSync provides a mock function with given fields: _a0
func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseListSnapshots
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseListSnapshots)
@@ -535,8 +455,8 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -544,36 +464,29 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
return r0, r1
}
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0
func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
// LoadSnapshotChunkSync provides a mock function with given fields: _a0
func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseLoadSnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
@@ -581,8 +494,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -590,36 +503,29 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
return r0, r1
}
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// OfferSnapshotAsync provides a mock function with given fields: _a0
func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
ret := _m.Called(_a0, _a1)
// OfferSnapshotSync provides a mock function with given fields: _a0
func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseOfferSnapshot
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
@@ -627,8 +533,8 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -669,36 +575,29 @@ func (_m *Client) OnStop() {
_m.Called()
}
// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
// QueryAsync provides a mock function with given fields: _a0
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// QuerySync provides a mock function with given fields: _a0, _a1
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
ret := _m.Called(_a0, _a1)
// QuerySync provides a mock function with given fields: _a0
func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseQuery
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
r0 = rf(_a0, _a1)
if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseQuery)
@@ -706,8 +605,8 @@ func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
r1 = rf(_a0, _a1)
if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
@@ -750,6 +649,45 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
_m.Called(_a0)
}
// SetOptionAsync provides a mock function with given fields: _a0
func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// SetOptionSync provides a mock function with given fields: _a0
func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseSetOption
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseSetOption)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
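
All of the generated methods above follow the same mockery pattern: if the first recorded return value is a function, it is invoked with the call's arguments; otherwise the stored value is type-asserted and returned directly. A minimal usage sketch with testify — the mocks import path is an assumption:

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/abci/client/mocks" // assumed path for the generated mock
	"github.com/tendermint/tendermint/abci/types"
)

func TestSetOptionMock(t *testing.T) {
	m := new(mocks.Client) // embeds mock.Mock, as mockery generates
	req := types.RequestSetOption{Key: "serial", Value: "on"}

	// record canned return values for this exact argument
	m.On("SetOptionSync", req).Return(&types.ResponseSetOption{}, nil)

	res, err := m.SetOptionSync(req)
	require.NoError(t, err)
	require.NotNil(t, res)
	m.AssertExpectations(t) // fails if the recorded call never happened
}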
// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
_m.Called(_a0)


@@ -3,7 +3,6 @@ package abcicli
import (
"bufio"
"container/list"
"context"
"errors"
"fmt"
"io"
@@ -19,18 +18,10 @@ import (
)
const (
// reqQueueSize is the max number of queued async requests.
// (memory: 256MB max assuming 1MB transactions)
reqQueueSize = 256
// Don't wait longer than...
flushThrottleMS = 20
reqQueueSize = 256 // TODO make configurable
flushThrottleMS = 20 // Don't wait longer than...
)
type reqResWithContext struct {
R *ReqRes
C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
}
// This is goroutine-safe, but users should beware that the application in
// general is not meant to be interfaced with concurrent callers.
type socketClient struct {
@@ -40,7 +31,7 @@ type socketClient struct {
mustConnect bool
conn net.Conn
reqQueue chan *reqResWithContext
reqQueue chan *ReqRes
flushTimer *timer.ThrottleTimer
mtx tmsync.Mutex
@@ -56,7 +47,7 @@ var _ Client = (*socketClient)(nil)
// if it fails to connect.
func NewSocketClient(addr string, mustConnect bool) Client {
cli := &socketClient{
reqQueue: make(chan *reqResWithContext, reqQueueSize),
reqQueue: make(chan *ReqRes, reqQueueSize),
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
@@ -132,20 +123,15 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
case reqres := <-cli.reqQueue:
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
if reqres.C.Err() != nil {
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
continue
}
cli.willSendReq(reqres.R)
err := types.WriteMessage(reqres.R.Request, w)
cli.willSendReq(reqres)
err := types.WriteMessage(reqres.Request, w)
if err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
// If it's a flush request, flush the current buffer.
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
@@ -154,7 +140,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
}
case <-cli.flushTimer.Ch: // flush queue
select {
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
default:
// Probably will fill the buffer, or retry later.
}
@@ -226,273 +212,218 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
//
// NOTE: It is possible this callback isn't set on the reqres object at this
// point, in which case it will be invoked later, once it is set.
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}
reqres.InvokeCallback()
return nil
}
//----------------------------------------
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
return cli.queueRequest(types.ToRequestEcho(msg))
}
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
func (cli *socketClient) FlushAsync() *ReqRes {
return cli.queueRequest(types.ToRequestFlush())
}
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
return cli.queueRequest(types.ToRequestInfo(req))
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
return cli.queueRequest(types.ToRequestSetOption(req))
}
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
return cli.queueRequest(types.ToRequestDeliverTx(req))
}
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
return cli.queueRequest(types.ToRequestCheckTx(req))
}
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
return cli.queueRequest(types.ToRequestQuery(req))
}
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
func (cli *socketClient) CommitAsync() *ReqRes {
return cli.queueRequest(types.ToRequestCommit())
}
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
return cli.queueRequest(types.ToRequestInitChain(req))
}
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
return cli.queueRequest(types.ToRequestBeginBlock(req))
}
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
return cli.queueRequest(types.ToRequestEndBlock(req))
}
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
return cli.queueRequest(types.ToRequestListSnapshots(req))
}
func (cli *socketClient) LoadSnapshotChunkAsync(
ctx context.Context,
req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
return cli.queueRequest(types.ToRequestOfferSnapshot(req))
}
func (cli *socketClient) ApplySnapshotChunkAsync(
ctx context.Context,
req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
}
func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync(ctx context.Context) error {
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
if err != nil {
return queueErr(err)
}
func (cli *socketClient) FlushSync() error {
reqRes := cli.queueRequest(types.ToRequestFlush())
if err := cli.Error(); err != nil {
return err
}
gotResp := make(chan struct{})
go func() {
// NOTE: if we don't flush the queue, it's possible to get stuck here
reqRes.Wait()
close(gotResp)
}()
select {
case <-gotResp:
return cli.Error()
case <-ctx.Done():
return ctx.Err()
}
reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
return cli.Error()
}
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
if err != nil {
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.queueRequest(types.ToRequestEcho(msg))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetEcho(), nil
return reqres.Response.GetEcho(), cli.Error()
}
func (cli *socketClient) InfoSync(
ctx context.Context,
req types.RequestInfo,
) (*types.ResponseInfo, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
if err != nil {
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.queueRequest(types.ToRequestInfo(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetInfo(), nil
return reqres.Response.GetInfo(), cli.Error()
}
func (cli *socketClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
if err != nil {
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.queueRequest(types.ToRequestSetOption(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
return reqres.Response.GetSetOption(), cli.Error()
}
func (cli *socketClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
if err != nil {
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetCheckTx(), nil
return reqres.Response.GetDeliverTx(), cli.Error()
}
func (cli *socketClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
if err != nil {
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
reqres := cli.queueRequest(types.ToRequestCheckTx(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetQuery(), nil
return reqres.Response.GetCheckTx(), cli.Error()
}
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
if err != nil {
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.queueRequest(types.ToRequestQuery(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetCommit(), nil
return reqres.Response.GetQuery(), cli.Error()
}
func (cli *socketClient) InitChainSync(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
if err != nil {
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.queueRequest(types.ToRequestCommit())
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetInitChain(), nil
return reqres.Response.GetCommit(), cli.Error()
}
func (cli *socketClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
if err != nil {
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.queueRequest(types.ToRequestInitChain(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
return reqres.Response.GetInitChain(), cli.Error()
}
func (cli *socketClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
if err != nil {
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
return reqres.Response.GetBeginBlock(), cli.Error()
}
func (cli *socketClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
if err != nil {
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.queueRequest(types.ToRequestEndBlock(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetListSnapshots(), nil
return reqres.Response.GetEndBlock(), cli.Error()
}
func (cli *socketClient) OfferSnapshotSync(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetOfferSnapshot(), nil
return reqres.Response.GetListSnapshots(), cli.Error()
}
func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetOfferSnapshot(), cli.Error()
}
func (cli *socketClient) LoadSnapshotChunkSync(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetLoadSnapshotChunk(), nil
return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
}
func (cli *socketClient) ApplySnapshotChunkSync(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetApplySnapshotChunk(), nil
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
// returns an error (sync=false) or blocks (sync=true).
//
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
// used later to determine if request should be dropped (if ctx.Err is
// non-nil).
//
// The caller is responsible for checking cli.Error.
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
reqres := NewReqRes(req)
if sync {
select {
case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
case <-ctx.Done():
return nil, ctx.Err()
}
} else {
select {
case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
default:
return nil, errors.New("buffer is full")
}
}
// TODO: set cli.err if reqQueue times out
cli.reqQueue <- reqres
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
@@ -502,41 +433,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
cli.flushTimer.Set()
}
return reqres, nil
}
func (cli *socketClient) queueRequestAsync(
ctx context.Context,
req *types.Request,
) (*ReqRes, error) {
reqres, err := cli.queueRequest(ctx, req, false)
if err != nil {
return nil, queueErr(err)
}
return reqres, cli.Error()
}
func (cli *socketClient) queueRequestAndFlushSync(
ctx context.Context,
req *types.Request,
) (*ReqRes, error) {
reqres, err := cli.queueRequest(ctx, req, true)
if err != nil {
return nil, queueErr(err)
}
if err := cli.FlushSync(ctx); err != nil {
return nil, err
}
return reqres, cli.Error()
}
func queueErr(e error) error {
return fmt.Errorf("can't queue req: %w", e)
return reqres
}
func (cli *socketClient) flushQueue() {
@@ -554,7 +451,7 @@ LOOP:
for {
select {
case reqres := <-cli.reqQueue:
reqres.R.Done()
reqres.Done()
default:
break LOOP
}
@@ -571,6 +468,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_SetOption:
_, ok = res.Value.(*types.Response_SetOption)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
@@ -603,10 +502,12 @@ func (cli *socketClient) stopForError(err error) {
}
cli.mtx.Lock()
cli.err = err
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.Logger.Info("Stopping abci.socketClient", "reason", err)
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
if err := cli.Stop(); err != nil {
cli.Logger.Error("Error stopping abci.socketClient", "err", err)
}
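
After this change every *Sync method is the same queue-then-flush sequence, with transport failures surfaced through cli.Error(). A caller-side sketch of the post-change API; the address and error handling are illustrative:

package main

import (
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	cli := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true) // mustConnect=true
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer cli.Stop() //nolint:errcheck

	// EchoSync queues ToRequestEcho, flushes the socket, then returns cli.Error()
	res, err := cli.EchoSync("ping")
	if err != nil {
		log.Fatal(err) // transport-level failure (broken socket, write error, ...)
	}
	fmt.Println(res.Message)
}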


@@ -1,7 +1,6 @@
package abcicli_test
import (
"context"
"fmt"
"testing"
"time"
@@ -16,8 +15,6 @@ import (
"github.com/tendermint/tendermint/libs/service"
)
var ctx = context.Background()
func TestProperSyncCalls(t *testing.T) {
app := slowApp{}
@@ -36,12 +33,11 @@ func TestProperSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// This is BeginBlockSync unrolled....
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
assert.NoError(t, err)
err = c.FlushSync(context.Background())
assert.NoError(t, err)
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
err := c.FlushSync()
require.NoError(t, err)
res := reqres.Response.GetBeginBlock()
assert.NotNil(t, res)
require.NotNil(t, res)
resp <- c.Error()
}()
@@ -72,16 +68,14 @@ func TestHangingSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// Start BeginBlock and flush it
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
assert.NoError(t, err)
flush, err := c.FlushAsync(ctx)
assert.NoError(t, err)
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
flush := c.FlushAsync()
// wait 20 ms for all events to travel socket, but
// no response yet from server
time.Sleep(20 * time.Millisecond)
// kill the server, so the connections break
err = s.Stop()
assert.NoError(t, err)
err := s.Stop()
require.NoError(t, err)
// wait for the response from BeginBlock
reqres.Wait()


@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/hex"
"errors"
"fmt"
@@ -30,8 +29,6 @@ import (
var (
client abcicli.Client
logger log.Logger
ctx = context.Background()
)
// flags
@@ -151,6 +148,7 @@ func addCommands() {
RootCmd.AddCommand(consoleCmd)
RootCmd.AddCommand(echoCmd)
RootCmd.AddCommand(infoCmd)
RootCmd.AddCommand(setOptionCmd)
RootCmd.AddCommand(deliverTxCmd)
RootCmd.AddCommand(checkTxCmd)
RootCmd.AddCommand(commitCmd)
@@ -178,6 +176,7 @@ you'd like to run:
where example.file looks something like:
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
@@ -199,7 +198,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
RunE: cmdConsole,
}
@@ -217,6 +216,13 @@ var infoCmd = &cobra.Command{
Args: cobra.ExactArgs(0),
RunE: cmdInfo,
}
var setOptionCmd = &cobra.Command{
Use: "set_option",
Short: "set an option on the application",
Long: "set an option on the application",
Args: cobra.ExactArgs(2),
RunE: cmdSetOption,
}
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
@@ -318,6 +324,7 @@ func cmdTest(cmd *cobra.Command, args []string) error {
return compose(
[]func() error{
func() error { return servertest.InitChain(client) },
func() error { return servertest.SetOption(client, "serial", "on") },
func() error { return servertest.Commit(client, nil) },
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
func() error { return servertest.Commit(client, nil) },
@@ -432,6 +439,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdInfo(cmd, actualArgs)
case "query":
return cmdQuery(cmd, actualArgs)
case "set_option":
return cmdSetOption(cmd, actualArgs)
default:
return cmdUnimplemented(cmd, pArgs)
}
@@ -455,6 +464,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil
@@ -466,7 +476,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
msg = args[0]
}
res, err := client.EchoSync(ctx, msg)
res, err := client.EchoSync(msg)
if err != nil {
return err
}
@@ -482,7 +492,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
version = args[0]
}
res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
res, err := client.InfoSync(types.RequestInfo{Version: version})
if err != nil {
return err
}
@@ -494,6 +504,25 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
const codeBad uint32 = 10
// Set an option on the application
func cmdSetOption(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want at least arguments of the form: <key> <value>",
})
return nil
}
key, val := args[0], args[1]
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
if err != nil {
return err
}
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
return nil
}
// Append a new tx to application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
@@ -507,7 +536,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
if err != nil {
return err
}
@@ -533,7 +562,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
if err != nil {
return err
}
@@ -548,7 +577,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
res, err := client.CommitSync(ctx)
res, err := client.CommitSync()
if err != nil {
return err
}
@@ -573,7 +602,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return err
}
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
resQuery, err := client.QuerySync(types.RequestQuery{
Data: queryBytes,
Path: flagPath,
Height: int64(flagHeight),


@@ -24,6 +24,24 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
key, value := req.Key, req.Value
if key == "serial" && value == "on" {
app.serial = true
} else {
/*
TODO Panic and have the ABCI server pass an exception.
The client can call SetOptionSync() and get an `error`.
return types.ResponseSetOption{
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
}
*/
return types.ResponseSetOption{}
}
return types.ResponseSetOption{}
}
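
With serial mode on, DeliverTx treats the tx bytes as a big-endian nonce that must equal the current txCount. A short sketch of toggling the mode via SetOption — the NewApplication constructor name follows the counter example package and is an assumption:

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/types"
)

func demoSerialMode() {
	app := counter.NewApplication(false) // serial off at construction
	app.SetOption(types.RequestSetOption{Key: "serial", Value: "on"})

	// txCount is 0, so a tx carrying nonce 5 must be rejected
	res := app.DeliverTx(types.RequestDeliverTx{Tx: []byte{0x05}})
	fmt.Println(res.Code == code.CodeTypeBadNonce) // true
}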
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
if app.serial {
if len(req.Tx) > 8 {
@@ -51,7 +69,6 @@ func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)


@@ -1,7 +1,6 @@
package example
import (
"context"
"fmt"
"math/rand"
"net"
@@ -14,6 +13,8 @@ import (
"google.golang.org/grpc"
"golang.org/x/net/context"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
@@ -44,7 +45,7 @@ func TestGRPC(t *testing.T) {
}
func testStream(t *testing.T, app types.Application) {
const numDeliverTxs = 20000
numDeliverTxs := 20000
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
@@ -52,8 +53,9 @@ func testStream(t *testing.T, app types.Application) {
// Start the listener
server := abciserver.NewSocketServer(socket, app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
err := server.Start()
require.NoError(t, err)
if err := server.Start(); err != nil {
require.NoError(t, err, "Error starting socket server")
}
t.Cleanup(func() {
if err := server.Stop(); err != nil {
t.Error(err)
@@ -63,8 +65,9 @@ func testStream(t *testing.T, app types.Application) {
// Connect to the socket
client := abcicli.NewSocketClient(socket, false)
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
err = client.Start()
require.NoError(t, err)
if err := client.Start(); err != nil {
t.Fatalf("Error starting socket client: %v", err.Error())
}
t.Cleanup(func() {
if err := client.Stop(); err != nil {
t.Error(err)
@@ -98,24 +101,22 @@ func testStream(t *testing.T, app types.Application) {
}
})
ctx := context.Background()
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
require.NoError(t, err)
reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
_ = reqRes
// check err ?
// Sometimes send flush messages
if counter%128 == 0 {
err = client.FlushSync(context.Background())
require.NoError(t, err)
if counter%123 == 0 {
client.FlushAsync()
// check err ?
}
}
// Send final flush message
_, err = client.FlushAsync(ctx)
require.NoError(t, err)
client.FlushAsync()
<-done
}


@@ -1,7 +1,6 @@
package kvstore
import (
"context"
"fmt"
"io/ioutil"
"sort"
@@ -24,8 +23,6 @@ const (
testValue = "def"
)
var ctx = context.Background()
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
@@ -326,23 +323,23 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// commit
_, err = app.CommitSync(ctx)
_, err = app.CommitSync()
require.NoError(t, err)
info, err := app.InfoSync(ctx, types.RequestInfo{})
info, err := app.InfoSync(types.RequestInfo{})
require.NoError(t, err)
require.NotZero(t, info.LastBlockHeight)
// make sure query is fine
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
resQuery, err := app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
@@ -353,7 +350,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string)
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
// make sure proof is fine
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
resQuery, err = app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,


@@ -62,6 +62,10 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
return res
}
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
return app.app.SetOption(req)
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set


@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_SetOption:
res := s.app.SetOption(*r.SetOption)
responses <- types.ToResponseSetOption(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)


@@ -2,7 +2,6 @@ package testsuite
import (
"bytes"
"context"
"errors"
"fmt"
@@ -11,8 +10,6 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
)
var ctx = context.Background()
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
@@ -21,7 +18,7 @@ func InitChain(client abcicli.Client) error {
power := tmrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChainSync(ctx, types.RequestInitChain{
_, err := client.InitChainSync(types.RequestInitChain{
Validators: vals,
})
if err != nil {
@@ -32,8 +29,19 @@ func InitChain(client abcicli.Client) error {
return nil
}
func SetOption(client abcicli.Client, key, value string) error {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
fmt.Println("Failed test: SetOption")
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
return err
}
fmt.Println("Passed test: SetOption")
return nil
}
func Commit(client abcicli.Client, hashExp []byte) error {
res, err := client.CommitSync(ctx)
res, err := client.CommitSync()
data := res.Data
if err != nil {
fmt.Println("Failed test: Commit")
@@ -50,7 +58,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
@@ -69,7 +77,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
}
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")


@@ -2,7 +2,6 @@ package main
import (
"bytes"
"context"
"fmt"
"os"
@@ -11,8 +10,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
)
var ctx = context.Background()
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
@@ -28,8 +25,15 @@ func startClient(abciType string) abcicli.Client {
return client
}
func setOption(client abcicli.Client, key, value string) {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
panicf("setting %v=%v: \nerr: %v", key, value, err)
}
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync(ctx)
res, err := client.CommitSync()
if err != nil {
panicf("client error: %v", err)
}
@@ -39,7 +43,7 @@ func commit(client abcicli.Client, hashExp []byte) {
}
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
if err != nil {
panicf("client error: %v", err)
}
@@ -51,6 +55,24 @@ func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
}
}
/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.CheckTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}
if res.IsErr() {
panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
}
if res.Code != codeExp {
panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("CheckTx response data was unexpected. Got %X expected %X",
res.Data, dataExp)
}
}*/
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}


@@ -7,6 +7,7 @@ import (
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
@@ -40,7 +41,7 @@ func ensureABCIIsUp(typ string, n int) error {
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
<-time.After(500 * time.Millisecond)
}
return err
}
@@ -78,16 +79,17 @@ func testCounter() {
}
}()
// commit(client, nil)
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
setOption(client, "serial", "on")
commit(client, nil)
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
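
The commit hashes asserted above are simply the app's txCount serialized as 8 big-endian bytes; a small sketch reproducing the final expectation:

import "encoding/binary"

func expectedCommitData(txCount uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, txCount)
	return b // txCount=5 -> [0 0 0 0 0 0 0 5], matching the last commit above
}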


@@ -1,3 +1,4 @@
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00


@@ -1,3 +1,7 @@
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)
> check_tx 0x00
-> code: OK
@@ -8,16 +12,18 @@
-> code: OK
> check_tx 0x00
-> code: OK
-> code: 2
-> log: Invalid nonce. Expected >= 1, got 0
> deliver_tx 0x01
-> code: OK
> deliver_tx 0x04
-> code: OK
-> code: 2
-> log: Invalid nonce. Expected 2, got 4
> info
-> code: OK
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D


@@ -1,7 +1,7 @@
package types
import (
"context"
context "golang.org/x/net/context"
)
// Application is an interface that enables any finite, deterministic state machine
@@ -10,8 +10,9 @@ import (
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
// Info/Query Connection
Info(RequestInfo) ResponseInfo // Return application info
Query(RequestQuery) ResponseQuery // Query for state
Info(RequestInfo) ResponseInfo // Return application info
SetOption(RequestSetOption) ResponseSetOption // Set application option
Query(RequestQuery) ResponseQuery // Query for state
// Mempool Connection
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
@@ -46,6 +47,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
return ResponseSetOption{}
}
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
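
Because BaseApplication supplies no-op defaults for every interface method, a concrete application only overrides what it needs. A minimal sketch from a consuming package; MyApp is illustrative:

type MyApp struct {
	types.BaseApplication
	serial bool
}

// SetOption shadows the no-op default to recognize the "serial" key.
func (a *MyApp) SetOption(req types.RequestSetOption) types.ResponseSetOption {
	if req.Key == "serial" {
		a.serial = req.Value == "on"
	}
	return types.ResponseSetOption{}
}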
@@ -114,6 +119,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
return &res, nil
}
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
res := app.app.SetOption(*req)
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
return &res, nil


@@ -1,10 +1,11 @@
package types
import (
"bufio"
"encoding/binary"
"io"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/libs/protoio"
)
const (
@@ -13,19 +14,57 @@ const (
// WriteMessage writes a varint length-delimited protobuf message.
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
bz, err := proto.Marshal(msg)
if err != nil {
return err
}
return nil
return encodeByteSlice(w, bz)
}
// ReadMessage reads a varint length-delimited protobuf message.
func ReadMessage(r io.Reader, msg proto.Message) error {
_, err := protoio.NewDelimitedReader(r, maxMsgSize).ReadMsg(msg)
return err
return readProtoMsg(r, msg, maxMsgSize)
}
func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
reader, ok := r.(*bufio.Reader)
if !ok {
reader = bufio.NewReader(r)
}
length64, err := binary.ReadVarint(reader)
if err != nil {
return err
}
length := int(length64)
if length < 0 || length > maxSize {
return io.ErrShortBuffer
}
buf := make([]byte, length)
if _, err := io.ReadFull(reader, buf); err != nil {
return err
}
return proto.Unmarshal(buf, msg)
}
//-----------------------------------------------------------------------
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep
// go-wire as a dep
func encodeByteSlice(w io.Writer, bz []byte) (err error) {
err = encodeVarint(w, int64(len(bz)))
if err != nil {
return
}
_, err = w.Write(bz)
return
}
func encodeVarint(w io.Writer, i int64) (err error) {
var buf [10]byte
n := binary.PutVarint(buf[:], i)
_, err = w.Write(buf[0:n])
return
}
//----------------------------------------
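
Note that encodeVarint delegates to binary.PutVarint, i.e. the zigzag-encoded signed varint: a 5-byte payload is prefixed with 0x0A, not 0x05, and binary.ReadVarint on the read side is its exact inverse. A round-trip sketch, written as if inside this package:

import "bytes"

func roundTrip() error {
	var buf bytes.Buffer
	if err := WriteMessage(ToRequestEcho("hi"), &buf); err != nil {
		return err
	}
	// the buffer now holds <zigzag varint length><protobuf payload>
	msg := &Request{}
	return ReadMessage(&buf, msg)
}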
@@ -48,6 +87,12 @@ func ToRequestInfo(req RequestInfo) *Request {
}
}
func ToRequestSetOption(req RequestSetOption) *Request {
return &Request{
Value: &Request_SetOption{&req},
}
}
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
@@ -139,6 +184,13 @@ func ToResponseInfo(res ResponseInfo) *Response {
Value: &Response_Info{&res},
}
}
func ToResponseSetOption(res ResponseSetOption) *Response {
return &Response{
Value: &Response_SetOption{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},


@@ -52,6 +52,16 @@ var (
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)
func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
@@ -116,5 +126,6 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*ResponseSetOption)(nil)
var _ jsonRoundTripper = (*EventAttribute)(nil)

File diff suppressed because it is too large.

appveyor.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
version: 1.0.{build}
configuration: Release
platform:
- x64
- x86
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
build_script:
- cmd: make test
test: off


@@ -8,7 +8,7 @@ import (
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
peerID p2p.NodeID
peerID p2p.ID
reason interface{}
}
@@ -17,7 +17,7 @@ type badMessage struct {
}
// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}
@@ -26,7 +26,7 @@ type messageOutOfOrder struct {
}
// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
@@ -35,7 +35,7 @@ type consensusVote struct {
}
// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}
@@ -44,6 +44,6 @@ type blockPart struct {
}
// BlockPart returns blockPart PeerBehaviour.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
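
A hedged usage sketch for the reporter API above; the peer ID literal is arbitrary and MockReporter comes from the same package (shown in the next file):

import (
	"fmt"

	bh "github.com/tendermint/tendermint/behaviour"
)

func demoReporter() {
	pr := bh.NewMockReporter()
	_ = pr.Report(bh.BadMessage("peer-1", "malformed vote"))
	fmt.Println(len(pr.GetBehaviours("peer-1"))) // 1
}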


@@ -51,14 +51,14 @@ func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
// behaviour in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[p2p.NodeID][]PeerBehaviour
pb map[p2p.ID][]PeerBehaviour
}
// NewMockReporter returns a Reporter which records all reported
// behaviours in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[p2p.NodeID][]PeerBehaviour{},
pb: map[p2p.ID][]PeerBehaviour{},
}
}
@@ -72,7 +72,7 @@ func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
}
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.NodeID) []PeerBehaviour {
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {


@@ -11,7 +11,7 @@ import (
// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID p2p.NodeID = "MockPeer"
var peerID p2p.ID = "MockPeer"
pr := bh.NewMockReporter()
behaviours := pr.GetBehaviours(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}
type scriptItem struct {
peerID p2p.NodeID
peerID p2p.ID
behaviour bh.PeerBehaviour
}
@@ -76,10 +76,10 @@ func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
// frequencies with which those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
var (
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID p2p.ID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehaviour
right []bh.PeerBehaviour
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviours(t *testing.T) {
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
var (
behaviourScript = []struct {
peerID p2p.NodeID
peerID p2p.ID
behaviours []bh.PeerBehaviour
}{
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},


@@ -1,17 +0,0 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
- v0 was the very first implementation. It's battle-tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
Check out ADR-40 for the formal model and requirements.
# Termination criteria
1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
*/
package blockchain


@@ -1,12 +1,110 @@
package blockchain
import (
"errors"
"fmt"
"github.com/gogo/protobuf/proto"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
const (
MaxMsgSize = types.MaxBlockSizeBytes +
bcproto.BlockResponseMessagePrefixSize +
bcproto.BlockResponseMessageFieldKeySize
// NOTE: keep up to date with bcproto.BlockResponse
BlockResponseMessagePrefixSize = 4
BlockResponseMessageFieldKeySize = 1
MaxMsgSize = types.MaxBlockSizeBytes +
BlockResponseMessagePrefixSize +
BlockResponseMessageFieldKeySize
)
// EncodeMsg encodes a Protobuf message
func EncodeMsg(pb proto.Message) ([]byte, error) {
msg := bcproto.Message{}
switch pb := pb.(type) {
case *bcproto.BlockRequest:
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
case *bcproto.BlockResponse:
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
case *bcproto.NoBlockResponse:
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
case *bcproto.StatusRequest:
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
case *bcproto.StatusResponse:
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
default:
return nil, fmt.Errorf("unknown message type %T", pb)
}
bz, err := proto.Marshal(&msg)
if err != nil {
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
}
return bz, nil
}
// DecodeMsg decodes a Protobuf message.
func DecodeMsg(bz []byte) (proto.Message, error) {
pb := &bcproto.Message{}
err := proto.Unmarshal(bz, pb)
if err != nil {
return nil, err
}
switch msg := pb.Sum.(type) {
case *bcproto.Message_BlockRequest:
return msg.BlockRequest, nil
case *bcproto.Message_BlockResponse:
return msg.BlockResponse, nil
case *bcproto.Message_NoBlockResponse:
return msg.NoBlockResponse, nil
case *bcproto.Message_StatusRequest:
return msg.StatusRequest, nil
case *bcproto.Message_StatusResponse:
return msg.StatusResponse, nil
default:
return nil, fmt.Errorf("unknown message type %T", msg)
}
}
// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
if pb == nil {
return errors.New("message cannot be nil")
}
switch msg := pb.(type) {
case *bcproto.BlockRequest:
if msg.Height < 0 {
return errors.New("negative Height")
}
case *bcproto.BlockResponse:
_, err := types.BlockFromProto(msg.Block)
if err != nil {
return err
}
case *bcproto.NoBlockResponse:
if msg.Height < 0 {
return errors.New("negative Height")
}
case *bcproto.StatusResponse:
if msg.Base < 0 {
return errors.New("negative Base")
}
if msg.Height < 0 {
return errors.New("negative Height")
}
if msg.Base > msg.Height {
return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height)
}
case *bcproto.StatusRequest:
return nil
default:
return fmt.Errorf("unknown message type %T", msg)
}
return nil
}
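
Together these helpers form the reactor's wire path: validate before encoding, decode and re-validate on receipt. A round-trip sketch, written as if inside this package:

func wireRoundTrip() error {
	req := &bcproto.StatusRequest{}
	if err := ValidateMsg(req); err != nil {
		return err
	}
	bz, err := EncodeMsg(req) // wraps req in a bcproto.Message and marshals
	if err != nil {
		return err
	}
	decoded, err := DecodeMsg(bz) // unwraps back to *bcproto.StatusRequest
	if err != nil {
		return err
	}
	return ValidateMsg(decoded)
}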


@@ -1,18 +1,19 @@
package blockchain_test
package blockchain
import (
"encoding/hex"
math "math"
"math"
"testing"
proto "github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
func TestBlockRequest_Validate(t *testing.T) {
func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
testCases := []struct {
testName string
requestHeight int64
@@ -26,15 +27,13 @@ func TestBlockRequest_Validate(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
msg := &bcproto.Message{}
require.NoError(t, msg.Wrap(&bcproto.BlockRequest{Height: tc.requestHeight}))
require.Equal(t, tc.expectErr, msg.Validate() != nil)
request := bcproto.BlockRequest{Height: tc.requestHeight}
assert.Equal(t, tc.expectErr, ValidateMsg(&request) != nil, "Validate Basic had an unexpected result")
})
}
}
func TestNoBlockResponse_Validate(t *testing.T) {
func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
testCases := []struct {
testName string
nonResponseHeight int64
@@ -48,21 +47,18 @@ func TestNoBlockResponse_Validate(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
msg := &bcproto.Message{}
require.NoError(t, msg.Wrap(&bcproto.NoBlockResponse{Height: tc.nonResponseHeight}))
require.Equal(t, tc.expectErr, msg.Validate() != nil)
nonResponse := bcproto.NoBlockResponse{Height: tc.nonResponseHeight}
assert.Equal(t, tc.expectErr, ValidateMsg(&nonResponse) != nil, "Validate Basic had an unexpected result")
})
}
}
func TestStatusRequest_Validate(t *testing.T) {
msg := &bcproto.Message{}
require.NoError(t, msg.Wrap(&bcproto.StatusRequest{}))
require.NoError(t, msg.Validate())
func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
request := bcproto.StatusRequest{}
assert.NoError(t, ValidateMsg(&request))
}
func TestStatusResponse_Validate(t *testing.T) {
func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
testCases := []struct {
testName string
responseHeight int64
@@ -76,15 +72,13 @@ func TestStatusResponse_Validate(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
msg := &bcproto.Message{}
require.NoError(t, msg.Wrap(&bcproto.StatusResponse{Height: tc.responseHeight}))
require.Equal(t, tc.expectErr, msg.Validate() != nil)
response := bcproto.StatusResponse{Height: tc.responseHeight}
assert.Equal(t, tc.expectErr, ValidateMsg(&response) != nil, "Validate Basic had an unexpected result")
})
}
}
// nolint:lll
// nolint:lll // ignore line length in tests
func TestBlockchainMessageVectors(t *testing.T) {
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
block.Version.Block = 11 // overwrite updated protocol version
@@ -123,8 +117,8 @@ func TestBlockchainMessageVectors(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
bz, err := proto.Marshal(tc.bmsg)
require.NoError(t, err)
bz, _ := proto.Marshal(tc.bmsg)
require.Equal(t, tc.expBytes, hex.EncodeToString(bz))
})
}


@@ -30,7 +30,6 @@ eg, L = latency = 0.1s
const (
requestIntervalMS = 2
maxTotalRequesters = 600
maxPeerErrBuffer = 1000
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 20
@@ -59,24 +58,17 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
are not at peer limits, we can probably switch to consensus reactor
*/
// BlockRequest stores a block request identified by the block Height and the
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID p2p.NodeID
}
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time
startTime time.Time
mtx tmsync.RWMutex
mtx tmsync.Mutex
// block requests
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[p2p.NodeID]*bpPeer
peers map[p2p.ID]*bpPeer
maxPeerHeight int64 // the biggest reported height
// atomic
@@ -90,7 +82,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[p2p.NodeID]*bpPeer),
peers: make(map[p2p.ID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
@@ -106,8 +98,8 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// OnStart implements service.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
pool.lastAdvance = time.Now()
go pool.makeRequestersRoutine()
pool.startTime = time.Now()
return nil
}
@@ -142,7 +134,6 @@ func (pool *BlockPool) removeTimedoutPeers() {
defer pool.mtx.Unlock()
for _, peer := range pool.peers {
// check if peer timed out
if !peer.didTimeout && peer.numPending > 0 {
curRate := peer.recvMonitor.Status().CurRate
// curRate can be 0 on start
@@ -156,7 +147,6 @@ func (pool *BlockPool) removeTimedoutPeers() {
peer.didTimeout = true
}
}
if peer.didTimeout {
pool.removePeer(peer.id)
}
@@ -166,25 +156,33 @@ func (pool *BlockPool) removeTimedoutPeers() {
// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}
// IsCaughtUp returns true if this node is caught up, false - otherwise.
// TODO: relax conditions, prevent abuse.
func (pool *BlockPool) IsCaughtUp() bool {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
pool.mtx.Lock()
defer pool.mtx.Unlock()
// Need at least 1 peer to be considered caught up.
if len(pool.peers) == 0 {
pool.Logger.Debug("Blockpool has no peers")
return false
}
// NOTE: we use maxPeerHeight - 1 because to sync block H requires block H+1
// Some conditions to determine if we're caught up.
// Ensures we've either received a block or waited some amount of time,
// and that we're synced to the highest known height.
// Note we use maxPeerHeight - 1 because to sync block H requires block H+1
// to verify the LastCommit.
return pool.height >= (pool.maxPeerHeight - 1)
receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
return isCaughtUp
}
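
The predicate decomposes into two conditions; a standalone sketch with made-up inputs to show the boundary cases:

import "time"

func isCaughtUp(height, maxPeerHeight int64, sinceStart time.Duration) bool {
	receivedBlockOrTimedOut := height > 0 || sinceStart > 5*time.Second
	ourChainIsLongest := maxPeerHeight == 0 || height >= maxPeerHeight-1
	return receivedBlockOrTimedOut && ourChainIsLongest
}

// isCaughtUp(0, 50, 2*time.Second) == false (no block yet, still in the 5s grace period)
// isCaughtUp(49, 50, time.Minute)  == true  (height >= maxPeerHeight-1)
// isCaughtUp(10, 50, time.Minute)  == false (peers report a longer chain)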
// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
@@ -192,8 +190,8 @@ func (pool *BlockPool) IsCaughtUp() bool {
// So we peek two blocks at a time.
// The caller will verify the commit.
func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
pool.mtx.Lock()
defer pool.mtx.Unlock()
if r := pool.requesters[pool.height]; r != nil {
first = r.getBlock()
@@ -211,12 +209,16 @@ func (pool *BlockPool) PopRequest() {
defer pool.mtx.Unlock()
if r := pool.requesters[pool.height]; r != nil {
/* The block can disappear at any time, due to removePeer().
if r := pool.requesters[pool.height]; r == nil || r.block == nil {
PanicSanity("PopRequest() requires a valid block")
}
*/
if err := r.Stop(); err != nil {
pool.Logger.Error("Error stopping requester", "err", err)
}
delete(pool.requesters, pool.height)
pool.height++
pool.lastAdvance = time.Now()
} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
}
@@ -225,13 +227,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height, removes the peer,
// and redoes the request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != p2p.NodeID("") {
if peerID != p2p.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -240,14 +242,20 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
requester := pool.requesters[block.Height]
if requester == nil {
pool.Logger.Error("peer sent us a block we didn't expect",
"peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
pool.Logger.Info(
"peer sent us a block we didn't expect",
"peer",
peerID,
"curHeight",
pool.height,
"blockHeight",
block.Height)
diff := pool.height - block.Height
if diff < 0 {
diff *= -1
@@ -265,29 +273,20 @@ func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize
peer.decrPending(blockSize)
}
} else {
err := errors.New("requester is different or block already exists")
pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height)
pool.sendError(err, peerID)
pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
pool.sendError(errors.New("invalid peer"), peerID)
}
}
// MaxPeerHeight returns the highest reported height.
func (pool *BlockPool) MaxPeerHeight() int64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.maxPeerHeight
}
// LastAdvance returns the time when the last block was processed (or start
// time if no blocks were processed).
func (pool *BlockPool) LastAdvance() time.Time {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
return pool.lastAdvance
}
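// LastAdvance feeds the reactor's stall detection: on every
// switchToConsensusTicker tick, poolRoutine (below) effectively checks
//
//	if time.Since(pool.LastAdvance()) > syncTimeout { /* give up on fast sync */ }
//
// so a pool that stops advancing for syncTimeout (60s) aborts the sync.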
// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -308,14 +307,14 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64)
// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
func (pool *BlockPool) removePeer(peerID p2p.ID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -396,14 +395,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
if !pool.IsRunning() {
return
}
@@ -437,7 +436,7 @@ type bpPeer struct {
height int64
base int64
pool *BlockPool
id p2p.NodeID
id p2p.ID
recvMonitor *flow.Monitor
timeout *time.Timer
@@ -445,7 +444,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.ID, base int64, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -510,10 +509,10 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan p2p.NodeID // redo may be sent multiple times; peerID identifies repeats
redoCh chan p2p.ID // redo may be sent multiple times; peerID identifies repeats
mtx tmsync.Mutex
peerID p2p.NodeID
peerID p2p.ID
block *types.Block
}
@@ -522,7 +521,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan p2p.NodeID, 1),
redoCh: make(chan p2p.ID, 1),
peerID: "",
block: nil,
@@ -537,7 +536,7 @@ func (bpr *bpRequester) OnStart() error {
}
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -559,7 +558,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
func (bpr *bpRequester) getPeerID() p2p.NodeID {
func (bpr *bpRequester) getPeerID() p2p.ID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -581,7 +580,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.ID) {
select {
case bpr.redoCh <- peerID:
default:
@@ -602,6 +601,7 @@ OUTER_LOOP:
}
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
if peer == nil {
// log.Info("No peers available", "height", height)
time.Sleep(requestIntervalMS * time.Millisecond)
continue PICK_PEER_LOOP
}
@@ -638,3 +638,10 @@ OUTER_LOOP:
}
}
}
// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
// delivering the block
type BlockRequest struct {
Height int64
PeerID p2p.ID
}
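// How a request travels end to end (sketch of the wiring above): a requester
// picks a peer and calls pool.sendRequest(height, peerID), which publishes a
// BlockRequest on requestsCh; the reactor's request loop receives it and
// sends a bcproto.BlockRequest{Height: height} to that peer. Keeping the
// channel between them means the pool never touches the p2p transport itself.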


@@ -19,7 +19,7 @@ func init() {
}
type testPeer struct {
id p2p.NodeID
id p2p.ID
base int64
height int64
inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
type testPeers map[p2p.NodeID]testPeer
type testPeers map[p2p.ID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
@@ -66,7 +66,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
peerID := p2p.NodeID(tmrand.Str(12))
peerID := p2p.ID(tmrand.Str(12))
height := minHeight + tmrand.Int63n(maxHeight-minHeight)
base := minHeight + int64(i)
if base > height {
@@ -182,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {
// Pull from channels
counter := 0
timedOut := map[p2p.NodeID]struct{}{}
timedOut := map[p2p.ID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -203,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
peerID := p2p.ID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
}
@@ -227,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())
// remove a non-existent peer
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })
assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })
// remove peer with biggest height
pool.RemovePeer(p2p.NodeID("10"))
pool.RemovePeer(p2p.ID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())
// remove all peers


@@ -2,12 +2,11 @@ package v0
import (
"fmt"
"sync"
"reflect"
"time"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
@@ -15,55 +14,31 @@ import (
"github.com/tendermint/tendermint/types"
)
var (
_ service.Service = (*Reactor)(nil)
// ChannelShims contains a map of ChannelDescriptorShim objects, where each
// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
// p2p proto.Message the new p2p Channel is responsible for handling.
//
//
// TODO: Remove once p2p refactor is complete.
// ref: https://github.com/tendermint/tendermint/issues/5670
ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
BlockchainChannel: {
MsgType: new(bcproto.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: byte(BlockchainChannel),
Priority: 5,
SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
},
}
)
const (
// BlockchainChannel is a channel for blocks and status updates
BlockchainChannel = p2p.ChannelID(0x40)
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
// stop syncing when last block's time is
// within this much of the system time.
// stopSyncingDurationMinutes = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1
// switch to consensus after this duration of inactivity
syncTimeout = 60 * time.Second
)
type consensusReactor interface {
// For when we switch from blockchain reactor and fast sync to the consensus
// machine.
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
type peerError struct {
err error
peerID p2p.NodeID
peerID p2p.ID
}
func (e peerError) Error() string {
@@ -71,510 +46,386 @@ func (e peerError) Error() string {
}
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
service.BaseService
type BlockchainReactor struct {
p2p.BaseReactor
// immutable
initialState sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
fastSync bool
blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdatesCh
closeCh chan struct{}
blockExec *sm.BlockExecutor
store *store.BlockStore
pool *BlockPool
fastSync bool
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
// poolWG is used to synchronize the graceful shutdown of the poolRoutine and
// requestRoutine spawned goroutines when stopping the reactor and before
// stopping the p2p Channel(s).
poolWG sync.WaitGroup
}
// NewReactor returns new reactor instance.
func NewReactor(
logger log.Logger,
state sm.State,
blockExec *sm.BlockExecutor,
store *store.BlockStore,
consReactor consensusReactor,
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdatesCh,
fastSync bool,
) (*Reactor, error) {
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
const capacity = 1000 // must be bigger than the peer count
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
pool := NewBlockPool(startHeight, requestsCh, errorsCh)
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r := &Reactor{
bcR := &BlockchainReactor{
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
pool: pool,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
}
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
return r, nil
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
return bcR
}
// OnStart starts separate go routines for each p2p Channel and listens for
// envelopes on each. In addition, it also listens for peer updates and handles
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.fastSync {
if err := r.pool.Start(); err != nil {
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.pool.Logger = l
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
if bcR.fastSync {
err := bcR.pool.Start()
if err != nil {
return err
}
r.poolWG.Add(1)
go r.poolRoutine(false)
go bcR.poolRoutine(false)
}
go r.processBlockchainCh()
go r.processPeerUpdates()
return nil
}
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.fastSync {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
// wait for the poolRoutine and requestRoutine goroutines to gracefully exit
r.poolWG.Wait()
// Close closeCh to signal to all spawned goroutines to gracefully exit. All
// p2p Channels should execute Close().
close(r.closeCh)
// Wait for all p2p Channels to be closed before returning. This ensures we
// can easily reason about synchronization of all p2p Channels and ensure no
// panics will occur.
<-r.blockchainCh.Done()
<-r.peerUpdates.Done()
}
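// The shutdown above is an instance of a common Go pattern: workers watch a
// close channel, a WaitGroup records their exit, and dependent resources are
// awaited last. A generic, self-contained sketch (names illustrative):
package main

import "sync"

func main() {
	var wg sync.WaitGroup
	closeCh := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-closeCh // block until shutdown is signaled
	}()

	close(closeCh) // broadcast the shutdown signal to every watcher
	wg.Wait()      // block until all watchers have exited
}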
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
block := r.store.LoadBlock(msg.Height)
if block != nil {
blockProto, err := block.ToProto()
if err != nil {
r.Logger.Error("failed to convert msg to protobuf", "err", err)
return
}
r.blockchainCh.Out() <- p2p.Envelope{
To: peerID,
Message: &bcproto.BlockResponse{Block: blockProto},
}
return
}
r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
r.blockchainCh.Out() <- p2p.Envelope{
To: peerID,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}
}
// handleBlockchainMessage handles envelopes sent from peers on the
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
// for this channel. This should never be called outside of handleMessage.
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
logger := r.Logger.With("peer", envelope.From)
switch msg := envelope.Message.(type) {
case *bcproto.BlockRequest:
r.respondToPeer(msg, envelope.From)
case *bcproto.BlockResponse:
block, err := types.BlockFromProto(msg.Block)
if err != nil {
logger.Error("failed to convert block from proto", "err", err)
return err
}
r.pool.AddBlock(envelope.From, block, block.Size())
case *bcproto.StatusRequest:
r.blockchainCh.Out() <- p2p.Envelope{
To: envelope.From,
Message: &bcproto.StatusResponse{
Height: r.store.Height(),
Base: r.store.Base(),
},
}
case *bcproto.StatusResponse:
r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height)
case *bcproto.NoBlockResponse:
logger.Debug("peer does not have the requested block", "height", msg.Height)
default:
return fmt.Errorf("received unknown message: %T", msg)
}
return nil
}
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error("recovering from processing message panic", "err", err)
}
}()
r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)
switch chID {
case BlockchainChannel:
err = r.handleBlockchainMessage(envelope)
default:
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
}
return err
}
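// The defer/recover above is the standard recover-to-error idiom: a panic in
// one message handler becomes an ordinary error rather than crashing the
// reactor. In isolation (safeHandle is an illustrative name):
func safeHandle(fn func() error) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	return fn()
}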
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel. Any error encountered during message
// execution will result in a PeerError being sent on the BlockchainChannel. When
// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockchainCh() {
defer r.blockchainCh.Close()
for {
select {
case envelope := <-r.blockchainCh.In():
if err := r.handleMessage(r.blockchainCh.ID(), envelope); err != nil {
r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID(), "envelope", envelope, "err", err)
r.blockchainCh.Error() <- p2p.PeerError{
PeerID: envelope.From,
Err: err,
Severity: p2p.PeerErrorSeverityLow,
}
}
case <-r.closeCh:
r.Logger.Debug("stopped listening on blockchain channel; closing...")
return
}
}
}
// processPeerUpdate processes a PeerUpdate.
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status)
// XXX: Pool#RedoRequest can sometimes give us an empty peer.
if len(peerUpdate.PeerID) == 0 {
return
}
switch peerUpdate.Status {
case p2p.PeerStatusNew, p2p.PeerStatusUp:
// send a status update the newly added peer
r.blockchainCh.Out() <- p2p.Envelope{
To: peerUpdate.PeerID,
Message: &bcproto.StatusResponse{
Base: r.store.Base(),
Height: r.store.Height(),
},
}
case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned:
r.pool.RemovePeer(peerUpdate.PeerID)
}
}
// processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully.
func (r *Reactor) processPeerUpdates() {
defer r.peerUpdates.Close()
for {
select {
case peerUpdate := <-r.peerUpdates.Updates():
r.processPeerUpdate(peerUpdate)
case <-r.closeCh:
r.Logger.Debug("stopped listening on peer updates channel; closing...")
return
}
}
}
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync = true
r.initialState = state
r.pool.height = state.LastBlockHeight + 1
if err := r.pool.Start(); err != nil {
bcR.pool.height = state.LastBlockHeight + 1
err := bcR.pool.Start()
if err != nil {
return err
}
r.poolWG.Add(1)
go r.poolRoutine(true)
go bcR.poolRoutine(true)
return nil
}
func (r *Reactor) requestRoutine() {
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
if bcR.fastSync {
if err := bcR.pool.Stop(); err != nil {
bcR.Logger.Error("Error stopping pool", "err", err)
}
}
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height()})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}
peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails; we'll try again later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.SetPeerRange
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
bcR.pool.RemovePeer(peer.ID())
}
// respondToPeer loads a block and sends it to the requesting peer,
// if we have it. Otherwise, we'll respond saying we don't have it.
func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
bl, err := block.ToProto()
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return false
}
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
if err != nil {
bcR.Logger.Error("could not marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
// Receive implements Reactor by handling 4 types of messages (see below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := bc.DecodeMsg(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
if err = bc.ValidateMsg(msg); err != nil {
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcproto.BlockRequest:
bcR.respondToPeer(msg, src)
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("Block content is invalid", "err", err)
return
}
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
case *bcproto.StatusRequest:
// Send peer our state.
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Height: bcR.store.Height(),
Base: bcR.store.Base(),
})
if err != nil {
bcR.Logger.Error("could not convert msg to protobut", "err", err)
return
}
src.TrySend(BlockchainChannel, msgBytes)
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
case *bcproto.NoBlockResponse:
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
default:
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
defer trySyncTicker.Stop()
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
defer statusUpdateTicker.Stop()
r.poolWG.Add(1)
defer r.poolWG.Done()
for {
select {
case <-r.closeCh:
return
case <-r.pool.Quit():
return
case request := <-r.requestsCh:
r.blockchainCh.Out() <- p2p.Envelope{
To: request.PeerID,
Message: &bcproto.BlockRequest{Height: request.Height},
}
case pErr := <-r.errorsCh:
r.blockchainCh.Error() <- p2p.PeerError{
PeerID: pErr.peerID,
Err: pErr.err,
Severity: p2p.PeerErrorSeverityLow,
}
case <-statusUpdateTicker.C:
r.poolWG.Add(1)
go func() {
defer r.poolWG.Done()
r.blockchainCh.Out() <- p2p.Envelope{
Broadcast: true,
Message: &bcproto.StatusRequest{},
}
}()
}
}
}
// poolRoutine handles messages from the poolReactor telling the reactor what to
// do.
//
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (r *Reactor) poolRoutine(stateSynced bool) {
var (
trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond)
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
blocksSynced = uint64(0)
chainID = r.initialState.ChainID
state = r.initialState
lastHundred = time.Now()
lastRate = 0.0
didProcessCh = make(chan struct{}, 1)
)
defer trySyncTicker.Stop()
switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
defer switchToConsensusTicker.Stop()
go r.requestRoutine()
blocksSynced := uint64(0)
defer r.poolWG.Done()
chainID := bcR.initialState.ChainID
state := bcR.initialState
lastHundred := time.Now()
lastRate := 0.0
didProcessCh := make(chan struct{}, 1)
go func() {
for {
select {
case <-bcR.Quit():
return
case <-bcR.pool.Quit():
return
case request := <-bcR.requestsCh:
peer := bcR.Switch.Peers().Get(request.PeerID)
if peer == nil {
continue
}
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
if err != nil {
bcR.Logger.Error("could not convert msg to proto", "err", err)
continue
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
}
}
}()
FOR_LOOP:
for {
select {
case <-switchToConsensusTicker.C:
var (
height, numPending, lenRequesters = r.pool.GetStatus()
lastAdvance = r.pool.LastAdvance()
)
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
"outbound", outbound, "inbound", inbound)
if bcR.pool.IsCaughtUp() {
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
if err := bcR.pool.Stop(); err != nil {
bcR.Logger.Error("Error stopping pool", "err", err)
}
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
}
// else {
// should only happen during testing
// }
r.Logger.Debug(
"consensus ticker",
"num_pending", numPending,
"total", lenRequesters,
"height", height,
)
switch {
case r.pool.IsCaughtUp():
r.Logger.Info("switching to consensus reactor", "height", height)
case time.Since(lastAdvance) > syncTimeout:
r.Logger.Error("no progress since last advance", "last_advance", lastAdvance)
default:
r.Logger.Info(
"not caught up yet",
"height", height,
"max_peer_height", r.pool.MaxPeerHeight(),
"timeout_in", syncTimeout-time.Since(lastAdvance),
)
continue
break FOR_LOOP
}
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
}
break FOR_LOOP
case <-trySyncTicker.C:
case <-trySyncTicker.C: // chan time
select {
case didProcessCh <- struct{}{}:
default:
}
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block at a
// time (e.g. 10) here, because we only send one BlockRequest per loop
// iteration. The ratio mismatch can result in starving of blocks, i.e. a
// sudden burst of requests and responses, and repeat. Consequently, it is
// better to split these routines rather than coupling them as it is
// written here.
//
// TODO: Uncouple from request routine.
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starving of blocks, a
// sudden burst of requests and responses, and repeat.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.
// see if there are any blocks to sync
first, second := r.pool.PeekTwoBlocks()
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
// bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// we need both to sync the first block
// We need both to sync the first block.
continue FOR_LOOP
} else {
// try again quickly next loop
// Try again quickly next loop.
didProcessCh <- struct{}{}
}
var (
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader = firstParts.Header()
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
)
// Finally, verify the first block using the second's commit.
//
// NOTE: We can probably make this more efficient, but note that calling
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
err := state.Validators.VerifyCommitLight(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
err = fmt.Errorf("invalid last commit: %w", err)
r.Logger.Error(
err.Error(),
"last_commit", second.LastCommit,
"block_id", firstID,
"height", first.Height,
)
// NOTE: We've already removed the peer's request, but we still need
// to clean up the rest.
peerID := r.pool.RedoRequest(first.Height)
r.blockchainCh.Error() <- p2p.PeerError{
PeerID: peerID,
Err: err,
Severity: p2p.PeerErrorSeverityLow,
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err))
}
peerID2 := r.pool.RedoRequest(second.Height)
if peerID2 != peerID {
r.blockchainCh.Error() <- p2p.PeerError{
PeerID: peerID2,
Err: err,
Severity: p2p.PeerErrorSeverityLow,
}
peerID2 := bcR.pool.RedoRequest(second.Height)
peer2 := bcR.Switch.Peers().Get(peerID2)
if peer2 != nil && peer2 != peer {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
}
continue FOR_LOOP
} else {
r.pool.PopRequest()
bcR.pool.PopRequest()
// TODO: batch saves so we do not persist to disk every block
r.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: batch saves so we don't persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
// TODO: Same thing for app - but we would need a way to get the hash
// without persisting the state.
state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO: This is bad, are we zombie?
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
// TODO This is bad, are we zombie?
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"fast sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
)
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
}
continue FOR_LOOP
case <-r.closeCh:
case <-bcR.Quit():
break FOR_LOOP
}
}
}
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
bcR.Logger.Error("could not convert msg to proto", "err", err)
return fmt.Errorf("could not convert msg to proto: %w", err)
}
bcR.Switch.Broadcast(BlockchainChannel, bm)
return nil
}


@@ -2,59 +2,71 @@ package v0
import (
"fmt"
"math/rand"
"os"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
tmtime "github.com/tendermint/tendermint/types/time"
)
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
var config *cfg.Config
type reactorTestSuite struct {
reactor *Reactor
app proxy.AppConns
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
peerID p2p.NodeID
blockchainChannel *p2p.Channel
blockchainInCh chan p2p.Envelope
blockchainOutCh chan p2p.Envelope
blockchainPeerErrCh chan p2p.PeerError
peerUpdatesCh chan p2p.PeerUpdate
peerUpdates *p2p.PeerUpdatesCh
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
}
func setup(
t *testing.T,
type BlockchainReactorPair struct {
reactor *BlockchainReactor
app proxy.AppConns
}
func newBlockchainReactor(
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64,
chBuf uint,
) *reactorTestSuite {
t.Helper()
maxBlockHeight int64) BlockchainReactorPair {
if len(privVals) != 1 {
panic("only support one validator")
}
require.Len(t, privVals, 1, "only one validator can be supported")
app := &abci.BaseApplication{}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
require.NoError(t, proxyApp.Start())
err := proxyApp.Start()
if err != nil {
panic(fmt.Errorf("error start app: %w", err))
}
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
@@ -62,24 +74,25 @@ func setup(
blockStore := store.NewBlockStore(blockDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
// Make the BlockchainReactor itself.
// NOTE we have to create and commit the blocks first because
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
blockExec := sm.NewBlockExecutor(
stateStore,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.EmptyEvidencePool{},
)
require.NoError(t, stateStore.Save(state))
// let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
@@ -92,197 +105,60 @@ func setup(
lastBlock.Header.ChainID,
time.Now(),
)
require.NoError(t, err)
lastCommit = types.NewCommit(
vote.Height,
vote.Round,
lastBlockMeta.BlockID,
[]types.CommitSig{vote.CommitSig()},
)
if err != nil {
panic(err)
}
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
if err != nil {
panic(fmt.Errorf("error apply block: %w", err))
}
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
pID := make([]byte, 16)
_, err = rng.Read(pID)
require.NoError(t, err)
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf)
rts := &reactorTestSuite{
app: proxyApp,
blockchainInCh: make(chan p2p.Envelope, chBuf),
blockchainOutCh: make(chan p2p.Envelope, chBuf),
blockchainPeerErrCh: make(chan p2p.PeerError, chBuf),
peerUpdatesCh: peerUpdatesCh,
peerUpdates: p2p.NewPeerUpdates(peerUpdatesCh),
peerID: p2p.NodeID(fmt.Sprintf("%x", pID)),
}
rts.blockchainChannel = p2p.NewChannel(
BlockchainChannel,
new(bcproto.Message),
rts.blockchainInCh,
rts.blockchainOutCh,
rts.blockchainPeerErrCh,
)
reactor, err := NewReactor(
log.TestingLogger().With("module", "blockchain", "node", rts.peerID),
state.Copy(),
blockExec,
blockStore,
nil,
rts.blockchainChannel,
rts.peerUpdates,
fastSync,
)
require.NoError(t, err)
rts.reactor = reactor
require.NoError(t, rts.reactor.Start())
require.True(t, rts.reactor.IsRunning())
t.Cleanup(func() {
require.NoError(t, rts.reactor.Stop())
require.NoError(t, rts.app.Stop())
require.False(t, rts.reactor.IsRunning())
})
return rts
return BlockchainReactorPair{bcReactor, proxyApp}
}
func simulateRouter(primary *reactorTestSuite, suites []*reactorTestSuite, dropChErr bool) {
// create a mapping for efficient suite lookup by peer ID
suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
for _, suite := range suites {
suitesByPeerID[suite.peerID] = suite
}
// Simulate a router by listening for all outbound envelopes and proxying the
// envelope to the respective peer (suite).
go func() {
for envelope := range primary.blockchainOutCh {
if envelope.Broadcast {
for _, s := range suites {
// broadcast to everyone except source
if s.peerID != primary.peerID {
s.blockchainInCh <- p2p.Envelope{
From: primary.peerID,
To: s.peerID,
Message: envelope.Message,
}
}
}
} else {
suitesByPeerID[envelope.To].blockchainInCh <- p2p.Envelope{
From: primary.peerID,
To: envelope.To,
Message: envelope.Message,
}
}
}
}()
go func() {
for pErr := range primary.blockchainPeerErrCh {
if dropChErr {
primary.reactor.Logger.Debug("dropped peer error", "err", pErr.Err)
} else {
primary.peerUpdatesCh <- p2p.PeerUpdate{
PeerID: pErr.PeerID,
Status: p2p.PeerStatusRemoved,
}
}
}
}()
}
func TestReactor_AbruptDisconnect(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
func TestNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(64)
testSuites := []*reactorTestSuite{
setup(t, genDoc, privVals, maxBlockHeight, 0),
setup(t, genDoc, privVals, 0, 0),
}
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
for _, s := range testSuites {
simulateRouter(s, testSuites, true)
// connect reactor to every other reactor
for _, ss := range testSuites {
if s.peerID != ss.peerID {
s.peerUpdatesCh <- p2p.PeerUpdate{
Status: p2p.PeerStatusUp,
PeerID: ss.peerID,
}
}
}
}
secondaryPool := testSuites[1].reactor.pool
require.Eventually(
t,
func() bool {
height, _, _ := secondaryPool.GetStatus()
return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
},
10*time.Second,
10*time.Millisecond,
"expected node to be partially synced",
)
// Remove the synced node from the syncing node, which should not result in
// any deadlocks or race conditions within the context of poolRoutine.
testSuites[1].peerUpdatesCh <- p2p.PeerUpdate{
Status: p2p.PeerStatusDown,
PeerID: testSuites[0].peerID,
}
}
func TestReactor_NoBlockResponse(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(65)
testSuites := []*reactorTestSuite{
setup(t, genDoc, privVals, maxBlockHeight, 0),
setup(t, genDoc, privVals, 0, 0),
}
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
reactorPairs := make([]BlockchainReactorPair, 2)
for _, s := range testSuites {
simulateRouter(s, testSuites, true)
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
// connect reactor to every other reactor
for _, ss := range testSuites {
if s.peerID != ss.peerID {
s.peerUpdatesCh <- p2p.PeerUpdate{
Status: p2p.PeerStatusUp,
PeerID: ss.peerID,
}
}
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
require.NoError(t, err)
err = r.app.Stop()
require.NoError(t, err)
}
}
}()
testCases := []struct {
tests := []struct {
height int64
existent bool
}{
@@ -292,114 +168,161 @@ func TestReactor_NoBlockResponse(t *testing.T) {
{100, false},
}
secondaryPool := testSuites[1].reactor.pool
require.Eventually(
t,
func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
10*time.Second,
10*time.Millisecond,
"expected node to be fully synced",
)
for {
if reactorPairs[1].reactor.pool.IsCaughtUp() {
break
}
for _, tc := range testCases {
block := testSuites[1].reactor.store.LoadBlock(tc.height)
if tc.existent {
require.True(t, block != nil)
time.Sleep(10 * time.Millisecond)
}
assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())
for _, tt := range tests {
block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
if tt.existent {
assert.True(t, block != nil)
} else {
require.Nil(t, block)
assert.True(t, block == nil)
}
}
}
func TestReactor_BadBlockStopsPeer(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn, but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(48)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(148)
testSuites := []*reactorTestSuite{
setup(t, genDoc, privVals, maxBlockHeight, 1000), // fully synced node
setup(t, genDoc, privVals, 0, 1000),
setup(t, genDoc, privVals, 0, 1000),
setup(t, genDoc, privVals, 0, 1000),
setup(t, genDoc, privVals, 0, 1000), // new node
}
// Other chain needs a different validator set
otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
defer func() {
err := otherChain.reactor.Stop()
require.Error(t, err)
err = otherChain.app.Stop()
require.NoError(t, err)
}()
for _, s := range testSuites[:len(testSuites)-1] {
simulateRouter(s, testSuites, true)
reactorPairs := make([]BlockchainReactorPair, 4)
// connect reactor to every other reactor except the new node
for _, ss := range testSuites[:len(testSuites)-1] {
if s.peerID != ss.peerID {
s.peerUpdatesCh <- p2p.PeerUpdate{
Status: p2p.PeerStatusUp,
PeerID: ss.peerID,
}
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
require.NoError(t, err)
err = r.app.Stop()
require.NoError(t, err)
}
}()
for {
time.Sleep(1 * time.Second)
caughtUp := true
for _, r := range reactorPairs {
if !r.reactor.pool.IsCaughtUp() {
caughtUp = false
}
}
if caughtUp {
break
}
}
require.Eventually(
t,
func() bool {
caughtUp := true
for _, s := range testSuites[1 : len(testSuites)-1] {
if s.reactor.pool.MaxPeerHeight() == 0 || !s.reactor.pool.IsCaughtUp() {
caughtUp = false
}
}
// at this time, reactors[0-3] are the newest
assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
return caughtUp
},
10*time.Minute,
10*time.Millisecond,
"expected all nodes to be fully synced",
)
// Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data
// race, but can't be easily avoided.
reactorPairs[3].reactor.store = otherChain.reactor.store
for _, s := range testSuites[:len(testSuites)-1] {
require.Len(t, s.reactor.pool.peers, 3)
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs = append(reactorPairs, lastReactorPair)
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
return s
}, p2p.Connect2Switches)...)
for i := 0; i < len(reactorPairs)-1; i++ {
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
}
// Mark testSuites[3] as an invalid peer, which will cause newSuite to
// disconnect from this peer.
otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
otherSuite := setup(t, otherGenDoc, otherPrivVals, maxBlockHeight, 0)
testSuites[3].reactor.store = otherSuite.reactor.store
// add a fake peer just so we do not wait for the consensus ticker to time out
otherSuite.reactor.pool.SetPeerRange("00ff", 10, 10)
// start the new peer's faux router
newSuite := testSuites[len(testSuites)-1]
simulateRouter(newSuite, testSuites, false)
// connect all nodes to the new peer
for _, s := range testSuites[:len(testSuites)-1] {
newSuite.peerUpdatesCh <- p2p.PeerUpdate{
Status: p2p.PeerStatusUp,
PeerID: s.peerID,
for {
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
break
}
time.Sleep(1 * time.Second)
}
// wait for the new peer to catch up and become fully synced
require.Eventually(
t,
func() bool { return newSuite.reactor.pool.MaxPeerHeight() > 0 && newSuite.reactor.pool.IsCaughtUp() },
10*time.Minute,
10*time.Millisecond,
"expected new node to be fully synced",
)
require.Eventuallyf(
t,
func() bool { return len(newSuite.reactor.pool.peers) < len(testSuites)-1 },
10*time.Minute,
10*time.Millisecond,
"invalid number of peers; expected < %d, got: %d",
len(testSuites)-1,
len(newSuite.reactor.pool.peers),
)
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
}
//----------------------------------------------
// utility funcs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}
type testApp struct {
abci.BaseApplication
}
var _ abci.Application = (*testApp)(nil)
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
return abci.ResponseInfo{}
}
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
return abci.ResponseBeginBlock{}
}
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
return abci.ResponseEndBlock{}
}
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Events: []abci.Event{}}
}
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
return abci.ResponseCheckTx{}
}
func (app *testApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{}
}
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
return
}


@@ -1,50 +0,0 @@
package v0
import (
"sort"
cfg "github.com/tendermint/tendermint/config"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func randGenesisDoc(
config *cfg.Config,
numValidators int,
randPower bool,
minPower int64,
) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
}
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}

blockchain/v1/peer.go (new file, 211 lines)

@@ -0,0 +1,211 @@
package v1
import (
"fmt"
"math"
"time"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
//--------
// Peer
// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
timeout time.Duration
minRecvRate int64
sampleRate time.Duration
windowSize time.Duration
}
// BpPeer is the data structure associated with a fast sync peer.
type BpPeer struct {
logger log.Logger
ID p2p.ID
Base int64 // the peer reported base
Height int64 // the peer reported height
NumPendingBlockRequests int // number of requests still waiting for block responses
blocks map[int64]*types.Block // blocks received or expected to be received from this peer
blockResponseTimer *time.Timer
recvMonitor *flow.Monitor
params *BpPeerParams // parameters for timer and monitor
onErr func(err error, peerID p2p.ID) // function to call on error
}
// NewBpPeer creates a new peer.
func NewBpPeer(peerID p2p.ID, base int64, height int64,
onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {
if params == nil {
params = BpPeerDefaultParams()
}
return &BpPeer{
ID: peerID,
Base: base,
Height: height,
blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
logger: log.NewNopLogger(),
onErr: onErr,
params: params,
}
}
// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}
// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
peer.logger = l
}
// Cleanup performs cleanup of the peer, removes blocks, requests, stops timer and monitor.
func (peer *BpPeer) Cleanup() {
if peer.blockResponseTimer != nil {
peer.blockResponseTimer.Stop()
}
if peer.NumPendingBlockRequests != 0 {
peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
}
if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
}
for h := range peer.blocks {
delete(peer.blocks, h)
}
peer.NumPendingBlockRequests = 0
peer.recvMonitor = nil
}
// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
block, ok := peer.blocks[height]
if !ok {
return nil, errMissingBlock
}
if block == nil {
return nil, errMissingBlock
}
return peer.blocks[height], nil
}
// AddBlock adds a block at the peer level. The block must be non-nil and recvSize non-negative.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
if block == nil || recvSize < 0 {
panic("bad parameters")
}
existingBlock, ok := peer.blocks[block.Height]
if !ok {
peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
return errMissingBlock
}
if existingBlock != nil {
peer.logger.Error("already have a block for height", "height", block.Height)
return errDuplicateBlock
}
if peer.NumPendingBlockRequests == 0 {
panic("peer does not have pending requests")
}
peer.blocks[block.Height] = block
peer.NumPendingBlockRequests--
if peer.NumPendingBlockRequests == 0 {
peer.stopMonitor()
peer.stopBlockResponseTimer()
} else {
peer.recvMonitor.Update(recvSize)
peer.resetBlockResponseTimer()
}
return nil
}
// RemoveBlock removes the block at the given height.
func (peer *BpPeer) RemoveBlock(height int64) {
delete(peer.blocks, height)
}
// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
peer.blocks[height] = nil
if peer.NumPendingBlockRequests == 0 {
peer.startMonitor()
peer.resetBlockResponseTimer()
}
peer.NumPendingBlockRequests++
}
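// exampleBpPeerLifecycle is an illustrative sketch (not part of the reactor)
// of the request/response lifecycle of the BpPeer API above; the peer ID and
// error handler are assumed names. block must be the block for height 1 here.
func exampleBpPeerLifecycle(block *types.Block, recvSize int) {
	onErr := func(err error, id p2p.ID) { fmt.Println("peer error:", id, err) }
	peer := NewBpPeer(p2p.ID("abc123"), 0, 100, onErr, nil) // nil params -> defaults

	peer.RequestSent(1) // height 1 pending; flow monitor and response timer start
	// When the BlockResponse for height 1 arrives carrying recvSize bytes:
	if err := peer.AddBlock(block, recvSize); err != nil {
		fmt.Println("add block failed:", err)
	}
	// With no requests left pending, AddBlock stops the monitor and the timer;
	// otherwise it feeds the monitor and resets the response timer.
}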
// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
if peer.NumPendingBlockRequests == 0 {
return nil
}
curRate := peer.recvMonitor.Status().CurRate
// curRate can be 0 on start
if curRate != 0 && curRate < peer.params.minRecvRate {
err := errSlowPeer
peer.logger.Error("SendTimeout", "peer", peer,
"reason", err,
"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
return err
}
return nil
}
func (peer *BpPeer) onTimeout() {
peer.onErr(errNoPeerResponse, peer.ID)
}
func (peer *BpPeer) stopMonitor() {
peer.recvMonitor.Done()
peer.recvMonitor = nil
}
func (peer *BpPeer) startMonitor() {
peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
initialValue := float64(peer.params.minRecvRate) * math.E
peer.recvMonitor.SetREMA(initialValue)
}
func (peer *BpPeer) resetBlockResponseTimer() {
if peer.blockResponseTimer == nil {
peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
} else {
peer.blockResponseTimer.Reset(peer.params.timeout)
}
}
func (peer *BpPeer) stopBlockResponseTimer() bool {
if peer.blockResponseTimer == nil {
return false
}
return peer.blockResponseTimer.Stop()
}
// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
return &BpPeerParams{
// Timeout for a peer to respond to a block request.
timeout: 15 * time.Second,
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at least at that rate, we
// consider them to have timed out and we disconnect.
//
// Assuming a DSL connection (not a good choice) with 128 Kbps upload ~ 15 KB/s,
// sending data across the Atlantic ~ 7.5 KB/s.
minRecvRate: int64(7680),
// Monitor parameters
sampleRate: time.Second,
windowSize: 40 * time.Second,
}
}
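// Example (hypothetical caller, added for illustration): overriding the
// defaults before creating a peer; peerID, base, height and onErr are
// placeholders.
//
//	params := BpPeerDefaultParams()
//	params.timeout = 30 * time.Second // more tolerant of slow links
//	peer := NewBpPeer(peerID, base, height, onErr, params)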

blockchain/v1/peer_test.go Normal file

@@ -0,0 +1,280 @@
package v1
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
func TestPeerMonitor(t *testing.T) {
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {},
nil)
peer.SetLogger(log.TestingLogger())
peer.startMonitor()
assert.NotNil(t, peer.recvMonitor)
peer.stopMonitor()
assert.Nil(t, peer.recvMonitor)
}
func TestPeerResetBlockResponseTimer(t *testing.T) {
var (
numErrFuncCalls int // number of calls to the errFunc
lastErr error // last generated error
peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
)
params := &BpPeerParams{timeout: 20 * time.Millisecond}
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {
peerTestMtx.Lock()
defer peerTestMtx.Unlock()
lastErr = err
numErrFuncCalls++
},
params)
peer.SetLogger(log.TestingLogger())
checkByStoppingPeerTimer(t, peer, false)
// initial reset call with peer having a nil timer
peer.resetBlockResponseTimer()
assert.NotNil(t, peer.blockResponseTimer)
// make sure timer is running and stop it
checkByStoppingPeerTimer(t, peer, true)
// reset with running timer
peer.resetBlockResponseTimer()
time.Sleep(5 * time.Millisecond)
peer.resetBlockResponseTimer()
assert.NotNil(t, peer.blockResponseTimer)
// let the timer expire and ...
time.Sleep(50 * time.Millisecond)
// ... check timer is not running
checkByStoppingPeerTimer(t, peer, false)
peerTestMtx.Lock()
// ... check errNoPeerResponse has been sent
assert.Equal(t, 1, numErrFuncCalls)
assert.Equal(t, lastErr, errNoPeerResponse)
peerTestMtx.Unlock()
}
func TestPeerRequestSent(t *testing.T) {
params := &BpPeerParams{timeout: 2 * time.Millisecond}
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {},
params)
peer.SetLogger(log.TestingLogger())
peer.RequestSent(1)
assert.NotNil(t, peer.recvMonitor)
assert.NotNil(t, peer.blockResponseTimer)
assert.Equal(t, 1, peer.NumPendingBlockRequests)
peer.RequestSent(1)
assert.NotNil(t, peer.recvMonitor)
assert.NotNil(t, peer.blockResponseTimer)
assert.Equal(t, 2, peer.NumPendingBlockRequests)
}
func TestPeerGetAndRemoveBlock(t *testing.T) {
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 100,
func(err error, _ p2p.ID) {},
nil)
// Change peer height
peer.Height = int64(10)
assert.Equal(t, int64(10), peer.Height)
// request some blocks and receive few of them
for i := 1; i <= 10; i++ {
peer.RequestSent(int64(i))
if i > 5 {
// only receive blocks 1..5
continue
}
_ = peer.AddBlock(makeSmallBlock(i), 10)
}
tests := []struct {
name string
height int64
wantErr error
blockPresent bool
}{
{"no request", 100, errMissingBlock, false},
{"no block", 6, errMissingBlock, false},
{"block 1 present", 1, nil, true},
{"block max present", 5, nil, true},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// try to get the block
b, err := peer.BlockAtHeight(tt.height)
assert.Equal(t, tt.wantErr, err)
assert.Equal(t, tt.blockPresent, b != nil)
// remove the block
peer.RemoveBlock(tt.height)
_, err = peer.BlockAtHeight(tt.height)
assert.Equal(t, errMissingBlock, err)
})
}
}
func TestPeerAddBlock(t *testing.T) {
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 100,
func(err error, _ p2p.ID) {},
nil)
// request some blocks, receive one
for i := 1; i <= 10; i++ {
peer.RequestSent(int64(i))
if i == 5 {
// receive block 5
_ = peer.AddBlock(makeSmallBlock(i), 10)
}
}
tests := []struct {
name string
height int64
wantErr error
blockPresent bool
}{
{"no request", 50, errMissingBlock, false},
{"duplicate block", 5, errDuplicateBlock, true},
{"block 1 successfully received", 1, nil, true},
{"block max successfully received", 10, nil, true},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// try to get the block
err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
assert.Equal(t, tt.wantErr, err)
_, err = peer.BlockAtHeight(tt.height)
assert.Equal(t, tt.blockPresent, err == nil)
})
}
}
func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {
params := &BpPeerParams{timeout: 10 * time.Millisecond}
var (
numErrFuncCalls int // number of calls to the onErr function
lastErr error // last generated error
peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
)
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {
peerTestMtx.Lock()
defer peerTestMtx.Unlock()
lastErr = err
numErrFuncCalls++
},
params)
peer.SetLogger(log.TestingLogger())
peer.RequestSent(1)
time.Sleep(50 * time.Millisecond)
// timer should have expired by now, check that the on error function was called
peerTestMtx.Lock()
assert.Equal(t, 1, numErrFuncCalls)
assert.Equal(t, errNoPeerResponse, lastErr)
peerTestMtx.Unlock()
}
func TestPeerCheckRate(t *testing.T) {
params := &BpPeerParams{
timeout: time.Second,
minRecvRate: int64(100), // 100 bytes/sec exponential moving average
}
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {},
params)
peer.SetLogger(log.TestingLogger())
require.Nil(t, peer.CheckRate())
for i := 0; i < 40; i++ {
peer.RequestSent(int64(i))
}
// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
time.Sleep(900 * time.Millisecond)
// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
for i := 0; i < 10; i++ {
_ = peer.AddBlock(makeSmallBlock(i), 11)
time.Sleep(100 * time.Millisecond)
require.Nil(t, peer.CheckRate())
}
// slow peer - send a bit less than 10 bytes/100msec
for i := 10; i < 20; i++ {
_ = peer.AddBlock(makeSmallBlock(i), 9)
time.Sleep(100 * time.Millisecond)
}
// check peer is considered slow
assert.Equal(t, errSlowPeer, peer.CheckRate())
}
func TestPeerCleanup(t *testing.T) {
params := &BpPeerParams{timeout: 2 * time.Millisecond}
peer := NewBpPeer(
p2p.ID(tmrand.Str(12)), 0, 10,
func(err error, _ p2p.ID) {},
params)
peer.SetLogger(log.TestingLogger())
assert.Nil(t, peer.blockResponseTimer)
peer.RequestSent(1)
assert.NotNil(t, peer.blockResponseTimer)
peer.Cleanup()
checkByStoppingPeerTimer(t, peer, false)
}
// Check if peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
assert.NotPanics(t, func() {
stopped := peer.stopBlockResponseTimer()
if running {
assert.True(t, stopped)
} else {
assert.False(t, stopped)
}
})
}
func makeSmallBlock(height int) *types.Block {
return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}

blockchain/v1/pool.go Normal file

@@ -0,0 +1,370 @@
package v1
import (
"sort"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
logger log.Logger
// Set of peers that have sent status responses, with height bigger than pool.Height
peers map[p2p.ID]*BpPeer
// Set of block heights and the corresponding peers from where a block response is expected or has been received.
blocks map[int64]p2p.ID
plannedRequests map[int64]struct{} // list of blocks to be assigned peers for blockRequest
nextRequestHeight int64 // next height to be added to plannedRequests
Height int64 // height of next block to execute
MaxPeerHeight int64 // maximum height of all peers
toBcR bcReactor
}
// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
return &BlockPool{
Height: height,
MaxPeerHeight: 0,
peers: make(map[p2p.ID]*BpPeer),
blocks: make(map[int64]p2p.ID),
plannedRequests: make(map[int64]struct{}),
nextRequestHeight: height,
toBcR: toBcR,
}
}
// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
pool.logger = l
}
// ReachedMaxHeight check if the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
return pool.Height >= pool.MaxPeerHeight
}
func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
pool.logger.Info("reschedule requests made to peer for height ", "peerID", peerID, "height", height)
pool.plannedRequests[height] = struct{}{}
delete(pool.blocks, height)
pool.peers[peerID].RemoveBlock(height)
}
// Updates the pool's max height. If no peers are left MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
var newMax int64
for _, peer := range pool.peers {
peerHeight := peer.Height
if peerHeight > newMax {
newMax = peerHeight
}
}
pool.MaxPeerHeight = newMax
}
// UpdatePeer adds a new peer or updates an existing peer with a new base and height.
// If a peer is short it is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error {
peer := pool.peers[peerID]
if peer == nil {
if height < pool.Height {
pool.logger.Info("Peer height too small",
"peer", peerID, "height", height, "fsm_height", pool.Height)
return errPeerTooShort
}
// Add new peer.
peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil)
peer.SetLogger(pool.logger.With("peer", peerID))
pool.peers[peerID] = peer
pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers))
} else {
// Check if peer is lowering its height. This is not allowed.
if height < peer.Height {
pool.RemovePeer(peerID, errPeerLowersItsHeight)
return errPeerLowersItsHeight
}
// Update existing peer.
peer.Base = base
peer.Height = height
}
// Update the pool's MaxPeerHeight if needed.
pool.updateMaxPeerHeight()
return nil
}
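// Illustrative outcomes of UpdatePeer (mirroring the cases above), assuming
// pool.Height == 100:
//
//	pool.UpdatePeer("P1", 0, 50)  // unknown peer below pool height -> errPeerTooShort, not added
//	pool.UpdatePeer("P1", 0, 120) // unknown taller peer -> added, MaxPeerHeight becomes >= 120
//	pool.UpdatePeer("P1", 0, 110) // known peer lowering its height -> removed, errPeerLowersItsHeight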
// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
if peer == nil {
return
}
peer.Cleanup()
delete(pool.peers, peer.ID)
if peer.Height == pool.MaxPeerHeight {
pool.updateMaxPeerHeight()
}
}
// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
peer := pool.peers[peerID]
if peer == nil {
return
}
pool.logger.Info("removing peer", "peerID", peerID, "error", err)
// Reschedule the block requests made to the peer, or received and not processed yet.
// Note that some of the requests may be removed further down.
for h := range pool.peers[peerID].blocks {
pool.rescheduleRequest(peerID, h)
}
oldMaxPeerHeight := pool.MaxPeerHeight
// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
pool.deletePeer(peer)
// Check if the pool's MaxPeerHeight has been lowered.
// This may happen if the tallest peer has been removed.
if oldMaxPeerHeight > pool.MaxPeerHeight {
// Remove any planned requests for heights over the new MaxPeerHeight.
for h := range pool.plannedRequests {
if h > pool.MaxPeerHeight {
delete(pool.plannedRequests, h)
}
}
// Adjust the nextRequestHeight to the new max plus one.
if pool.nextRequestHeight > pool.MaxPeerHeight {
pool.nextRequestHeight = pool.MaxPeerHeight + 1
}
}
}
func (pool *BlockPool) removeShortPeers() {
for _, peer := range pool.peers {
if peer.Height < pool.Height {
pool.RemovePeer(peer.ID, nil)
}
}
}
func (pool *BlockPool) removeBadPeers() {
pool.removeShortPeers()
for _, peer := range pool.peers {
if err := peer.CheckRate(); err != nil {
pool.RemovePeer(peer.ID, err)
pool.toBcR.sendPeerError(err, peer.ID)
}
}
}
// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
heights := pool.makeRequestBatch(maxNumRequests)
if len(heights) != 0 {
pool.logger.Info("makeNextRequests will make following requests",
"number", len(heights), "heights", heights)
}
for _, height := range heights {
h := int64(height)
if !pool.sendRequest(h) {
// If a good peer was not found for sending the request at height h then return,
// as it shouldn't be possible to find a peer for h+1.
return
}
delete(pool.plannedRequests, h)
}
}
// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
pool.removeBadPeers()
// At this point pool.plannedRequests may include heights for requests to be redone due to removal of peers:
// - peers timed out or were removed by switch
// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
// Determine the number of requests needed by subtracting the number of requests already made from the maximum
// allowed
numNeeded := maxNumRequests - len(pool.blocks)
for len(pool.plannedRequests) < numNeeded {
if pool.nextRequestHeight > pool.MaxPeerHeight {
break
}
pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
pool.nextRequestHeight++
}
heights := make([]int, 0, len(pool.plannedRequests))
for k := range pool.plannedRequests {
heights = append(heights, int(k))
}
sort.Ints(heights)
return heights
}
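// Worked example (added for illustration): with maxNumRequests = 64 and 10
// requests already tracked in pool.blocks, numNeeded is 54, so plannedRequests
// is topped up to at most 54 heights, stopping early at MaxPeerHeight.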
func (pool *BlockPool) sendRequest(height int64) bool {
for _, peer := range pool.peers {
if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
continue
}
if peer.Base > height || peer.Height < height {
continue
}
err := pool.toBcR.sendBlockRequest(peer.ID, height)
if err == errNilPeerForBlockRequest {
// Switch does not have this peer, remove it and continue to look for another peer.
pool.logger.Error("switch does not have peer..removing peer selected for height", "peer",
peer.ID, "height", height)
pool.RemovePeer(peer.ID, err)
continue
}
if err == errSendQueueFull {
pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
continue
}
pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)
pool.blocks[height] = peer.ID
peer.RequestSent(height)
return true
}
pool.logger.Error("could not find peer to send request for block at height", "height", height)
return false
}
// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
peer, ok := pool.peers[peerID]
if !ok {
pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
return errBadDataFromPeer
}
if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
pool.logger.Error("block received from wrong peer", "height", block.Height,
"peer", peerID, "expected_peer", wantPeerID)
return errBadDataFromPeer
}
return peer.AddBlock(block, blockSize)
}
// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
type BlockData struct {
block *types.Block
peer *BpPeer
}
// BlockAndPeerAtHeight retrieves the block and delivery peer at specified height.
// Returns errMissingBlock if a block was not found.
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
peerID := pool.blocks[height]
peer := pool.peers[peerID]
if peer == nil {
return nil, errMissingBlock
}
block, err := peer.BlockAtHeight(height)
if err != nil {
return nil, err
}
return &BlockData{peer: peer, block: block}, nil
}
// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
first, err = pool.BlockAndPeerAtHeight(pool.Height)
second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
if err == nil {
err = err2
}
return
}
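// Note (added for clarity): two consecutive blocks are needed because block H
// is verified against the LastCommit carried by block H+1; see processBlock
// in reactor.go below.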
// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks; the blocks themselves are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
if err1 == nil {
pool.RemovePeer(first.peer.ID, err)
}
if err2 == nil {
pool.RemovePeer(second.peer.ID, err)
}
}
// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes the block at pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
peerID, peerOk := pool.blocks[pool.Height]
if peerOk {
pool.peers[peerID].RemoveBlock(pool.Height)
}
delete(pool.blocks, pool.Height)
pool.logger.Debug("removed block at height", "height", pool.Height)
pool.Height++
pool.removeShortPeers()
}
// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either block H or H+1 has not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
peerID := pool.blocks[pool.Height]
peer, ok := pool.peers[peerID]
if ok {
if _, err := peer.BlockAtHeight(pool.Height); err != nil {
pool.logger.Info("remove peer that hasn't sent block at pool.Height",
"peer", peerID, "height", pool.Height)
pool.RemovePeer(peerID, err)
return
}
}
peerID = pool.blocks[pool.Height+1]
peer, ok = pool.peers[peerID]
if ok {
if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
"peer", peerID, "height", pool.Height+1)
pool.RemovePeer(peerID, err)
return
}
}
}
// Cleanup performs pool and peer cleanup
func (pool *BlockPool) Cleanup() {
for id, peer := range pool.peers {
peer.Cleanup()
delete(pool.peers, id)
}
pool.plannedRequests = make(map[int64]struct{})
pool.blocks = make(map[int64]p2p.ID)
pool.nextRequestHeight = 0
pool.Height = 0
pool.MaxPeerHeight = 0
}
// NumPeers returns the number of peers in the pool
func (pool *BlockPool) NumPeers() int {
return len(pool.peers)
}
// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
return len(pool.blocks) < maxNumRequests
}

blockchain/v1/pool_test.go Normal file

@@ -0,0 +1,691 @@
package v1
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
type testPeer struct {
id p2p.ID
base int64
height int64
}
type testBcR struct {
logger log.Logger
}
type testValues struct {
numRequestsSent int
}
var testResults testValues
func resetPoolTestResults() {
testResults.numRequestsSent = 0
}
func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
}
func (testR *testBcR) sendStatusRequest() {
}
func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
testResults.numRequestsSent++
return nil
}
func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
}
func (testR *testBcR) switchToConsensus() {
}
func newTestBcR() *testBcR {
testBcR := &testBcR{logger: log.TestingLogger()}
return testBcR
}
type tPBlocks struct {
id p2p.ID
create bool
}
// Makes a block pool with specified current height, list of peers, block requests and block responses
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
bPool := NewBlockPool(height, bcr)
bPool.SetLogger(bcr.logger)
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
var maxH int64
for _, p := range peers {
if p.Height > maxH {
maxH = p.Height
}
bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil)
bPool.peers[p.ID].SetLogger(bcr.logger)
}
bPool.MaxPeerHeight = maxH
for h, p := range blocks {
bPool.blocks[h] = p.id
bPool.peers[p.id].RequestSent(h)
if p.create {
// simulate that a block at height h has been received
_ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
}
}
return bPool
}
func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
assert.Equal(t, len(set1), len(set2))
for peerID, peer1 := range set1 {
peer2 := set2[peerID]
assert.NotNil(t, peer2)
assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
assert.Equal(t, peer1.Height, peer2.Height)
assert.Equal(t, peer1.Base, peer2.Base)
assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
for h, block1 := range peer1.blocks {
block2 := peer2.blocks[h]
// block1 and block2 could be nil if a request was made but no block was received
assert.Equal(t, block1, block2)
}
}
}
func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
assert.Equal(t, poolWanted.blocks, pool.blocks)
assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
assert.Equal(t, poolWanted.Height, pool.Height)
}
func TestBlockPoolUpdatePeer(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
args testPeer
poolWanted *BlockPool
errWanted error
}{
{
name: "add a first short peer",
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
args: testPeer{"P1", 0, 50},
errWanted: errPeerTooShort,
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "add a first good peer",
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
args: testPeer{"P1", 0, 101},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
},
{
name: "add a first good peer with base",
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
args: testPeer{"P1", 10, 101},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}),
},
{
name: "increase the height of P1 from 120 to 123",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
args: testPeer{"P1", 0, 123},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
},
{
name: "decrease the height of P1 from 120 to 110",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
args: testPeer{"P1", 0, 110},
errWanted: errPeerLowersItsHeight,
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "decrease the height of P1 from 105 to 102 with blocks",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
map[int64]tPBlocks{
100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
args: testPeer{"P1", 0, 102},
errWanted: errPeerLowersItsHeight,
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
map[int64]tPBlocks{}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
pool := tt.pool
err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height)
assert.Equal(t, tt.errWanted, err)
assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
})
}
}
func TestBlockPoolRemovePeer(t *testing.T) {
testBcR := newTestBcR()
type args struct {
peerID p2p.ID
err error
}
tests := []struct {
name string
pool *BlockPool
args args
poolWanted *BlockPool
}{
{
name: "attempt to delete non-existing peer",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
args: args{"P99", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
},
{
name: "delete the only peer without blocks",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "delete the shortest of two peers without blocks",
pool: makeBlockPool(
testBcR,
100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
map[int64]tPBlocks{}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
},
{
name: "delete the tallest of two peers without blocks",
pool: makeBlockPool(
testBcR,
100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
map[int64]tPBlocks{}),
args: args{"P2", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
},
{
name: "delete the only peer with block requests sent and blocks received",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "delete the shortest of two peers with block requests sent and blocks received",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
},
{
name: "delete the tallest of two peers with block requests sent and blocks received",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}
func TestBlockPoolRemoveShortPeers(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
poolWanted *BlockPool
}{
{
name: "no short peers",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
poolWanted: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
},
{
name: "one short peer",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
poolWanted: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
},
{
name: "all short peers",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
pool := tt.pool
pool.removeShortPeers()
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}
func TestBlockPoolSendRequestBatch(t *testing.T) {
type testPeerResult struct {
id p2p.ID
numPendingBlockRequests int
}
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
maxRequestsPerPeer int
expRequests map[int64]bool
expRequestsSent int
expPeerResults []testPeerResult
}{
{
name: "one peer - send up to maxRequestsPerPeer block requests",
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
maxRequestsPerPeer: 2,
expRequests: map[int64]bool{10: true, 11: true},
expRequestsSent: 2,
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
},
{
name: "multiple peers - stops at gap between height and base",
pool: makeBlockPool(testBcR, 10, []BpPeer{
{ID: "P1", Base: 1, Height: 12},
{ID: "P2", Base: 15, Height: 100},
}, map[int64]tPBlocks{}),
maxRequestsPerPeer: 10,
expRequests: map[int64]bool{10: true, 11: true, 12: true},
expRequestsSent: 3,
expPeerResults: []testPeerResult{
{id: "P1", numPendingBlockRequests: 3},
{id: "P2", numPendingBlockRequests: 0},
},
},
{
name: "n peers - send n*maxRequestsPerPeer block requests",
pool: makeBlockPool(
testBcR,
10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{}),
maxRequestsPerPeer: 2,
expRequests: map[int64]bool{10: true, 11: true},
expRequestsSent: 4,
expPeerResults: []testPeerResult{
{id: "P1", numPendingBlockRequests: 2},
{id: "P2", numPendingBlockRequests: 2}},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
resetPoolTestResults()
var pool = tt.pool
maxRequestsPerPeer = tt.maxRequestsPerPeer
pool.MakeNextRequests(10)
assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent)
for _, tPeer := range tt.expPeerResults {
var peer = pool.peers[tPeer.id]
assert.NotNil(t, peer)
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
}
})
}
}
func TestBlockPoolAddBlock(t *testing.T) {
testBcR := newTestBcR()
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
type args struct {
peerID p2p.ID
block *types.Block
blockSize int
}
tests := []struct {
name string
pool *BlockPool
args args
poolWanted *BlockPool
errWanted error
}{
{name: "block from unknown peer",
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
args: args{
peerID: "P2",
block: types.MakeBlock(int64(10), txs, nil, nil),
blockSize: 100,
},
poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
errWanted: errBadDataFromPeer,
},
{name: "unexpected block 11 from known peer - waiting for 10",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", false}}),
args: args{
peerID: "P1",
block: types.MakeBlock(int64(11), txs, nil, nil),
blockSize: 100,
},
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", false}}),
errWanted: errMissingBlock,
},
{name: "unexpected block 10 from known peer - already have 10",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
args: args{
peerID: "P1",
block: types.MakeBlock(int64(10), txs, nil, nil),
blockSize: 100,
},
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
errWanted: errDuplicateBlock,
},
{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{10: {"P1", false}}),
args: args{
peerID: "P2",
block: types.MakeBlock(int64(10), txs, nil, nil),
blockSize: 100,
},
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{10: {"P1", false}}),
errWanted: errBadDataFromPeer,
},
{name: "expected block from known peer",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", false}}),
args: args{
peerID: "P1",
block: types.MakeBlock(int64(10), txs, nil, nil),
blockSize: 100,
},
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{10: {"P1", true}}),
errWanted: nil,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
assert.Equal(t, tt.errWanted, err)
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}
func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
firstWanted int64
secondWanted int64
errWanted error
}{
{
name: "both blocks missing",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
errWanted: errMissingBlock,
},
{
name: "second block missing",
pool: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
firstWanted: 15,
errWanted: errMissingBlock,
},
{
name: "first block missing",
pool: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
secondWanted: 16,
errWanted: errMissingBlock,
},
{
name: "both blocks present",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
firstWanted: 10,
secondWanted: 11,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
pool := tt.pool
gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
assert.Equal(t, tt.errWanted, err)
if tt.firstWanted != 0 {
peer := pool.blocks[tt.firstWanted]
block := pool.peers[peer].blocks[tt.firstWanted]
assert.Equal(t, block, gotFirst.block,
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
tt.firstWanted, gotFirst.block.Height)
}
if tt.secondWanted != 0 {
peer := pool.blocks[tt.secondWanted]
block := pool.peers[peer].blocks[tt.secondWanted]
assert.Equal(t, block, gotSecond.block,
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
tt.secondWanted, gotSecond.block.Height)
}
})
}
}
func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
poolWanted *BlockPool
}{
{
name: "both blocks missing",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
},
{
name: "second block missing",
pool: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
poolWanted: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P2", Height: 100}},
map[int64]tPBlocks{18: {"P2", true}}),
},
{
name: "first block missing",
pool: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
poolWanted: makeBlockPool(testBcR, 15,
[]BpPeer{{ID: "P1", Height: 100}},
map[int64]tPBlocks{18: {"P1", true}}),
},
{
name: "both blocks present",
pool: makeBlockPool(testBcR, 10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
poolWanted: makeBlockPool(testBcR, 10,
[]BpPeer{},
map[int64]tPBlocks{}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}
func TestProcessedCurrentHeightBlock(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
poolWanted *BlockPool
}{
{
name: "one peer",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
map[int64]tPBlocks{101: {"P1", true}}),
},
{
name: "multiple peers",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
101: {"P2", true}, 103: {"P2", false},
102: {"P3", true}, 106: {"P3", true}}),
poolWanted: makeBlockPool(testBcR, 101,
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
104: {"P1", true}, 105: {"P1", false},
101: {"P2", true}, 103: {"P2", false},
102: {"P3", true}, 106: {"P3", true}}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
tt.pool.ProcessedCurrentHeightBlock()
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}
func TestRemovePeerAtCurrentHeight(t *testing.T) {
testBcR := newTestBcR()
tests := []struct {
name string
pool *BlockPool
poolWanted *BlockPool
}{
{
name: "one peer, remove peer for block at H",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "one peer, remove peer for block at H+1",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "multiple peers, remove peer for block at H",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
101: {"P2", true}, 103: {"P2", false},
102: {"P3", true}, 106: {"P3", true}}),
poolWanted: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
101: {"P2", true}, 103: {"P2", false},
102: {"P3", true}, 106: {"P3", true}}),
},
{
name: "multiple peers, remove peer for block at H+1",
pool: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
101: {"P2", false}, 103: {"P2", false},
102: {"P3", true}, 106: {"P3", true}}),
poolWanted: makeBlockPool(testBcR, 100,
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
map[int64]tPBlocks{
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
102: {"P3", true}, 106: {"P3", true}}),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
})
}
}

blockchain/v1/reactor.go Normal file

@@ -0,0 +1,569 @@
package v1
import (
"fmt"
"reflect"
"time"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySendIntervalMS = 10
// ask for best height every 10s
statusUpdateIntervalSeconds = 10
)
var (
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
// have not been received.
maxRequestsPerPeer = 20
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
maxNumRequests = 64
)
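// Illustrative consequence of the limits above: with maxRequestsPerPeer = 20
// and maxNumRequests = 64, at most 64 heights are outstanding pool-wide and a
// single peer is assigned at most 20 of them (see pool.sendRequest).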
type consensusReactor interface {
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
p2p.BaseReactor
initialState sm.State // immutable
state sm.State
blockExec *sm.BlockExecutor
store *store.BlockStore
fastSync bool
stateSynced bool
fsm *BcReactorFSM
blocksSynced uint64
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
messagesForFSMCh chan bcReactorMessage
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
// to this channel to be processed in the context of the poolRoutine.
errorsForFSMCh chan bcReactorMessage
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
// the switch.
eventsFromFSMCh chan bcFsmMessage
swReporter *behaviour.SwitchReporter
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}
peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := bc.DecodeMsg(msgBytes)
if err != nil {
bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
if err = bc.ValidateMsg(msg); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: src.ID(),
height: bi.Height,
block: bi,
length: len(msgBytes),
},
}
bcR.Logger.Info("Received", "src", src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: src.ID(),
height: msg.Height,
length: len(msgBytes),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
}
}
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer
// the FSM had already removed the peers
// }
default:
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
}
case <-bcR.Quit():
break ForLoop
}
}
}
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
}
}
func (bcR *BlockchainReactor) processBlock() error {
first, second, err := bcR.fsm.FirstTwoBlocks()
if err != nil {
// We need both to sync the first block.
return err
}
chainID := bcR.initialState.ChainID
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader := firstParts.Header()
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("error during commit verification", "err", err,
"first", first.Height, "second", second.Height)
return errBlockVerificationFailure
}
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
if err != nil {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
return nil
}
// Implements bcRNotifier
// sendStatusRequest broadcasts a `StatusRequest` asking all peers for their `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
panic(err)
}
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}
// Implements bcRNotifier
// sendBlockRequest sends a `BlockRequest` for the given height to the peer.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
peer := bcR.Switch.Peers().Get(peerID)
if peer == nil {
return errNilPeerForBlockRequest
}
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
if err != nil {
return err
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
return errSendQueueFull
}
return nil
}
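// Note (added for clarity): the two sentinel errors returned above are
// consumed by pool.sendRequest, which removes the peer on
// errNilPeerForBlockRequest and moves on to another peer on errSendQueueFull.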
// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
}
// else {
// Should only happen during testing.
// }
}
// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
event: peerErrorEv,
data: bFsmEventData{
peerID: peerID,
err: err,
},
}
bcR.eventsFromFSMCh <- msgData
}
// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
if timer == nil {
panic("nil timer pointer parameter")
}
if *timer == nil {
*timer = time.AfterFunc(timeout, func() {
msg := bcReactorMessage{
event: stateTimeoutEv,
data: bReactorEventData{
stateName: name,
},
}
bcR.errorsForFSMCh <- msg
})
} else {
(*timer).Reset(timeout)
}
}


@@ -0,0 +1,462 @@
package v1
import (
"errors"
"fmt"
"sync"
"time"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
// Blockchain Reactor State
type bcReactorFSMState struct {
name string
// called when transitioning out of current state
handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
// called when entering the state
enter func(fsm *BcReactorFSM)
// timeout to ensure FSM is not stuck in a state forever
// the timer is owned and run by the fsm instance
timeout time.Duration
}
func (s *bcReactorFSMState) String() string {
return s.name
}
// BcReactorFSM is the datastructure for the Blockchain Reactor State Machine
type BcReactorFSM struct {
logger log.Logger
mtx sync.Mutex
startTime time.Time
state *bcReactorFSMState
stateTimer *time.Timer
pool *BlockPool
// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, reporting errors, etc.
toBcR bcReactor
}
// NewFSM creates a new reactor FSM.
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
return &BcReactorFSM{
state: unknown,
startTime: time.Now(),
pool: NewBlockPool(height, toBcR),
toBcR: toBcR,
}
}
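// Illustrative sketch (not part of the original file): the reactor owns the
// FSM and feeds it events wrapped in bcReactorMessage values, as poolRoutine
// in reactor.go does. Start is defined outside this excerpt.
//
//	fsm := NewFSM(startHeight, bcR)
//	fsm.Start()
//	_ = fsm.Handle(&bcReactorMessage{event: statusResponseEv,
//		data: bReactorEventData{peerID: id, base: b, height: h}})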
// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
type bReactorEventData struct {
peerID p2p.ID
err error // for peer error: timeout, slow; for processed block event if error occurred
base int64 // for status response
height int64 // for status response; for processed block event
block *types.Block // for block response
stateName string // for state timeout events
length int // for block response event, length of received block, used to detect slow peers
maxNumRequests int // for request needed event, maximum number of pending requests
}
// Blockchain Reactor Events (the input to the state machine)
type bReactorEvent uint
const (
// message type events
startFSMEv = iota + 1
statusResponseEv
blockResponseEv
noBlockResponseEv
processedBlockEv
makeRequestsEv
stopFSMEv
// other events
peerRemoveEv = iota + 256
stateTimeoutEv
)
func (msg *bcReactorMessage) String() string {
var dataStr string
switch msg.event {
case startFSMEv:
dataStr = ""
case statusResponseEv:
dataStr = fmt.Sprintf("peer=%v base=%v height=%v", msg.data.peerID, msg.data.base, msg.data.height)
case blockResponseEv:
dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
msg.data.peerID, msg.data.block.Height, msg.data.length)
case noBlockResponseEv:
dataStr = fmt.Sprintf("peer=%v requested height=%v",
msg.data.peerID, msg.data.height)
case processedBlockEv:
dataStr = fmt.Sprintf("error=%v", msg.data.err)
case makeRequestsEv:
dataStr = ""
case stopFSMEv:
dataStr = ""
case peerRemoveEv:
dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
case stateTimeoutEv:
dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
default:
dataStr = "cannot interpret message data"
}
return fmt.Sprintf("%v: %v", msg.event, dataStr)
}
func (ev bReactorEvent) String() string {
switch ev {
case startFSMEv:
return "startFSMEv"
case statusResponseEv:
return "statusResponseEv"
case blockResponseEv:
return "blockResponseEv"
case noBlockResponseEv:
return "noBlockResponseEv"
case processedBlockEv:
return "processedBlockEv"
case makeRequestsEv:
return "makeRequestsEv"
case stopFSMEv:
return "stopFSMEv"
case peerRemoveEv:
return "peerRemoveEv"
case stateTimeoutEv:
return "stateTimeoutEv"
default:
return "event unknown"
}
}
// states
var (
unknown *bcReactorFSMState
waitForPeer *bcReactorFSMState
waitForBlock *bcReactorFSMState
finished *bcReactorFSMState
)
// timeouts for state timers
const (
waitForPeerTimeout = 3 * time.Second
waitForBlockAtCurrentHeightTimeout = 10 * time.Second
)
// errors
var (
// internal to the package
errNoErrorFinished = errors.New("fast sync is finished")
errInvalidEvent = errors.New("invalid event in current state")
errMissingBlock = errors.New("missing blocks")
errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
errSendQueueFull = errors.New("block request not made, send-queue is full")
errPeerTooShort = errors.New("peer height too low, old peer removed/ new peer not added")
errSwitchRemovesPeer = errors.New("switch is removing peer")
errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
errNoTallerPeer = errors.New("fast sync timed out on waiting for a peer taller than this node")
// errors returned by the state handlers and reported eventually to the switch
errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous")
errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
errNoPeerResponse = errors.New("fast sync timed out on peer block response")
errBadDataFromPeer = errors.New("fast sync received block from wrong peer or block is bad")
errDuplicateBlock = errors.New("fast sync received duplicate block from peer")
errBlockVerificationFailure = errors.New("fast sync block verification failure")
errSlowPeer = errors.New("fast sync peer is not sending us data fast enough")
)
func init() {
unknown = &bcReactorFSMState{
name: "unknown",
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
switch ev {
case startFSMEv:
// Broadcast a status request. sendStatusRequest currently never returns a non-nil error.
fsm.toBcR.sendStatusRequest()
return waitForPeer, nil
case stopFSMEv:
return finished, errNoErrorFinished
default:
return unknown, errInvalidEvent
}
},
}
waitForPeer = &bcReactorFSMState{
name: "waitForPeer",
timeout: waitForPeerTimeout,
enter: func(fsm *BcReactorFSM) {
// (Re)start the state timer when entering the state.
fsm.resetStateTimer()
},
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
switch ev {
case stateTimeoutEv:
if data.stateName != "waitForPeer" {
fsm.logger.Error("received a state timeout event for different state",
"state", data.stateName)
return waitForPeer, errTimeoutEventWrongState
}
// There was no statusResponse received from any peer.
// Should we send status request again?
return finished, errNoTallerPeer
case statusResponseEv:
if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil {
if fsm.pool.NumPeers() == 0 {
return waitForPeer, err
}
}
if fsm.stateTimer != nil {
fsm.stateTimer.Stop()
}
return waitForBlock, nil
case stopFSMEv:
if fsm.stateTimer != nil {
fsm.stateTimer.Stop()
}
return finished, errNoErrorFinished
default:
return waitForPeer, errInvalidEvent
}
},
}
waitForBlock = &bcReactorFSMState{
name: "waitForBlock",
timeout: waitForBlockAtCurrentHeightTimeout,
enter: func(fsm *BcReactorFSM) {
// (Re)start the state timer when entering the state.
fsm.resetStateTimer()
},
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
switch ev {
case statusResponseEv:
err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height)
if fsm.pool.NumPeers() == 0 {
return waitForPeer, err
}
if fsm.pool.ReachedMaxHeight() {
return finished, err
}
return waitForBlock, err
case blockResponseEv:
fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
if err != nil {
// The block was unsolicited, came from an unexpected peer, or is one we already have.
// Ignore block, remove peer and send error to switch.
fsm.pool.RemovePeer(data.peerID, err)
fsm.toBcR.sendPeerError(err, data.peerID)
}
if fsm.pool.NumPeers() == 0 {
return waitForPeer, err
}
return waitForBlock, err
case noBlockResponseEv:
fsm.logger.Error("peer does not have requested block", "peer", data.peerID)
return waitForBlock, nil
case processedBlockEv:
if data.err != nil {
first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
fsm.logger.Error("error processing block", "err", data.err,
"first", first.block.Height, "second", second.block.Height)
fsm.logger.Error("send peer error for", "peer", first.peer.ID)
fsm.toBcR.sendPeerError(data.err, first.peer.ID)
fsm.logger.Error("send peer error for", "peer", second.peer.ID)
fsm.toBcR.sendPeerError(data.err, second.peer.ID)
// Remove the first two blocks. This also removes the peers that sent them.
fsm.pool.InvalidateFirstTwoBlocks(data.err)
} else {
fsm.pool.ProcessedCurrentHeightBlock()
// Since we advanced one block, reset the state timer.
fsm.resetStateTimer()
}
// Both cases above may result in achieving maximum height.
if fsm.pool.ReachedMaxHeight() {
return finished, nil
}
return waitForBlock, data.err
case peerRemoveEv:
// This event is sent by the switch to remove disconnected and errored peers.
fsm.pool.RemovePeer(data.peerID, data.err)
if fsm.pool.NumPeers() == 0 {
return waitForPeer, nil
}
if fsm.pool.ReachedMaxHeight() {
return finished, nil
}
return waitForBlock, nil
case makeRequestsEv:
fsm.makeNextRequests(data.maxNumRequests)
return waitForBlock, nil
case stateTimeoutEv:
if data.stateName != "waitForBlock" {
fsm.logger.Error("received a state timeout event for different state",
"state", data.stateName)
return waitForBlock, errTimeoutEventWrongState
}
// We haven't received the block at current height or height+1. Remove peer.
fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
fsm.resetStateTimer()
if fsm.pool.NumPeers() == 0 {
return waitForPeer, errNoPeerResponseForCurrentHeights
}
if fsm.pool.ReachedMaxHeight() {
return finished, nil
}
return waitForBlock, errNoPeerResponseForCurrentHeights
case stopFSMEv:
if fsm.stateTimer != nil {
fsm.stateTimer.Stop()
}
return finished, errNoErrorFinished
default:
return waitForBlock, errInvalidEvent
}
},
}
finished = &bcReactorFSMState{
name: "finished",
enter: func(fsm *BcReactorFSM) {
fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
fsm.toBcR.switchToConsensus()
fsm.cleanup()
},
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
return finished, nil
},
}
}
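// The four states are declared as package-level variables and wired up in
// init() rather than at declaration, because the handlers reference each
// other (unknown returns waitForPeer, waitForPeer returns waitForBlock, and
// so on) and would otherwise form an initialization cycle.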
// bcReactor is the interface used by the FSM to send block and status
// requests, report peer errors, and reset state timers.
// It is implemented by BlockchainReactor and by the test reactor.
type bcReactor interface {
sendStatusRequest()
sendBlockRequest(peerID p2p.ID, height int64) error
sendPeerError(err error, peerID p2p.ID)
resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
switchToConsensus()
}
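// A minimal no-op implementation, shown here only to illustrate what the FSM
// expects from its environment (a sketch; the production implementation is
// BlockchainReactor, and the FSM tests use a testReactor):
//
//	type nopReactor struct{}
//
//	func (nopReactor) sendStatusRequest()                                  {}
//	func (nopReactor) sendBlockRequest(p2p.ID, int64) error                { return nil }
//	func (nopReactor) sendPeerError(error, p2p.ID)                         {}
//	func (nopReactor) resetStateTimer(string, **time.Timer, time.Duration) {}
//	func (nopReactor) switchToConsensus()                                  {}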
// SetLogger sets the FSM logger.
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
fsm.logger = l
fsm.pool.SetLogger(l)
}
// Start starts the FSM.
func (fsm *BcReactorFSM) Start() {
_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
}
// Handle processes messages and events sent to the FSM.
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
fsm.mtx.Lock()
defer fsm.mtx.Unlock()
fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)
if fsm.state == nil {
fsm.state = unknown
}
next, err := fsm.state.handle(fsm, msg.event, msg.data)
if err != nil {
fsm.logger.Error("FSM event handler returned", "err", err,
"state", fsm.state, "event", msg.event)
}
oldState := fsm.state.name
fsm.transition(next)
if oldState != fsm.state.name {
fsm.logger.Info("FSM changed state", "new_state", fsm.state)
}
return err
}
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
if next == nil {
return
}
if fsm.state != next {
fsm.state = next
if next.enter != nil {
next.enter(fsm)
}
}
}
// resetStateTimer is called when entering an FSM state in order to detect lack of progress in the state machine.
// Note the use of the bcReactor interface, which lets tests stub the timer so it never actually expires.
func (fsm *BcReactorFSM) resetStateTimer() {
fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
}
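// Because the timer goes through the bcReactor interface, tests can supply a
// resetStateTimer that merely records the call (see the testReactor in the
// FSM tests), so state transitions can be driven without real timeouts firing.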
func (fsm *BcReactorFSM) isCaughtUp() bool {
return fsm.state == finished
}
func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
fsm.pool.MakeNextRequests(maxNumRequests)
}
func (fsm *BcReactorFSM) cleanup() {
fsm.pool.Cleanup()
}
// NeedsBlocks checks if more block requests are required.
func (fsm *BcReactorFSM) NeedsBlocks() bool {
fsm.mtx.Lock()
defer fsm.mtx.Unlock()
return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
}
// FirstTwoBlocks returns the two blocks at pool height and height+1
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
fsm.mtx.Lock()
defer fsm.mtx.Unlock()
firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
if err == nil {
first = firstBP.block
second = secondBP.block
}
return
}
// Status returns the pool's height and the maximum peer height.
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
fsm.mtx.Lock()
defer fsm.mtx.Unlock()
return fsm.pool.Height, fsm.pool.MaxPeerHeight
}


@@ -0,0 +1,944 @@
package v1
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
type lastBlockRequestT struct {
peerID p2p.ID
height int64
}
type lastPeerErrorT struct {
peerID p2p.ID
err error
}
// reactor for FSM testing
type testReactor struct {
logger log.Logger
fsm *BcReactorFSM
numStatusRequests int
numBlockRequests int
lastBlockRequest lastBlockRequestT
lastPeerError lastPeerErrorT
stateTimerStarts map[string]int
}
func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}
type fsmStepTestValues struct {
currentState string
event bReactorEvent
data bReactorEventData
wantErr error
wantState string
wantStatusReqSent bool
wantReqIncreased bool
wantNewBlocks []int64
wantRemovedPeers []p2p.ID
}
// ---------------------------------------------------------------------------
// helper test function for different FSM events, state and expected behavior
func sStopFSMEv(current, expected string) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: stopFSMEv,
wantState: expected,
wantErr: errNoErrorFinished}
}
func sUnknownFSMEv(current string) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: 1234,
wantState: current,
wantErr: errInvalidEvent}
}
func sStartFSMEv() fsmStepTestValues {
return fsmStepTestValues{
currentState: "unknown",
event: startFSMEv,
wantState: "waitForPeer",
wantStatusReqSent: true}
}
func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: stateTimeoutEv,
data: bReactorEventData{
stateName: timedoutState,
},
wantState: expected,
wantErr: wantErr,
}
}
func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: processedBlockEv,
data: bReactorEventData{
err: reactorError,
},
wantState: expected,
wantErr: reactorError,
}
}
func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: statusResponseEv,
data: bReactorEventData{peerID: peerID, height: height},
wantState: expected,
wantErr: err}
}
func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: makeRequestsEv,
data: bReactorEventData{maxNumRequests: maxPendingRequests},
wantState: expected,
wantReqIncreased: true,
}
}
func sMakeRequestsEvErrored(current, expected string,
maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: makeRequestsEv,
data: bReactorEventData{maxNumRequests: maxPendingRequests},
wantState: expected,
wantErr: err,
wantRemovedPeers: peersRemoved,
wantReqIncreased: true,
}
}
func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
return fsmStepTestValues{
currentState: current,
event: blockResponseEv,
data: bReactorEventData{
peerID: peerID,
height: height,
block: types.MakeBlock(height, txs, nil, nil),
length: 100},
wantState: expected,
wantNewBlocks: append(prevBlocks, height),
}
}
func sBlockRespEvErrored(current, expected string,
peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
return fsmStepTestValues{
currentState: current,
event: blockResponseEv,
data: bReactorEventData{
peerID: peerID,
height: height,
block: types.MakeBlock(height, txs, nil, nil),
length: 100},
wantState: expected,
wantErr: wantErr,
wantRemovedPeers: peersRemoved,
wantNewBlocks: prevBlocks,
}
}
func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
return fsmStepTestValues{
currentState: current,
event: peerRemoveEv,
data: bReactorEventData{
peerID: peerID,
err: err,
},
wantState: expected,
wantRemovedPeers: peersRemoved,
}
}
// --------------------------------------------
func newTestReactor(height int64) *testReactor {
testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
testBcR.fsm = NewFSM(height, testBcR)
testBcR.fsm.SetLogger(testBcR.logger)
return testBcR
}
func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
// There is currently no good way to know to which peer a block request was sent.
// So in cases where it does not matter, before simulating a block response
// we cheat and look up the peer the pool expects the block from.
if step.event == blockResponseEv {
height := step.data.height
peerID, ok := testBcR.fsm.pool.blocks[height]
if ok {
step.data.peerID = peerID
}
}
}
type testFields struct {
name string
startingHeight int64
maxRequestsPerPeer int
maxPendingRequests int
steps []fsmStepTestValues
}
func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// Create test reactor
testBcR := newTestReactor(tt.startingHeight)
if tt.maxRequestsPerPeer != 0 {
maxRequestsPerPeer = tt.maxRequestsPerPeer
}
for _, step := range tt.steps {
step := step
assert.Equal(t, step.currentState, testBcR.fsm.state.name)
var heightBefore int64
if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
heightBefore = testBcR.fsm.pool.Height
}
oldNumStatusRequests := testBcR.numStatusRequests
oldNumBlockRequests := testBcR.numBlockRequests
if matchRespToReq {
fixBlockResponseEvStep(&step, testBcR)
}
fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
assert.Equal(t, step.wantErr, fsmErr)
if step.wantStatusReqSent {
assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
} else {
assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
}
if step.wantReqIncreased {
assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
} else {
assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
}
for _, height := range step.wantNewBlocks {
_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
assert.Nil(t, err)
}
if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
heightAfter := testBcR.fsm.pool.Height
assert.Equal(t, heightBefore, heightAfter)
firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
assert.NotNil(t, err1)
assert.NotNil(t, err2)
assert.Nil(t, firstAfter)
assert.Nil(t, secondAfter)
}
assert.Equal(t, step.wantState, testBcR.fsm.state.name)
if step.wantState == "finished" {
assert.True(t, testBcR.fsm.isCaughtUp())
}
}
})
}
}
func TestFSMBasic(t *testing.T) {
tests := []testFields{
{
name: "one block, one peer - TS2",
startingHeight: 1,
maxRequestsPerPeer: 2,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
sProcessedBlockEv("waitForBlock", "finished", nil),
},
},
{
name: "multi block, multi peer - TS2",
startingHeight: 1,
maxRequestsPerPeer: 2,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
sProcessedBlockEv("waitForBlock", "finished", nil),
},
},
}
executeFSMTests(t, tests, true)
}
func TestFSMBlockVerificationFailure(t *testing.T) {
tests := []testFields{
{
name: "block verification failure - TS2 variant",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and get blocks 1-3 from it
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
// add P2
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
// process block failure, should remove P1 and all blocks
sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),
// get blocks 1-3 from P2
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
// finish after processing blocks 1 and 2
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
sProcessedBlockEv("waitForBlock", "finished", nil),
},
},
}
executeFSMTests(t, tests, false)
}
func TestFSMBadBlockFromPeer(t *testing.T) {
tests := []testFields{
{
name: "block we haven't asked for",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and ask for blocks 1-3
sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// blockResponseEv for height 100 should cause an error
sBlockRespEvErrored("waitForBlock", "waitForPeer",
"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
},
},
{
name: "block we already have",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and get block 1
sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock",
"P1", 1, []int64{}),
// Get block 1 again. Since peer is removed together with block 1,
// the blocks present in the pool should be {}
sBlockRespEvErrored("waitForBlock", "waitForPeer",
"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
},
},
{
name: "block from unknown peer",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and get block 1
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
// get block 1 from unknown peer P2
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEvErrored("waitForBlock", "waitForBlock",
"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
},
},
{
name: "block from wrong peer",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1, make requests for blocks 1-3 to P1
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// add P2
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
// receive block 1 from P2
sBlockRespEvErrored("waitForBlock", "waitForBlock",
"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
},
},
}
executeFSMTests(t, tests, false)
}
func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
tests := []testFields{
{
name: "block at current height undelivered - TS5",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1, get blocks 1 and 2, process block 1
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock",
"P1", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock",
"P1", 2, []int64{1}),
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
// add P2
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
// timeout on block 3, P1 should be removed
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
// make requests and finish by receiving blocks 2 and 3 from P2
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
sProcessedBlockEv("waitForBlock", "finished", nil),
},
},
{
name: "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1, request blocks 1-3 from P1
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// add P2 (tallest)
sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// receive blocks 1-3 from P1
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
// process blocks at heights 1 and 2
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
// timeout on block at height 4
sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
},
},
}
executeFSMTests(t, tests, true)
}
func TestFSMPeerRelatedEvents(t *testing.T) {
tests := []testFields{
{
name: "peer remove event with no blocks",
startingHeight: 1,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1, P2, P3
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),
// switch removes P2
sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
},
},
{
name: "only peer removed while in waitForBlock state",
startingHeight: 100,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
// switch removes P1
sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
},
},
{
name: "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4 ",
startingHeight: 100,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and make requests
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// add P2
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
// get blocks 100 and 101 from P1 and process block at height 100
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
// switch removes P2, the tallest peer; the node is already at the remaining max height, so it should finish
sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
},
},
{
name: "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
startingHeight: 100,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1 and make requests
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
// add P2
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
// get blocks 100 and 101 from P1
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
// process the block at height 100
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
// P2 becomes short
sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
},
},
{
name: "new short peer while in waitForPeer state",
startingHeight: 100,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
},
},
{
name: "new short peer while in waitForBlock state",
startingHeight: 100,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
},
},
{
name: "only peer updated with low height while in waitForBlock state",
startingHeight: 100,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
},
},
{
name: "peer does not exist in the switch",
startingHeight: 9999999,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
// add P1
sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
// send request for block 9999999
// Note: For this block request the "switch missing the peer" error is simulated,
// see implementation of bcReactor interface, sendBlockRequest(), in this file.
sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
maxNumRequests, nil, []p2p.ID{"P1"}),
},
},
}
executeFSMTests(t, tests, true)
}
func TestFSMStopFSM(t *testing.T) {
tests := []testFields{
{
name: "stopFSMEv in unknown",
steps: []fsmStepTestValues{
sStopFSMEv("unknown", "finished"),
},
},
{
name: "stopFSMEv in waitForPeer",
startingHeight: 1,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStopFSMEv("waitForPeer", "finished"),
},
},
{
name: "stopFSMEv in waitForBlock",
startingHeight: 1,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sStopFSMEv("waitForBlock", "finished"),
},
},
}
executeFSMTests(t, tests, false)
}
func TestFSMUnknownElements(t *testing.T) {
tests := []testFields{
{
name: "unknown event for state unknown",
steps: []fsmStepTestValues{
sUnknownFSMEv("unknown"),
},
},
{
name: "unknown event for state waitForPeer",
steps: []fsmStepTestValues{
sStartFSMEv(),
sUnknownFSMEv("waitForPeer"),
},
},
{
name: "unknown event for state waitForBlock",
startingHeight: 1,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sUnknownFSMEv("waitForBlock"),
},
},
}
executeFSMTests(t, tests, false)
}
func TestFSMPeerStateTimeoutEvent(t *testing.T) {
tests := []testFields{
{
name: "timeout event for state waitForPeer while in state waitForPeer - TS1",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
},
},
{
name: "timeout event for state waitForPeer while in a state != waitForPeer",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
},
},
{
name: "timeout event for state waitForBlock while in state waitForBlock ",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
},
},
{
name: "timeout event for state waitForBlock while in a state != waitForBlock",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
},
},
{
name: "timeout event for state waitForBlock with multiple peers",
startingHeight: 1,
maxRequestsPerPeer: 3,
steps: []fsmStepTestValues{
sStartFSMEv(),
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
},
},
}
executeFSMTests(t, tests, false)
}
func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
maxRequestsPerPeer int, maxPendingRequests int) testFields {
// Generate numPeers peers with random or numBlocks heights according to the randomPeerHeights flag.
peerHeights := make([]int64, numPeers)
for i := 0; i < numPeers; i++ {
if i == 0 {
peerHeights[0] = numBlocks
continue
}
if randomPeerHeights {
peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
} else {
peerHeights[i] = numBlocks
}
}
// Approximate the slice capacity to save time for appends.
testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))
testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)
// Add startFSMEv step.
testSteps = append(testSteps, sStartFSMEv())
// For each peer, add statusResponseEv step.
for i := 0; i < numPeers; i++ {
peerName := fmt.Sprintf("P%d", i)
if i == 0 {
testSteps = append(
testSteps,
sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
} else {
testSteps = append(testSteps,
sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
}
}
height := startingHeight
numBlocksReceived := 0
prevBlocks := make([]int64, 0, maxPendingRequests)
forLoop:
for i := 0; i < int(numBlocks); i++ {
// Add the makeRequestEv step periodically.
if i%maxRequestsPerPeer == 0 {
testSteps = append(
testSteps,
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
)
}
// Add the blockRespEv step
testSteps = append(
testSteps,
sBlockRespEv("waitForBlock", "waitForBlock",
"P0", height, prevBlocks))
prevBlocks = append(prevBlocks, height)
height++
numBlocksReceived++
// Add the processedBlockEv step periodically.
if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
for j := int(height) - numBlocksReceived; j < int(height); j++ {
if j >= int(numBlocks) {
// This is the last block that is processed, we should be in "finished" state.
testSteps = append(
testSteps,
sProcessedBlockEv("waitForBlock", "finished", nil))
break forLoop
}
testSteps = append(
testSteps,
sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
}
numBlocksReceived = 0
prevBlocks = make([]int64, 0, maxPendingRequests)
}
}
return testFields{
name: testName,
startingHeight: startingHeight,
maxRequestsPerPeer: maxRequestsPerPeer,
maxPendingRequests: maxPendingRequests,
steps: testSteps,
}
}
const (
maxStartingHeightTest = 100
maxRequestsPerPeerTest = 20
maxTotalPendingRequestsTest = 600
maxNumPeersTest = 1000
maxNumBlocksInChainTest = 10000 // should be smaller than 9999999
)
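// The 9999999 ceiling matters because the test reactor's sendBlockRequest
// (further down in this file) treats a request for height 9999999 as a
// sentinel and returns errNilPeerForBlockRequest, simulating a peer that is
// missing from the switch.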
func makeCorrectTransitionSequenceWithRandomParameters() testFields {
// Generate a starting height for fast sync.
startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)
// Generate the number of requests per peer.
maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1
// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer
// Generate the number of blocks to be synced.
numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight
// Generate a number of peers.
numPeers := tmrand.Intn(maxNumPeersTest) + 1
return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}
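// shouldApplyProcessedBlockEvStep reports whether a processedBlockEv step can
// be applied: with randomized sequences the pool may not yet hold both the
// block at the current height and the one above it, in which case the step
// must be skipped.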
func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
if step.event == processedBlockEv {
_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
if err == errMissingBlock {
return false
}
_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
if err == errMissingBlock {
return false
}
}
return true
}
func TestFSMCorrectTransitionSequences(t *testing.T) {
tests := []testFields{
makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
makeCorrectTransitionSequenceWithRandomParameters(),
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// Create test reactor
testBcR := newTestReactor(tt.startingHeight)
if tt.maxRequestsPerPeer != 0 {
maxRequestsPerPeer = tt.maxRequestsPerPeer
}
for _, step := range tt.steps {
step := step
assert.Equal(t, step.currentState, testBcR.fsm.state.name)
oldNumStatusRequests := testBcR.numStatusRequests
fixBlockResponseEvStep(&step, testBcR)
if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
continue
}
fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
assert.Equal(t, step.wantErr, fsmErr)
if step.wantStatusReqSent {
assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
} else {
assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
}
assert.Equal(t, step.wantState, testBcR.fsm.state.name)
if step.wantState == "finished" {
assert.True(t, testBcR.fsm.isCaughtUp())
}
}
})
}
}
// ----------------------------------------
// implements the bcReactor interface
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
testR.lastPeerError.peerID = peerID
testR.lastPeerError.err = err
}
func (testR *testReactor) sendStatusRequest() {
testR.logger.Info("Reactor received sendStatusRequest call from FSM")
testR.numStatusRequests++
}
func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
testR.numBlockRequests++
testR.lastBlockRequest.peerID = peerID
testR.lastBlockRequest.height = height
if height == 9999999 {
// simulate the switch not having the peer
return errNilPeerForBlockRequest
}
return nil
}
func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
if _, ok := testR.stateTimerStarts[name]; !ok {
testR.stateTimerStarts[name] = 1
} else {
testR.stateTimerStarts[name]++
}
}
func (testR *testReactor) switchToConsensus() {
}
// ----------------------------------------


@@ -0,0 +1,365 @@
package v1
import (
"fmt"
"os"
"sort"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
var config *cfg.Config
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
}
func makeVote(
t *testing.T,
header *types.Header,
blockID types.BlockID,
valset *types.ValidatorSet,
privVal types.PrivValidator) *types.Vote {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
valIdx, _ := valset.GetByAddress(pubKey.Address())
vote := &types.Vote{
ValidatorAddress: pubKey.Address(),
ValidatorIndex: valIdx,
Height: header.Height,
Round: 1,
Timestamp: tmtime.Now(),
Type: tmproto.PrecommitType,
BlockID: blockID,
}
vpb := vote.ToProto()
_ = privVal.SignVote(header.ChainID, vpb)
vote.Signature = vpb.Signature
return vote
}
type BlockchainReactorPair struct {
bcR *BlockchainReactor
conR *consensusReactorTest
}
func newBlockchainReactor(
t *testing.T,
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) *BlockchainReactor {
if len(privVals) != 1 {
panic("only support one validator")
}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
if err != nil {
panic(fmt.Errorf("error start app: %w", err))
}
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
// Make the BlockchainReactor itself.
// NOTE we have to create and commit the blocks first because
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
// let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 1, types.BlockID{}, nil)
if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
panic(fmt.Errorf("error apply block: %w", err))
}
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
return bcReactor
}
func newBlockchainReactorPair(
t *testing.T,
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) BlockchainReactorPair {
consensusReactor := &consensusReactorTest{}
consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)
return BlockchainReactorPair{
newBlockchainReactor(t, logger, genDoc, privVals, maxBlockHeight),
consensusReactor}
}
type consensusReactorTest struct {
p2p.BaseReactor // BaseService + p2p.Switch
switchedToConsensus bool
mtx sync.Mutex
}
func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) {
conR.mtx.Lock()
defer conR.mtx.Unlock()
conR.switchedToConsensus = true
}
func TestFastSyncNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_new_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(65)
reactorPairs := make([]BlockchainReactorPair, 2)
logger := log.TestingLogger()
reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight)
reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0)
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
s.AddReactor("CONSENSUS", reactorPairs[i].conR)
moduleName := fmt.Sprintf("blockchain-%v", i)
reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
_ = r.bcR.Stop()
_ = r.conR.Stop()
}
}()
tests := []struct {
height int64
existent bool
}{
{maxBlockHeight + 2, false},
{10, true},
{1, true},
{maxBlockHeight + 100, false},
}
for {
time.Sleep(10 * time.Millisecond)
reactorPairs[1].conR.mtx.Lock()
if reactorPairs[1].conR.switchedToConsensus {
reactorPairs[1].conR.mtx.Unlock()
break
}
reactorPairs[1].conR.mtx.Unlock()
}
assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())
for _, tt := range tests {
block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
if tt.existent {
assert.True(t, block != nil)
} else {
assert.True(t, block == nil)
}
}
}
// NOTE: This is hard to test without
// an easy way to add a test peer to the switch,
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP connection, but
// that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
numNodes := 4
maxBlockHeight := int64(148)
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
defer func() {
_ = otherChain.bcR.Stop()
_ = otherChain.conR.Stop()
}()
reactorPairs := make([]BlockchainReactorPair, numNodes)
logger := make([]log.Logger, numNodes)
for i := 0; i < numNodes; i++ {
logger[i] = log.TestingLogger()
height := int64(0)
if i == 0 {
height = maxBlockHeight
}
reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height)
}
switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
reactorPairs[i].conR.mtx.Lock()
s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
s.AddReactor("CONSENSUS", reactorPairs[i].conR)
moduleName := fmt.Sprintf("blockchain-%v", i)
reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
reactorPairs[i].conR.mtx.Unlock()
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
_ = r.bcR.Stop()
_ = r.conR.Stop()
}
}()
outerFor:
for {
time.Sleep(10 * time.Millisecond)
for i := 0; i < numNodes; i++ {
reactorPairs[i].conR.mtx.Lock()
if !reactorPairs[i].conR.switchedToConsensus {
reactorPairs[i].conR.mtx.Unlock()
continue outerFor
}
reactorPairs[i].conR.mtx.Unlock()
}
break
}
// at this point, reactorPairs[0..3] have all switched to consensus
assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())
// give the last reactorPair a block store from a different chain, so it will serve invalid blocks
reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store
lastLogger := log.TestingLogger()
lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0)
reactorPairs = append(reactorPairs, lastReactorPair)
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
return s
}, p2p.Connect2Switches)...)
for i := 0; i < len(reactorPairs)-1; i++ {
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
}
for {
time.Sleep(1 * time.Second)
lastReactorPair.conR.mtx.Lock()
if lastReactorPair.conR.switchedToConsensus {
lastReactorPair.conR.mtx.Unlock()
break
}
lastReactorPair.conR.mtx.Unlock()
if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
break
}
}
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}
//----------------------------------------------
// utility funcs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}
type testApp struct {
abci.BaseApplication
}


@@ -1,26 +1,21 @@
package v2
import (
"errors"
"fmt"
"github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
-var (
-errPeerQueueFull = errors.New("peer queue full")
-)
type iIO interface {
-sendBlockRequest(peer p2p.Peer, height int64) error
-sendBlockToPeer(block *types.Block, peer p2p.Peer) error
-sendBlockNotFound(height int64, peer p2p.Peer) error
-sendStatusResponse(base, height int64, peer p2p.Peer) error
+sendBlockRequest(peerID p2p.ID, height int64) error
+sendBlockToPeer(block *types.Block, peerID p2p.ID) error
+sendBlockNotFound(height int64, peerID p2p.ID) error
+sendStatusResponse(base, height int64, peerID p2p.ID) error
-sendStatusRequest(peer p2p.Peer) error
broadcastStatusRequest() error
trySwitchToConsensus(state state.State, skipWAL bool) bool
@@ -47,50 +42,46 @@ type consensusReactor interface {
SwitchToConsensus(state state.State, skipWAL bool)
}
-func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error {
-msgProto := &bcproto.Message{
-Sum: &bcproto.Message_BlockRequest{
-BlockRequest: &bcproto.BlockRequest{
-Height: height,
-},
-},
+func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
+peer := sio.sw.Peers().Get(peerID)
+if peer == nil {
+return fmt.Errorf("peer not found")
}
-msgBytes, err := proto.Marshal(msgProto)
+msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
if err != nil {
return err
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
-return errPeerQueueFull
+return fmt.Errorf("send queue full")
}
return nil
}
-func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error {
-msgProto := &bcproto.Message{
-Sum: &bcproto.Message_StatusResponse{
-StatusResponse: &bcproto.StatusResponse{
-Height: height,
-Base: base,
-},
-},
+func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error {
+peer := sio.sw.Peers().Get(peerID)
+if peer == nil {
+return fmt.Errorf("peer not found")
}
-msgBytes, err := proto.Marshal(msgProto)
+msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-return errPeerQueueFull
+return fmt.Errorf("peer queue full")
}
return nil
}
-func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
+func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
+peer := sio.sw.Peers().Get(peerID)
+if peer == nil {
+return fmt.Errorf("peer not found")
+}
if block == nil {
panic("trying to send nil block")
}
@@ -100,42 +91,29 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
return err
}
msgProto := &bcproto.Message{
Sum: &bcproto.Message_BlockResponse{
BlockResponse: &bcproto.BlockResponse{
Block: bpb,
},
},
}
msgBytes, err := proto.Marshal(msgProto)
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-return errPeerQueueFull
+return fmt.Errorf("peer queue full")
}
return nil
}
-func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error {
-msgProto := &bcproto.Message{
-Sum: &bcproto.Message_NoBlockResponse{
-NoBlockResponse: &bcproto.NoBlockResponse{
-Height: height,
-},
-},
+func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
+peer := sio.sw.Peers().Get(peerID)
+if peer == nil {
+return fmt.Errorf("peer not found")
}
-msgBytes, err := proto.Marshal(msgProto)
+msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-return errPeerQueueFull
+return fmt.Errorf("peer queue full")
}
return nil
@@ -149,33 +127,8 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
return ok
}
-func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error {
-msgProto := &bcproto.Message{
-Sum: &bcproto.Message_StatusRequest{
-StatusRequest: &bcproto.StatusRequest{},
-},
-}
-msgBytes, err := proto.Marshal(msgProto)
-if err != nil {
-return err
-}
-if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-return errPeerQueueFull
-}
-return nil
-}
func (sio *switchIO) broadcastStatusRequest() error {
msgProto := &bcproto.Message{
Sum: &bcproto.Message_StatusRequest{
StatusRequest: &bcproto.StatusRequest{},
},
}
msgBytes, err := proto.Marshal(msgProto)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
return err
}


@@ -13,8 +13,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
-firstPeerID p2p.NodeID
-secondPeerID p2p.NodeID
+firstPeerID p2p.ID
+secondPeerID p2p.ID
}
func (e pcBlockVerificationFailure) String() string {
@@ -26,7 +26,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
-peerID p2p.NodeID
+peerID p2p.ID
}
func (e pcBlockProcessed) String() string {
@@ -46,7 +46,7 @@ func (p pcFinished) Error() string {
type queueItem struct {
block *types.Block
-peerID p2p.NodeID
+peerID p2p.ID
}
type blockQueue map[int64]queueItem
@@ -95,7 +95,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}
-func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
+func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -110,7 +110,7 @@ func (state *pcState) height() int64 {
}
// purgePeer moves all unprocessed blocks from the queue
-func (state *pcState) purgePeer(peerID p2p.NodeID) {
+func (state *pcState) purgePeer(peerID p2p.ID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {


@@ -40,7 +40,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)
for _, item := range p.items {
-state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
+state.enqueue(p2p.ID(item.pid), makePcBlock(item.height), item.height)
}
state.blocksSynced = p.blocksSynced
@@ -48,7 +48,7 @@ func makeState(p *params) *pcState {
return state
}
-func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
+func mBlockResponse(peerID p2p.ID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),


@@ -5,8 +5,6 @@ import (
"fmt"
"time"
proto "github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
@@ -49,6 +47,11 @@ type BlockchainReactor struct {
store blockStore
}
+//nolint:unused,deadcode
+type blockVerifier interface {
+VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
+}
type blockApplier interface {
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}
@@ -211,7 +214,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
-peerID p2p.NodeID
+peerID p2p.ID
size int64
block *types.Block
}
@@ -225,7 +228,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
-peerID p2p.NodeID
+peerID p2p.ID
height int64
}
@@ -238,7 +241,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
-peerID p2p.NodeID
+peerID p2p.ID
base int64
height int64
}
@@ -251,7 +254,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
-peerID p2p.NodeID
+peerID p2p.ID
}
func (resp bcAddNewPeer) String() string {
@@ -261,7 +264,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
-peerID p2p.NodeID
+peerID p2p.ID
reason interface{}
}
@@ -313,9 +316,6 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
defer doStatusTk.Stop()
doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers
-// Memoize the scSchedulerFail error to avoid printing it every scheduleFreq.
-var scSchedulerFailErr error
// XXX: Extract timers to make testing atemporal
for {
select {
@@ -380,22 +380,14 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
r.logger.Error("Error reporting peer", "err", err)
}
case scBlockRequest:
-peer := r.Switch.Peers().Get(event.peerID)
-if peer == nil {
-r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID)
-continue
-}
-if err := r.io.sendBlockRequest(peer, event.height); err != nil {
+if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
r.logger.Error("Error sending block request", "err", err)
}
case scFinishedEv:
r.processor.send(event)
r.scheduler.stop()
case scSchedulerFail:
-if scSchedulerFailErr != event.reason {
-r.logger.Error("Scheduler failure", "err", event.reason.Error())
-scSchedulerFailErr = event.reason
-}
+r.logger.Error("Scheduler failure", "err", event.reason.Error())
case scPeersPruned:
// Remove peers from the processor.
for _, peerID := range event.peers {
@@ -463,62 +455,54 @@ func (r *BlockchainReactor) Stop() error {
}
// Receive implements Reactor by handling different message types.
-// XXX: do not call any methods that can block or incur heavy processing.
-// https://github.com/tendermint/tendermint/issues/2888
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-logger := r.logger.With("src", src.ID(), "chID", chID)
-msgProto := new(bcproto.Message)
-if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
-logger.Error("error decoding message", "err", err)
+msg, err := bc.DecodeMsg(msgBytes)
+if err != nil {
+r.logger.Error("error decoding message",
+"src", src.ID(), "chId", chID, "msg", msg, "err", err)
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
-if err := msgProto.Validate(); err != nil {
-logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
+if err = bc.ValidateMsg(msg); err != nil {
+r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
r.logger.Debug("received", "msg", msgProto)
r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)
switch msg := msgProto.Sum.(type) {
case *bcproto.Message_StatusRequest:
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil {
logger.Error("Could not send status message to src peer")
switch msg := msg.(type) {
case *bcproto.StatusRequest:
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
r.logger.Error("Could not send status message to peer", "src", src)
}
-case *bcproto.Message_BlockRequest:
-block := r.store.LoadBlock(msg.BlockRequest.Height)
+case *bcproto.BlockRequest:
+block := r.store.LoadBlock(msg.Height)
if block != nil {
-if err := r.io.sendBlockToPeer(block, src); err != nil {
-logger.Error("Could not send block message to src peer", "err", err)
+if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
+r.logger.Error("Could not send block message to peer: ", err)
}
} else {
logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height)
if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil {
logger.Error("Couldn't send block not found msg", "err", err)
r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
peerID := src.ID()
if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
r.logger.Error("Couldn't send block not found: ", err)
}
}
case *bcproto.Message_StatusResponse:
case *bcproto.StatusResponse:
r.mtx.RLock()
if r.events != nil {
r.events <- bcStatusResponse{
peerID: src.ID(),
base: msg.StatusResponse.Base,
height: msg.StatusResponse.Height,
}
r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
}
r.mtx.RUnlock()
case *bcproto.Message_BlockResponse:
bi, err := types.BlockFromProto(msg.BlockResponse.Block)
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
logger.Error("error transitioning block from protobuf", "err", err)
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
r.logger.Error("error transitioning block from protobuf", "err", err)
return
}
r.mtx.RLock()
@@ -532,14 +516,10 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
}
r.mtx.RUnlock()
case *bcproto.Message_NoBlockResponse:
case *bcproto.NoBlockResponse:
r.mtx.RLock()
if r.events != nil {
r.events <- bcNoBlockResponse{
peerID: src.ID(),
height: msg.NoBlockResponse.Height,
time: time.Now(),
}
r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
}
r.mtx.RUnlock()
}
@@ -547,16 +527,10 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
// AddPeer implements Reactor interface
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer)
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
if err != nil {
r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err)
r.logger.Error("Could not send status message to peer new", "src", peer.ID, "height", r.SyncHeight())
}
err = r.io.sendStatusRequest(peer)
if err != nil {
r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err)
}
r.mtx.RLock()
defer r.mtx.RUnlock()
if r.events != nil {

View File
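
Note: the Receive hunk above switches between two decode paths. The master side unmarshals a bcproto.Message envelope and calls its Validate method; the v0.34.x side goes through bc.DecodeMsg and bc.ValidateMsg. A minimal sketch of the envelope path, assuming the import paths shown in the hunk:

package bcdemo

import (
    "fmt"

    "github.com/gogo/protobuf/proto"

    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

// decodeBlockchainMessage is a hypothetical helper mirroring the master-side
// Receive: unmarshal the envelope first, then reject malformed payloads
// before dispatching on the Sum oneof.
func decodeBlockchainMessage(bz []byte) (*bcproto.Message, error) {
    msg := new(bcproto.Message)
    if err := proto.Unmarshal(bz, msg); err != nil {
        return nil, fmt.Errorf("decode: %w", err)
    }
    if err := msg.Validate(); err != nil {
        return nil, fmt.Errorf("validate: %w", err)
    }
    return msg, nil
}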

@@ -9,13 +9,13 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
@@ -32,11 +32,11 @@ import (
type mockPeer struct {
service.Service
id p2p.NodeID
id p2p.ID
}
func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) ID() p2p.ID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
@@ -45,9 +45,9 @@ func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{
NodeID: "",
ListenAddr: "",
return p2p.DefaultNodeInfo{
DefaultNodeID: "",
ListenAddr: "",
}
}
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
@@ -59,19 +59,22 @@ func (mp mockPeer) TrySend(byte, []byte) bool { return true }
func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{} { return struct{}{} }
//nolint:unused
// nolint:unused // ignore
type mockBlockStore struct {
blocks map[int64]*types.Block
}
// nolint:unused // ignore
func (ml *mockBlockStore) Height() int64 {
return int64(len(ml.blocks))
}
// nolint:unused // ignore
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
return ml.blocks[height]
}
// nolint:unused // ignore
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
ml.blocks[block.Height] = block
}
@@ -93,37 +96,34 @@ type mockSwitchIo struct {
numStatusResponse int
numBlockResponse int
numNoBlockResponse int
numStatusRequest int
}
var _ iIO = (*mockSwitchIo)(nil)
func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error {
func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error {
return nil
}
func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error {
func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.numStatusResponse++
return nil
}
func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error {
func (sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.numBlockResponse++
return nil
}
func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error {
func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.numNoBlockResponse++
return nil
}
func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool {
func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.switchedToConsensus = true
@@ -134,13 +134,6 @@ func (sio *mockSwitchIo) broadcastStatusRequest() error {
return nil
}
func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.numStatusRequest++
return nil
}
type testReactorParams struct {
logger log.Logger
genDoc *types.GenesisDoc
@@ -409,37 +402,23 @@ func TestReactorHelperMode(t *testing.T) {
switch ev := step.event.(type) {
case bcproto.StatusRequest:
old := mockSwitch.numStatusResponse
msgProto := new(bcproto.Message)
require.NoError(t, msgProto.Wrap(&ev))
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
old := mockSwitch.numNoBlockResponse
msgProto := new(bcproto.Message)
require.NoError(t, msgProto.Wrap(&ev))
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
msgProto := new(bcproto.Message)
require.NoError(t, msgProto.Wrap(&ev))
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}

View File
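
Note: on the v0.34.x side, TestReactorHelperMode above drives the reactor by hand: it encodes each event with bc.EncodeMsg and feeds the raw bytes to Receive. A sketch of that pattern as a helper; BlockchainReactor, mockPeer, and channelID are the names from the test file above, and the helper itself is hypothetical:

// feedStatusRequest assumes the reactor, mockPeer, and imports from the test
// file above; it exercises the same encode-then-Receive path the test uses.
func feedStatusRequest(t *testing.T, reactor *BlockchainReactor, channelID byte) {
    msg, err := bc.EncodeMsg(&bcproto.StatusRequest{})
    require.NoError(t, err)
    reactor.Receive(channelID, mockPeer{id: p2p.ID("P1")}, msg)
    // the reactor replies through its iIO, bumping numStatusResponse on the mock
}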

@@ -26,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID p2p.NodeID
peerID p2p.ID
height int64
}
@@ -37,7 +37,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID p2p.NodeID
peerID p2p.ID
block *types.Block
}
@@ -48,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID p2p.NodeID
peerID p2p.ID
reason error
}
@@ -59,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []p2p.NodeID
peers []p2p.ID
}
func (e scPeersPruned) String() string {
@@ -126,7 +126,7 @@ func (e peerState) String() string {
}
type scPeer struct {
peerID p2p.NodeID
peerID p2p.ID
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -143,7 +143,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}
func newScPeer(peerID p2p.NodeID) *scPeer {
func newScPeer(peerID p2p.ID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -171,7 +171,7 @@ type scheduler struct {
// a map of peerID to scheduler specific peer struct `scPeer` used to keep
// track of peer specific state
peers map[p2p.NodeID]*scPeer
peers map[p2p.ID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune
@@ -183,13 +183,13 @@ type scheduler struct {
blockStates map[int64]blockState
// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]p2p.NodeID
pendingBlocks map[int64]p2p.ID
// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time
// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]p2p.NodeID
receivedBlocks map[int64]p2p.ID
}
func (sc scheduler) String() string {
@@ -204,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
peers: make(map[p2p.ID]*scPeer),
pendingBlocks: make(map[int64]p2p.ID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]p2p.NodeID),
receivedBlocks: make(map[int64]p2p.ID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
@@ -216,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -238,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
return nil
}
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.ID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -298,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
}
}
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)
if peer.state == peerStateRemoved {
@@ -333,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID {
peers := make([]p2p.ID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -346,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
return peers
}
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
prunable := make([]p2p.ID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}
// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -390,7 +390,7 @@ func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, n
return nil
}
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
@@ -472,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -482,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
return heights
}
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -490,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]p2p.NodeID)
pendingFrom := make(map[int][]p2p.ID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -509,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
}
// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.NodeID
type PeerByID []p2p.ID
func (peers PeerByID) Len() int {
return len(peers)

View File
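
Note: selectPeer above breaks ties between candidate peers by sorting their IDs, which keeps scheduling deterministic; PeerByID supplies the sort.Interface. A self-contained sketch with Less and Swap filled in, assuming plain string ordering is equivalent to the bytes.Compare the real implementation uses (p2p.ID is a string type):

package main

import (
    "fmt"
    "sort"

    "github.com/tendermint/tendermint/p2p"
)

// peerByID is a stand-in for the PeerByID type declared above.
type peerByID []p2p.ID

func (peers peerByID) Len() int           { return len(peers) }
func (peers peerByID) Less(i, j int) bool { return peers[i] < peers[j] }
func (peers peerByID) Swap(i, j int)      { peers[i], peers[j] = peers[j], peers[i] }

func main() {
    peers := peerByID{"P3", "P1", "P2"}
    sort.Sort(peers)
    fmt.Println(peers) // [P1 P2 P3]
}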

@@ -20,9 +20,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]p2p.NodeID
pending map[int64]p2p.ID
pendingTime map[int64]time.Time
received map[int64]p2p.NodeID
received map[int64]p2p.ID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -41,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
}
func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[p2p.NodeID]*scPeer)
peers := make(map[p2p.ID]*scPeer)
var maxHeight int64
initHeight := params.initHeight
@@ -54,8 +54,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}
for id, peer := range params.peers {
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
peer.peerID = p2p.ID(id)
peers[p2p.ID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
@@ -122,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[p2p.ID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -130,7 +130,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[p2p.ID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -140,7 +140,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[p2p.ID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -150,7 +150,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[p2p.ID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -160,7 +160,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[p2p.ID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -187,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID p2p.ID
}
tests := []struct {
name string
@@ -244,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID p2p.ID
time time.Time
}
@@ -316,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []p2p.NodeID
wantResult []p2p.ID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "mixed peers",
@@ -341,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
wantResult: []p2p.ID{"P4", "P5", "P6"},
},
}
@@ -361,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID p2p.ID
}
tests := []struct {
name string
@@ -424,13 +424,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]p2p.ID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]p2p.NodeID{},
pending: map[int64]p2p.ID{},
},
},
{
@@ -438,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{1: "P1"},
received: map[int64]p2p.ID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]p2p.NodeID{},
received: map[int64]p2p.ID{},
},
},
{
@@ -452,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.ID{1: "P1", 3: "P1"},
received: map[int64]p2p.ID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
pending: map[int64]p2p.ID{},
received: map[int64]p2p.ID{},
},
},
{
@@ -471,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]p2p.ID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.ID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -481,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
pending: map[int64]p2p.ID{3: "P2"},
received: map[int64]p2p.ID{4: "P2", 5: "P2"},
},
},
}
@@ -501,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID p2p.ID
base int64
height int64
}
@@ -622,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []p2p.NodeID
wantResult []p2p.ID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "one Ready shorter peer",
@@ -649,7 +649,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "one Ready equal peer",
@@ -658,7 +658,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []p2p.ID{"P1"},
},
{
name: "one Ready higher peer",
@@ -668,7 +668,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []p2p.ID{"P1"},
},
{
name: "one Ready higher peer at base",
@@ -678,7 +678,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []p2p.ID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@@ -688,7 +688,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{},
wantResult: []p2p.ID{},
},
{
name: "multiple mixed peers",
@@ -703,7 +703,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []p2p.NodeID{"P2", "P5"},
wantResult: []p2p.ID{"P2", "P5"},
},
}
@@ -725,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID p2p.ID
height int64
tm time.Time
}
@@ -821,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]p2p.ID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -851,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID p2p.ID
height int64
size int64
tm time.Time
@@ -891,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -900,7 +900,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -909,13 +909,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{},
pending: map[int64]p2p.ID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{},
pending: map[int64]p2p.ID{},
},
wantErr: true,
},
@@ -924,14 +924,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -941,16 +941,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]p2p.ID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]p2p.NodeID{2: "P1"},
received: map[int64]p2p.ID{2: "P1"},
},
},
}
@@ -991,7 +991,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]p2p.ID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1009,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]p2p.ID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]p2p.NodeID{1: "P1"}},
received: map[int64]p2p.ID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]p2p.ID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1101,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1111,7 +1111,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1122,7 +1122,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]p2p.NodeID{4: "P1"},
received: map[int64]p2p.ID{4: "P1"},
},
wantResult: true,
},
@@ -1131,7 +1131,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.ID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1179,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1190,7 +1190,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1209,7 +1209,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]p2p.ID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1239,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult p2p.NodeID
wantResult p2p.ID
wantError bool
}{
{
@@ -1307,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]p2p.NodeID{
pending: map[int64]p2p.ID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1323,7 +1323,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]p2p.NodeID{
pending: map[int64]p2p.ID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1373,9 +1373,6 @@ func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, even
t.Errorf("error = %v, wantErr %v", err, wantErr)
return
}
if !assert.IsType(t, wantEvent, event) {
t.Log(fmt.Sprintf("Wrong type received, got: %v", event))
}
switch wantEvent := wantEvent.(type) {
case scPeerError:
assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID)
@@ -1392,7 +1389,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.NodeID("P1"),
peerID: p2p.ID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1433,7 +1430,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]p2p.ID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1444,7 +1441,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1455,11 +1452,11 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(6)},
},
}
@@ -1477,7 +1474,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.NodeID("P1"),
peerID: p2p.ID("P1"),
height: 6,
}
@@ -1513,14 +1510,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]p2p.ID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]p2p.ID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1529,7 +1526,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1552,7 +1549,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: p2p.NodeID("P1"),
peerID: p2p.ID("P1"),
height: 6,
}
@@ -1579,7 +1576,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1591,7 +1588,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.ID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1602,8 +1599,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1646,7 +1643,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1658,7 +1655,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]p2p.ID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1670,7 +1667,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.ID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1681,8 +1678,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1697,8 +1694,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1717,7 +1714,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: p2p.NodeID("P1"),
peerID: p2p.ID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1828,7 +1825,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []p2p.ID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1926,7 +1923,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]p2p.NodeID{
pending: map[int64]p2p.ID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1944,7 +1941,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{
pending: map[int64]p2p.ID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -2046,8 +2043,6 @@ func TestScHandle(t *testing.T) {
priorityNormal
}
block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3)
t0 := time.Now()
tick := make([]time.Time, 100)
for i := range tick {
@@ -2106,7 +2101,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]p2p.ID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@@ -2118,7 +2113,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@@ -2130,67 +2125,67 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
},
{ // block response 1
args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}},
wantEvent: scBlockReceived{peerID: "P1", block: block1},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: makeScBlock(1)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(1)},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pending: map[int64]p2p.ID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]p2p.NodeID{1: "P1"},
received: map[int64]p2p.ID{1: "P1"},
height: 1,
},
},
{ // block response 2
args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}},
wantEvent: scBlockReceived{peerID: "P1", block: block2},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: makeScBlock(2)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(2)},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{3: "P1"},
pending: map[int64]p2p.ID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
received: map[int64]p2p.ID{1: "P1", 2: "P1"},
height: 1,
},
},
{ // block response 3
args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}},
wantEvent: scBlockReceived{peerID: "P1", block: block3},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: makeScBlock(3)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(3)},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
received: map[int64]p2p.ID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]p2p.NodeID{3: "P1"},
received: map[int64]p2p.ID{3: "P1"},
height: 3,
},
},
@@ -2206,7 +2201,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -2217,7 +2212,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{},
received: map[int64]p2p.ID{},
height: 1,
},
},

View File
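
Note: TestScHandle above precomputes a table of synthetic timestamps so every step in the scenario sees a strictly increasing clock; the hunk truncates the loop body. A sketch of the likely setup, where the per-tick step size is an assumption:

t0 := time.Now()
tick := make([]time.Time, 100)
for i := range tick {
    // tick[i] is t0 advanced by i fixed steps; the exact step is assumed here
    tick[i] = t0.Add(time.Duration(i) * time.Millisecond)
}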

@@ -1,13 +0,0 @@
# The version of the generation template.
# Required.
# The only currently-valid value is v1beta1.
version: v1beta1
# The plugins to run.
plugins:
# The name of the plugin.
- name: gogofaster
# The relative output directory.
out: proto
# Any options to provide to the plugin.
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative

View File

@@ -1,5 +1,3 @@
version: v1beta1
build:
roots:
- proto

View File

@@ -1,50 +1,24 @@
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"time"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
grpcprivval "github.com/tendermint/tendermint/privval/grpc"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
)
var (
// Create a metrics registry.
reg = prometheus.NewRegistry()
// Create some standard server metrics.
grpcMetrics = grpc_prometheus.NewServerMetrics()
)
func main() {
var (
addr = flag.String("addr", "127.0.0.1:26659", "Address to listen on (host:port)")
addr = flag.String("addr", ":26659", "Address of client to connect to")
chainID = flag.String("chain-id", "mychain", "chain id")
privValKeyPath = flag.String("priv-key", "", "priv val key file path")
privValStatePath = flag.String("priv-state", "", "priv val state file path")
insecure = flag.Bool("insecure", false, "allow server to run insecurely (no TLS)")
certFile = flag.String("certfile", "", "absolute path to server certificate")
keyFile = flag.String("keyfile", "", "absolute path to server key")
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")
logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
@@ -58,106 +32,39 @@ func main() {
"chainID", *chainID,
"privKeyPath", *privValKeyPath,
"privStatePath", *privValStatePath,
"insecure", *insecure,
"certFile", *certFile,
"keyFile", *keyFile,
"rootCA", *rootCA,
)
pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
opts := []grpc.ServerOption{}
if !*insecure {
certificate, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to load X509 key pair: %v", err)
os.Exit(1)
}
certPool := x509.NewCertPool()
bs, err := ioutil.ReadFile(*rootCA)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err)
os.Exit(1)
}
if ok := certPool.AppendCertsFromPEM(bs); !ok {
fmt.Fprintf(os.Stderr, "failed to append client certs")
os.Exit(1)
}
tlsConfig := &tls.Config{
ClientAuth: tls.RequireAndVerifyClientCert,
Certificates: []tls.Certificate{certificate},
ClientCAs: certPool,
MinVersion: tls.VersionTLS13,
}
creds := grpc.Creds(credentials.NewTLS(tlsConfig))
opts = append(opts, creds)
logger.Info("SignerServer: Creating security credentials")
} else {
logger.Info("SignerServer: You are using an insecure gRPC connection!")
}
// add prometheus metrics for unary RPC calls
opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))
ss := grpcprivval.NewSignerServer(*chainID, pv, logger)
var dialer privval.SocketDialer
protocol, address := tmnet.ProtocolAndAddress(*addr)
switch protocol {
case "unix":
dialer = privval.DialUnixFn(address)
case "tcp":
connTimeout := 3 * time.Second // TODO
dialer = privval.DialTCPFn(address, connTimeout, ed25519.GenPrivKey())
default:
logger.Error("Unknown protocol", "protocol", protocol)
os.Exit(1)
}
lis, err := net.Listen(protocol, address)
sd := privval.NewSignerDialerEndpoint(logger, dialer)
ss := privval.NewSignerServer(sd, *chainID, pv)
err := ss.Start()
if err != nil {
fmt.Fprintf(os.Stderr, "SignerServer: Failed to listen %v", err)
os.Exit(1)
}
s := grpc.NewServer(opts...)
privvalproto.RegisterPrivValidatorAPIServer(s, ss)
var httpSrv *http.Server
if *prometheusAddr != "" {
httpSrv = registerPrometheus(*prometheusAddr, s)
}
logger.Info("SignerServer: Starting grpc server")
if err := s.Serve(lis); err != nil {
fmt.Fprintf(os.Stderr, "Unable to listen on port %s: %v", *addr, err)
os.Exit(1)
panic(err)
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
logger.Debug("SignerServer: calling Close")
if *prometheusAddr != "" {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
if err := httpSrv.Shutdown(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Unable to stop http server: %v", err)
os.Exit(1)
}
err := ss.Stop()
if err != nil {
panic(err)
}
s.GracefulStop()
})
// Run forever.
select {}
}
func registerPrometheus(addr string, s *grpc.Server) *http.Server {
// Initialize all metrics.
grpcMetrics.InitializeMetrics(s)
// create http server to serve prometheus
httpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: addr}
go func() {
if err := httpServer.ListenAndServe(); err != nil {
fmt.Fprintf(os.Stderr, "Unable to start a http server: %v", err)
os.Exit(1)
}
}()
return httpServer
}

View File
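
Note: the removed master-side server enforces mutual TLS (RequireAndVerifyClientCert plus a client-CA pool). For context, a minimal sketch of the matching client-side dial; the helper name and file layout are assumptions, while the crypto/tls and grpc-go calls are standard APIs:

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

// dialSignerServer is a hypothetical client counterpart to the server TLS
// setup above: present a client certificate and verify the server against
// a known CA.
func dialSignerServer(addr, certFile, keyFile, rootCA string) (*grpc.ClientConn, error) {
    cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    if err != nil {
        return nil, fmt.Errorf("load key pair: %w", err)
    }
    bs, err := ioutil.ReadFile(rootCA)
    if err != nil {
        return nil, fmt.Errorf("read CA cert: %w", err)
    }
    pool := x509.NewCertPool()
    if ok := pool.AppendCertsFromPEM(bs); !ok {
        return nil, fmt.Errorf("failed to append CA certs")
    }
    creds := credentials.NewTLS(&tls.Config{
        Certificates: []tls.Certificate{cert}, // client cert the server verifies
        RootCAs:      pool,                    // CA used to verify the server
        MinVersion:   tls.VersionTLS13,
    })
    return grpc.Dial(addr, grpc.WithTransportCredentials(creds))
}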

@@ -5,30 +5,30 @@ import (
"github.com/spf13/cobra"
tmjson "github.com/tendermint/tendermint/libs/json"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/p2p"
)
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
// the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen-node-key",
Aliases: []string{"gen_node_key"},
Short: "Generate a new node key",
RunE: genNodeKey,
Short: "Generate a node key for this node and print its ID",
PreRun: deprecateSnakeCase,
RunE: genNodeKey,
}
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := p2p.GenNodeKey()
bz, err := tmjson.Marshal(nodeKey)
if err != nil {
return fmt.Errorf("nodeKey -> json: %w", err)
nodeKeyFile := config.NodeKeyFile()
if tmos.FileExists(nodeKeyFile) {
return fmt.Errorf("node key at %s already exists", nodeKeyFile)
}
fmt.Printf(`%v
`, string(bz))
nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
return nil
}

View File
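
Note: the v0.34.x gen-node-key path goes through p2p.LoadOrGenNodeKey, which creates the key file when it is missing and simply loads it otherwise. A minimal sketch of the same call outside the command; the file path is an arbitrary example:

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/p2p"
)

func main() {
    // creates /tmp/node_key.json on first run, loads it on later runs
    nodeKey, err := p2p.LoadOrGenNodeKey("/tmp/node_key.json")
    if err != nil {
        panic(err)
    }
    fmt.Println(nodeKey.ID()) // stable p2p.ID derived from the node's public key
}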

@@ -7,7 +7,6 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
// GenValidatorCmd allows the generation of a keypair for a
@@ -16,28 +15,16 @@ var GenValidatorCmd = &cobra.Command{
Use: "gen-validator",
Aliases: []string{"gen_validator"},
Short: "Generate new validator keypair",
RunE: genValidator,
PreRun: deprecateSnakeCase,
Run: genValidator,
}
func init() {
GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}
func genValidator(cmd *cobra.Command, args []string) error {
pv, err := privval.GenFilePV("", "", keyType)
if err != nil {
return err
}
func genValidator(cmd *cobra.Command, args []string) {
pv := privval.GenFilePV("", "")
jsbz, err := tmjson.Marshal(pv)
if err != nil {
return fmt.Errorf("validator -> json: %w", err)
panic(err)
}
fmt.Printf(`%v
`, string(jsbz))
return nil
}

View File

@@ -10,7 +10,6 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
@@ -22,15 +21,6 @@ var InitFilesCmd = &cobra.Command{
RunE: initFiles,
}
var (
keyType string
)
func init() {
InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}
func initFiles(cmd *cobra.Command, args []string) error {
return initFilesWithConfig(config)
}
@@ -39,19 +29,13 @@ func initFilesWithConfig(config *cfg.Config) error {
// private validator
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
var (
pv *privval.FilePV
err error
)
var pv *privval.FilePV
if tmos.FileExists(privValKeyFile) {
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
logger.Info("Found private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
pv = privval.GenFilePV(privValKeyFile, privValStateFile)
pv.Save()
logger.Info("Generated private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
@@ -72,17 +56,11 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(genFile) {
logger.Info("Found genesis file", "path", genFile)
} else {
genDoc := types.GenesisDoc{
ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
GenesisTime: tmtime.Now(),
ConsensusParams: types.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
}
}
pubKey, err := pv.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)

View File
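
Note: with the v0.34.x two-argument GenFilePV restored (no key-type parameter and no error return), initFilesWithConfig above reduces to a plain load-or-generate pattern. A minimal sketch of just that pattern, using the same calls as the hunk:

package main

import (
    tmos "github.com/tendermint/tendermint/libs/os"
    "github.com/tendermint/tendermint/privval"
)

// loadOrGenFilePV reuses an existing key file or generates and saves a new one.
func loadOrGenFilePV(keyFile, stateFile string) *privval.FilePV {
    if tmos.FileExists(keyFile) {
        return privval.LoadFilePV(keyFile, stateFile)
    }
    pv := privval.GenFilePV(keyFile, stateFile) // ed25519 only on v0.34.x
    pv.Save()
    return pv
}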

@@ -8,7 +8,6 @@ import (
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"
@@ -16,7 +15,6 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
@@ -24,7 +22,6 @@ import (
lproxy "github.com/tendermint/tendermint/light/proxy"
lrpc "github.com/tendermint/tendermint/light/rpc"
dbs "github.com/tendermint/tendermint/light/store/db"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
)
@@ -61,7 +58,7 @@ var (
primaryAddr string
witnessAddrsJoined string
chainID string
dir string
home string
maxOpenConnections int
sequential bool
@@ -83,8 +80,8 @@ func init() {
"connect to a Tendermint node at this address")
LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "",
"tendermint nodes to cross-check the primary node, comma-separated")
LightCmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")),
"specify the directory")
LightCmd.Flags().StringVar(&home, "home-dir", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")),
"specify the home directory")
LightCmd.Flags().IntVar(
&maxOpenConnections,
"max-open-connections",
@@ -122,12 +119,10 @@ func runProxy(cmd *cobra.Command, args []string) error {
witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
}
lightDB, err := dbm.NewGoLevelDB("light-client-db", dir)
db, err := dbm.NewGoLevelDB("light-client-db", home)
if err != nil {
return fmt.Errorf("can't create a db: %w", err)
}
// create a prefixed db on the chainID
db := dbm.NewPrefixDB(lightDB, []byte(chainID))
if primaryAddr == "" { // check to see if we can start from an existing state
var err error
@@ -189,7 +184,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
dbs.New(db, chainID),
options...,
)
} else { // continue from latest state
@@ -198,7 +193,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db),
dbs.New(db, chainID),
options...,
)
}
@@ -206,11 +201,6 @@ func runProxy(cmd *cobra.Command, args []string) error {
return err
}
rpcClient, err := rpchttp.New(primaryAddr, "/websocket")
if err != nil {
return fmt.Errorf("http client for %s: %w", primaryAddr, err)
}
cfg := rpcserver.DefaultConfig()
cfg.MaxBodyBytes = config.RPC.MaxBodyBytes
cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes
@@ -222,12 +212,11 @@ func runProxy(cmd *cobra.Command, args []string) error {
cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
}
p := lproxy.Proxy{
Addr: listenAddr,
Config: cfg,
Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())),
Logger: logger,
p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn()))
if err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
p.Listener.Close()
@@ -266,21 +255,3 @@ func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error {
}
return nil
}
func defaultMerkleKeyPathFn() lrpc.KeyPathFunc {
// regexp for extracting store name from /abci_query path
storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)
return func(path string, key []byte) (merkle.KeyPath, error) {
matches := storeNameRegexp.FindStringSubmatch(path)
if len(matches) != 2 {
return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp)
}
storeName := matches[1]
kp := merkle.KeyPath{}
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
kp = kp.AppendKey(key, merkle.KeyEncodingURL)
return kp, nil
}
}

View File
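
Note: the defaultMerkleKeyPathFn removed above turns an /abci_query path such as /store/acc/key into a two-segment merkle key path (store name first, then the raw key). A minimal sketch running the same logic on one input; the store and key values are made up:

package main

import (
    "fmt"
    "regexp"

    "github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
    storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)
    matches := storeNameRegexp.FindStringSubmatch("/store/acc/key")
    if len(matches) != 2 {
        panic("store name not found")
    }
    kp := merkle.KeyPath{}
    kp = kp.AppendKey([]byte(matches[1]), merkle.KeyEncodingURL) // "acc"
    kp = kp.AppendKey([]byte("balance"), merkle.KeyEncodingURL)  // example key
    fmt.Println(kp.String())
}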

@@ -8,7 +8,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
// ResetAllCmd removes the database of this Tendermint core
@@ -25,8 +24,6 @@ var keepAddrBook bool
func init() {
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}
// ResetPrivValidatorCmd resets the private validator files.
@@ -78,10 +75,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
panic(err)
}
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
pv.Save()
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)

View File

@@ -24,7 +24,7 @@ func init() {
}
func registerFlagsRootCmd(cmd *cobra.Command) {
cmd.PersistentFlags().String("log-level", config.LogLevel, "log level")
cmd.PersistentFlags().String("log_level", config.LogLevel, "log level")
}
// ParseConfig retrieves the default environment configuration,
@@ -51,20 +51,25 @@ var RootCmd = &cobra.Command{
if cmd.Name() == VersionCmd.Name() {
return nil
}
config, err = ParseConfig()
if err != nil {
return err
}
if config.LogFormat == cfg.LogFormatJSON {
logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
}
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
if err != nil {
return err
}
if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
logger = logger.With("module", "main")
return nil
},

View File

@@ -99,7 +99,7 @@ func TestRootFlagsEnv(t *testing.T) {
logLevel string
}{
{[]string{"--log", "debug"}, nil, defaultLogLvl}, // wrong flag
{[]string{"--log-level", "debug"}, nil, "debug"}, // right flag
{[]string{"--log_level", "debug"}, nil, "debug"}, // right flag
{nil, map[string]string{"TM_LOW": "debug"}, defaultLogLvl}, // wrong env flag
{nil, map[string]string{"MT_LOG_LEVEL": "debug"}, defaultLogLvl}, // wrong env prefix
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
@@ -120,7 +120,7 @@ func TestRootConfig(t *testing.T) {
// write non-default config
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
"log-level": nonDefaultLogLvl,
"log_level": nonDefaultLogLvl,
}
cases := []struct {
@@ -130,7 +130,7 @@ func TestRootConfig(t *testing.T) {
logLvl string
}{
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides
{[]string{"--log_level=abc:info"}, nil, "abc:info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
}
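The env-over-default behavior exercised in these tests comes from viper's prefix handling. A hedged sketch using plain viper (no Tendermint internals): SetEnvPrefix plus AutomaticEnv make the key log_level resolve through TM_LOG_LEVEL, which is why only the correctly prefixed variable wins in the table above:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetEnvPrefix("TM") // env vars are looked up as TM_<UPPERCASED KEY>
	v.AutomaticEnv()     // consult the environment on every Get

	v.SetDefault("log_level", "info")

	os.Setenv("TM_LOW", "debug")       // wrong variable name: ignored
	os.Setenv("TM_LOG_LEVEL", "debug") // right variable: wins

	fmt.Println(v.GetString("log_level")) // "debug"
}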

View File

@@ -26,24 +26,24 @@ func AddNodeFlags(cmd *cobra.Command) {
// priv val flags
cmd.Flags().String(
"priv-validator-laddr",
"priv_validator_laddr",
config.PrivValidatorListenAddr,
"socket address to listen on for connections from external priv-validator process")
"socket address to listen on for connections from external priv_validator process")
// node flags
cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing")
cmd.Flags().Bool("fast_sync", config.FastSyncMode, "fast blockchain syncing")
cmd.Flags().BytesHexVar(
&genesisHash,
"genesis-hash",
"genesis_hash",
[]byte{},
"optional SHA-256 hash of the genesis file")
cmd.Flags().Int64("consensus.double-sign-check-height", config.Consensus.DoubleSignCheckHeight,
cmd.Flags().Int64("consensus.double_sign_check_height", config.Consensus.DoubleSignCheckHeight,
"how many blocks to look back to check existence of the node's "+
"consensus votes before joining consensus")
// abci flags
cmd.Flags().String(
"proxy-app",
"proxy_app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore',"+
@@ -54,11 +54,11 @@ func AddNodeFlags(cmd *cobra.Command) {
// rpc flags
cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
cmd.Flags().String(
"rpc.grpc-laddr",
"rpc.grpc_laddr",
config.RPC.GRPCListenAddress,
"GRPC listen address (BroadcastTx only). Port required")
cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods")
cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)")
cmd.Flags().String("rpc.pprof_laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)")
// p2p flags
cmd.Flags().String(
@@ -66,31 +66,31 @@ func AddNodeFlags(cmd *cobra.Command) {
config.P2P.ListenAddress,
"node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().String("p2p.unconditional-peer-ids",
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().String("p2p.unconditional_peer_ids",
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
cmd.Flags().Bool("p2p.seed-mode", config.P2P.SeedMode, "enable/disable seed mode")
cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")
cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "enable/disable seed mode")
cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")
// consensus flags
cmd.Flags().Bool(
"consensus.create-empty-blocks",
"consensus.create_empty_blocks",
config.Consensus.CreateEmptyBlocks,
"set this to false to only produce blocks when there are txs or when the AppHash changes")
cmd.Flags().String(
"consensus.create-empty-blocks-interval",
"consensus.create_empty_blocks_interval",
config.Consensus.CreateEmptyBlocksInterval.String(),
"the possible interval between empty blocks")
// db flags
cmd.Flags().String(
"db-backend",
"db_backend",
config.DBBackend,
"database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
cmd.Flags().String(
"db-dir",
"db_dir",
config.DBPath,
"database directory")
}
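Cobra accepts arbitrary flag-name strings, so reverting to snake_case names here is purely a rename; flag parsing itself is unchanged. A self-contained sketch (the demo command and defaults are illustrative, not part of this diff) of registering and reading two of the names restored above:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			proxyApp, err := cmd.Flags().GetString("proxy_app")
			if err != nil {
				return err
			}
			seedMode, err := cmd.Flags().GetBool("p2p.seed_mode")
			if err != nil {
				return err
			}
			fmt.Println("proxy_app:", proxyApp, "p2p.seed_mode:", seedMode)
			return nil
		},
	}
	cmd.Flags().String("proxy_app", "tcp://127.0.0.1:26658", "proxy app address")
	cmd.Flags().Bool("p2p.seed_mode", false, "enable/disable seed mode")

	cmd.SetArgs([]string{"--proxy_app=kvstore", "--p2p.seed_mode=true"})
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}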

View File

@@ -23,6 +23,6 @@ func showNodeID(cmd *cobra.Command, args []string) error {
return err
}
fmt.Println(nodeKey.ID)
fmt.Println(nodeKey.ID())
return nil
}

View File

@@ -5,12 +5,9 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
tmgrpc "github.com/tendermint/tendermint/privval/grpc"
)
// ShowValidatorCmd adds capabilities for showing the validator info.
@@ -23,36 +20,16 @@ var ShowValidatorCmd = &cobra.Command{
}
func showValidator(cmd *cobra.Command, args []string) error {
var (
pubKey crypto.PubKey
err error
)
keyFilePath := config.PrivValidatorKeyFile()
if !tmos.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}
//TODO: remove once gRPC is the only supported protocol
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidatorListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(config, config.ChainID(), logger)
if err != nil {
return fmt.Errorf("can't connect to remote validator %w", err)
}
pubKey, err = pvsc.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
default:
pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
keyFilePath := config.PrivValidatorKeyFile()
if !tmos.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}
pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
pubKey, err = pv.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
pubKey, err := pv.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
bz, err := tmjson.Marshal(pubKey)

View File

@@ -15,7 +15,6 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
@@ -75,8 +74,6 @@ func init() {
"P2P Port")
TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
"randomize the moniker for each generated node")
TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}
// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
@@ -88,7 +85,7 @@ necessary files (private validator, genesis, config, etc.).
Note, strict routability for addresses is turned off in the config file.
Optionally, it will fill in the persistent-peers list in the config file using either hostnames or IPs.
Optionally, it will fill in the persistent_peers list in the config file using either hostnames or IPs.
Example:
@@ -183,15 +180,10 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
ChainID: "chain-" + tmrand.Str(6),
ConsensusParams: types.DefaultConsensusParams(),
GenesisTime: tmtime.Now(),
InitialHeight: initialHeight,
Validators: genVals,
ConsensusParams: types.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
}
}
// Write genesis file.
@@ -263,7 +255,7 @@ func persistentPeersString(config *cfg.Config) (string, error) {
if err != nil {
return "", err
}
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
return strings.Join(persistentPeers, ","), nil
}
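The string built above is the usual comma-joined id@host:port form. A toy sketch of the same assembly (hypothetical peer data; only fmt and strings, not the p2p package):

package main

import (
	"fmt"
	"strings"
)

type peer struct {
	ID   string
	Host string
	Port int
}

func main() {
	peers := []peer{
		{ID: "deadbeef01", Host: "10.0.0.1", Port: 26656},
		{ID: "deadbeef02", Host: "node1.example.com", Port: 26656},
	}
	entries := make([]string, len(peers))
	for i, p := range peers {
		entries[i] = fmt.Sprintf("%s@%s:%d", p.ID, p.Host, p.Port)
	}
	// deadbeef01@10.0.0.1:26656,deadbeef02@node1.example.com:26656
	fmt.Println(strings.Join(entries, ","))
}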

View File

@@ -2,6 +2,9 @@ coverage:
precision: 2
round: down
range: "70...100"
github_checks:
annotations: false
status:
project:
default:
@@ -9,9 +12,6 @@ coverage:
patch: on
changes: off
github_checks:
annotations: false
comment:
layout: "diff, files"
behavior: default

View File

@@ -20,6 +20,9 @@ const (
LogFormatPlain = "plain"
// LogFormatJSON is a format for json output
LogFormatJSON = "json"
// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"
)
// NOTE: Most of the structs & relevant comments + the
@@ -63,7 +66,7 @@ type Config struct {
StateSync *StateSyncConfig `mapstructure:"statesync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
}
@@ -151,7 +154,7 @@ type BaseConfig struct { //nolint: maligned
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy-app"`
ProxyApp string `mapstructure:"proxy_app"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
@@ -159,7 +162,7 @@ type BaseConfig struct { //nolint: maligned
// If this node is many blocks behind the tip of the chain, FastSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits
FastSyncMode bool `mapstructure:"fast-sync"`
FastSyncMode bool `mapstructure:"fast_sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
@@ -180,49 +183,39 @@ type BaseConfig struct { //nolint: maligned
// * badgerdb (uses github.com/dgraph-io/badger)
// - EXPERIMENTAL
// - use badgerdb build tag (go build -tags badgerdb)
DBBackend string `mapstructure:"db-backend"`
DBBackend string `mapstructure:"db_backend"`
// Database directory
DBPath string `mapstructure:"db-dir"`
DBPath string `mapstructure:"db_dir"`
// Output level for logging
LogLevel string `mapstructure:"log-level"`
LogLevel string `mapstructure:"log_level"`
// Output format: 'plain' (colored text) or 'json'
LogFormat string `mapstructure:"log-format"`
LogFormat string `mapstructure:"log_format"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis-file"`
Genesis string `mapstructure:"genesis_file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidatorKey string `mapstructure:"priv-validator-key-file"`
PrivValidatorKey string `mapstructure:"priv_validator_key_file"`
// Path to the JSON file containing the last sign state of a validator
PrivValidatorState string `mapstructure:"priv-validator-state-file"`
PrivValidatorState string `mapstructure:"priv_validator_state_file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv-validator-laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
PrivValidatorClientCertificate string `mapstructure:"priv-validator-client-certificate-file"`
// Client key generated while creating certificates for secure connection
PrivValidatorClientKey string `mapstructure:"priv-validator-client-key-file"`
// Path Root Certificate Authority used to sign both client and server certificates
PrivValidatorRootCA string `mapstructure:"priv-validator-root-ca-file"`
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node-key-file"`
NodeKey string `mapstructure:"node_key_file"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter-peers"` // false
FilterPeers bool `mapstructure:"filter_peers"` // false
}
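These mapstructure tags are what bind the snake_case TOML keys (db_backend, log_level, ...) back to struct fields when the config file is unmarshalled. A minimal sketch with plain viper (the miniature struct is illustrative, not the real BaseConfig):

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

type miniConfig struct {
	DBBackend string `mapstructure:"db_backend"`
	LogLevel  string `mapstructure:"log_level"`
}

func main() {
	v := viper.New()
	v.SetConfigType("toml")
	raw := []byte("db_backend = \"goleveldb\"\nlog_level = \"info\"\n")
	if err := v.ReadConfig(bytes.NewReader(raw)); err != nil {
		panic(err)
	}

	var cfg miniConfig
	if err := v.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {DBBackend:goleveldb LogLevel:info}
}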
// DefaultBaseConfig returns a default base configuration for a Tendermint node
@@ -235,7 +228,7 @@ func DefaultBaseConfig() BaseConfig {
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultPackageLogLevels(),
LogLevel: DefaultLogLevel,
LogFormat: LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
@@ -263,21 +256,6 @@ func (cfg BaseConfig) GenesisFile() string {
return rootify(cfg.Genesis, cfg.RootDir)
}
// PrivValidatorClientKeyFile returns the full path to the validator client key file
func (cfg BaseConfig) PrivValidatorClientKeyFile() string {
return rootify(cfg.PrivValidatorClientKey, cfg.RootDir)
}
// PrivValidatorClientCertificateFile returns the full path to the validator client certificate file
func (cfg BaseConfig) PrivValidatorClientCertificateFile() string {
return rootify(cfg.PrivValidatorClientCertificate, cfg.RootDir)
}
// PrivValidatorRootCAFile returns the full path to the root certificate authority file
func (cfg BaseConfig) PrivValidatorRootCAFile() string {
return rootify(cfg.PrivValidatorRootCA, cfg.RootDir)
}
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
func (cfg BaseConfig) PrivValidatorKeyFile() string {
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
@@ -298,41 +276,17 @@ func (cfg BaseConfig) DBDir() string {
return rootify(cfg.DBPath, cfg.RootDir)
}
func (cfg *BaseConfig) ArePrivValidatorClientSecurityOptionsPresent() bool {
switch {
case cfg.PrivValidatorRootCA == "":
return false
case cfg.PrivValidatorClientKey == "":
return false
case cfg.PrivValidatorClientCertificate == "":
return false
default:
return true
}
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case LogFormatPlain, LogFormatJSON:
default:
return errors.New("unknown log format (must be 'plain' or 'json')")
return errors.New("unknown log_format (must be 'plain' or 'json')")
}
return nil
}
// DefaultLogLevel returns a default log level of "error"
func DefaultLogLevel() string {
return "error"
}
// DefaultPackageLogLevels returns a default log level setting so all packages
// log at "error", while the `state` and `main` packages log at "info"
func DefaultPackageLogLevels() string {
return fmt.Sprintf("main:info,state:info,statesync:info,*:%s", DefaultLogLevel())
}
//-----------------------------------------------------------------------------
// RPCConfig
@@ -347,58 +301,58 @@ type RPCConfig struct {
// If the special '*' value is present in the list, all origins will be allowed.
// An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com).
// Only one wildcard can be used per origin.
CORSAllowedOrigins []string `mapstructure:"cors-allowed-origins"`
CORSAllowedOrigins []string `mapstructure:"cors_allowed_origins"`
// A list of methods the client is allowed to use with cross-domain requests.
CORSAllowedMethods []string `mapstructure:"cors-allowed-methods"`
CORSAllowedMethods []string `mapstructure:"cors_allowed_methods"`
// A list of non-simple headers the client is allowed to use with cross-domain requests.
CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"`
CORSAllowedHeaders []string `mapstructure:"cors_allowed_headers"`
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
GRPCListenAddress string `mapstructure:"grpc-laddr"`
GRPCListenAddress string `mapstructure:"grpc_laddr"`
// Maximum number of simultaneous connections.
// Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
// Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
// Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
Unsafe bool `mapstructure:"unsafe"`
// Maximum number of simultaneous connections (including WebSocket).
// Does not include gRPC connections. See grpc-max-open-connections
// Does not include gRPC connections. See grpc_max_open_connections
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
// 1024 - 40 - 10 - 50 = 924 = ~900
MaxOpenConnections int `mapstructure:"max-open-connections"`
MaxOpenConnections int `mapstructure:"max_open_connections"`
// Maximum number of unique clientIDs that can /subscribe
// If you're using /broadcast_tx_commit, set to the estimated maximum number
// of broadcast_tx_commit calls per block.
MaxSubscriptionClients int `mapstructure:"max-subscription-clients"`
MaxSubscriptionClients int `mapstructure:"max_subscription_clients"`
// Maximum number of unique queries a given client can /subscribe to
// If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set
// to the estimated maximum number of broadcast_tx_commit calls per block.
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"`
// How long to wait for a tx to be committed during /broadcast_tx_commit
// WARNING: Using a value larger than 10s will result in increasing the
// global HTTP write timeout, which applies to all connections and endpoints.
// See https://github.com/tendermint/tendermint/issues/3435
TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout-broadcast-tx-commit"`
TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`
// Maximum size of request body, in bytes
MaxBodyBytes int64 `mapstructure:"max-body-bytes"`
MaxBodyBytes int64 `mapstructure:"max_body_bytes"`
// Maximum size of request header, in bytes
MaxHeaderBytes int `mapstructure:"max-header-bytes"`
MaxHeaderBytes int `mapstructure:"max_header_bytes"`
// The path to a file containing certificate that is used to create the HTTPS server.
// Might be either absolute path or path related to Tendermint's config directory.
@@ -407,19 +361,19 @@ type RPCConfig struct {
// the certFile should be the concatenation of the server's certificate, any intermediates,
// and the CA's certificate.
//
// NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSCertFile string `mapstructure:"tls-cert-file"`
TLSCertFile string `mapstructure:"tls_cert_file"`
// The path to a file containing matching private key that is used to create the HTTPS server.
// Might be either absolute path or path related to tendermint's config directory.
//
// NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSKeyFile string `mapstructure:"tls-key-file"`
TLSKeyFile string `mapstructure:"tls_key_file"`
// pprof listen address (https://golang.org/pkg/net/http/pprof)
PprofListenAddress string `mapstructure:"pprof-laddr"`
PprofListenAddress string `mapstructure:"pprof_laddr"`
}
// DefaultRPCConfig returns a default configuration for the RPC server
@@ -460,25 +414,25 @@ func TestRPCConfig() *RPCConfig {
// returns an error if any check fails.
func (cfg *RPCConfig) ValidateBasic() error {
if cfg.GRPCMaxOpenConnections < 0 {
return errors.New("grpc-max-open-connections can't be negative")
return errors.New("grpc_max_open_connections can't be negative")
}
if cfg.MaxOpenConnections < 0 {
return errors.New("max-open-connections can't be negative")
return errors.New("max_open_connections can't be negative")
}
if cfg.MaxSubscriptionClients < 0 {
return errors.New("max-subscription-clients can't be negative")
return errors.New("max_subscription_clients can't be negative")
}
if cfg.MaxSubscriptionsPerClient < 0 {
return errors.New("max-subscriptions-per-client can't be negative")
return errors.New("max_subscriptions_per_client can't be negative")
}
if cfg.TimeoutBroadcastTxCommit < 0 {
return errors.New("timeout-broadcast-tx-commit can't be negative")
return errors.New("timeout_broadcast_tx_commit can't be negative")
}
if cfg.MaxBodyBytes < 0 {
return errors.New("max-body-bytes can't be negative")
return errors.New("max_body_bytes can't be negative")
}
if cfg.MaxHeaderBytes < 0 {
return errors.New("max-header-bytes can't be negative")
return errors.New("max_header_bytes can't be negative")
}
return nil
}
@@ -519,48 +473,48 @@ type P2PConfig struct { //nolint: maligned
ListenAddress string `mapstructure:"laddr"`
// Address to advertise to peers for them to dial
ExternalAddress string `mapstructure:"external-address"`
ExternalAddress string `mapstructure:"external_address"`
// Comma separated list of seed nodes to connect to
// We only use these if we can't connect to peers in the addrbook
Seeds string `mapstructure:"seeds"`
// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `mapstructure:"persistent-peers"`
PersistentPeers string `mapstructure:"persistent_peers"`
// UPNP port forwarding
UPNP bool `mapstructure:"upnp"`
// Path to address book
AddrBook string `mapstructure:"addr-book-file"`
AddrBook string `mapstructure:"addr_book_file"`
// Set true for strict address routability rules
// Set false for private or local networks
AddrBookStrict bool `mapstructure:"addr-book-strict"`
AddrBookStrict bool `mapstructure:"addr_book_strict"`
// Maximum number of inbound peers
MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"`
MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`
// Maximum number of outbound peers to connect to, excluding persistent peers
MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"`
MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`
// List of node IDs, to which a connection will be (re)established ignoring any existing limits
UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"`
UnconditionalPeerIDs string `mapstructure:"unconditional_peer_ids"`
// Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"`
PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent_peers_max_dial_period"`
// Time to wait before flushing messages out on the connection
FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"`
FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"`
// Maximum size of a message packet payload, in bytes
MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"`
MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Rate at which packets can be sent, in bytes/second
SendRate int64 `mapstructure:"send-rate"`
SendRate int64 `mapstructure:"send_rate"`
// Rate at which packets can be received, in bytes/second
RecvRate int64 `mapstructure:"recv-rate"`
RecvRate int64 `mapstructure:"recv_rate"`
// Set true to enable the peer-exchange reactor
PexReactor bool `mapstructure:"pex"`
@@ -569,22 +523,25 @@ type P2PConfig struct { //nolint: maligned
// peers. If another node asks it for addresses, it responds and disconnects.
//
// Does not work if the peer-exchange reactor is disabled.
SeedMode bool `mapstructure:"seed-mode"`
SeedMode bool `mapstructure:"seed_mode"`
// Comma separated list of peer IDs to keep private (will not be gossiped to
// other peers)
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
PrivatePeerIDs string `mapstructure:"private_peer_ids"`
// Toggle to disable guard against peers connecting from the same ip.
AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"`
AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`
// Peer connection configuration.
HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"`
DialTimeout time.Duration `mapstructure:"dial-timeout"`
HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
DialTimeout time.Duration `mapstructure:"dial_timeout"`
// Testing params.
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
TestDialFail bool `mapstructure:"test_dial_fail"`
// Fuzz connection
TestFuzz bool `mapstructure:"test_fuzz"`
TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
}
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
@@ -599,20 +556,17 @@ func DefaultP2PConfig() *P2PConfig {
MaxNumOutboundPeers: 10,
PersistentPeersMaxDialPeriod: 0 * time.Second,
FlushThrottleTimeout: 100 * time.Millisecond,
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
// The IP header and the TCP header take up 20 bytes each at least (unless
// optional header fields are used) and thus the max for (non-Jumbo frame)
// Ethernet is 1500 - 20 - 20 = 1460
// Source: https://stackoverflow.com/a/3074427/820520
MaxPacketMsgPayloadSize: 1400,
SendRate: 5120000, // 5 MB/s
RecvRate: 5120000, // 5 MB/s
PexReactor: true,
SeedMode: false,
AllowDuplicateIP: false,
HandshakeTimeout: 20 * time.Second,
DialTimeout: 3 * time.Second,
TestDialFail: false,
MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 5120000, // 5 MB/s
RecvRate: 5120000, // 5 MB/s
PexReactor: true,
SeedMode: false,
AllowDuplicateIP: false,
HandshakeTimeout: 20 * time.Second,
DialTimeout: 3 * time.Second,
TestDialFail: false,
TestFuzz: false,
TestFuzzConfig: DefaultFuzzConnConfig(),
}
}
@@ -634,29 +588,49 @@ func (cfg *P2PConfig) AddrBookFile() string {
// returns an error if any check fails.
func (cfg *P2PConfig) ValidateBasic() error {
if cfg.MaxNumInboundPeers < 0 {
return errors.New("max-num-inbound-peers can't be negative")
return errors.New("max_num_inbound_peers can't be negative")
}
if cfg.MaxNumOutboundPeers < 0 {
return errors.New("max-num-outbound-peers can't be negative")
return errors.New("max_num_outbound_peers can't be negative")
}
if cfg.FlushThrottleTimeout < 0 {
return errors.New("flush-throttle-timeout can't be negative")
return errors.New("flush_throttle_timeout can't be negative")
}
if cfg.PersistentPeersMaxDialPeriod < 0 {
return errors.New("persistent-peers-max-dial-period can't be negative")
return errors.New("persistent_peers_max_dial_period can't be negative")
}
if cfg.MaxPacketMsgPayloadSize < 0 {
return errors.New("max-packet-msg-payload-size can't be negative")
return errors.New("max_packet_msg_payload_size can't be negative")
}
if cfg.SendRate < 0 {
return errors.New("send-rate can't be negative")
return errors.New("send_rate can't be negative")
}
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
return errors.New("recv_rate can't be negative")
}
return nil
}
// FuzzConnConfig is a FuzzedConnection configuration.
type FuzzConnConfig struct {
Mode int
MaxDelay time.Duration
ProbDropRW float64
ProbDropConn float64
ProbSleep float64
}
// DefaultFuzzConnConfig returns the default config.
func DefaultFuzzConnConfig() *FuzzConnConfig {
return &FuzzConnConfig{
Mode: FuzzModeDrop,
MaxDelay: 3 * time.Second,
ProbDropRW: 0.2,
ProbDropConn: 0.00,
ProbSleep: 0.00,
}
}
//-----------------------------------------------------------------------------
// MempoolConfig
@@ -665,26 +639,26 @@ type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal-dir"`
WalPath string `mapstructure:"wal_dir"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// max_txs_bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max_txs_bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
CacheSize int `mapstructure:"cache_size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
MaxTxBytes int `mapstructure:"max_tx_bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
MaxBatchBytes int `mapstructure:"max_batch_bytes"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -726,13 +700,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
return errors.New("size can't be negative")
}
if cfg.MaxTxsBytes < 0 {
return errors.New("max-txs-bytes can't be negative")
return errors.New("max_txs_bytes can't be negative")
}
if cfg.CacheSize < 0 {
return errors.New("cache-size can't be negative")
return errors.New("cache_size can't be negative")
}
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
return errors.New("max_tx_bytes can't be negative")
}
return nil
}
@@ -742,13 +716,15 @@ func (cfg *MempoolConfig) ValidateBasic() error {
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp_dir"`
RPCServers []string `mapstructure:"rpc_servers"`
TrustPeriod time.Duration `mapstructure:"trust_period"`
TrustHeight int64 `mapstructure:"trust_height"`
TrustHash string `mapstructure:"trust_hash"`
DiscoveryTime time.Duration `mapstructure:"discovery_time"`
ChunkRequestTimeout time.Duration `mapstructure:"chunk_request_timeout"`
ChunkFetchers int32 `mapstructure:"chunk_fetchers"`
}
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
@@ -763,8 +739,10 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte {
// DefaultStateSyncConfig returns a default configuration for the state sync service
func DefaultStateSyncConfig() *StateSyncConfig {
return &StateSyncConfig{
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
ChunkRequestTimeout: 10 * time.Second,
ChunkFetchers: 4,
}
}
@@ -777,30 +755,49 @@ func TestStateSyncConfig() *StateSyncConfig {
func (cfg *StateSyncConfig) ValidateBasic() error {
if cfg.Enable {
if len(cfg.RPCServers) == 0 {
return errors.New("rpc-servers is required")
return errors.New("rpc_servers is required")
}
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers entries is required")
return errors.New("at least two rpc_servers entries is required")
}
for _, server := range cfg.RPCServers {
if len(server) == 0 {
return errors.New("found empty rpc-servers entry")
return errors.New("found empty rpc_servers entry")
}
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
return errors.New("trusted_period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
return errors.New("trusted_height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
return errors.New("trusted_hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
return fmt.Errorf("invalid trusted_hash: %w", err)
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk_request_timeout must be at least 5 seconds")
}
if cfg.ChunkFetchers <= 0 {
return errors.New("chunk_fetchers is required")
}
}
return nil
}
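As a worked example of a configuration that passes the new checks: the trust hash must be non-empty and hex-decodable, the chunk request timeout must be at least five seconds, and the fetcher count must be positive. A standalone sketch (the struct is a stand-in for StateSyncConfig; the values mirror the defaults introduced in this diff):

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"time"
)

type snapshotTrust struct {
	TrustHash           string
	ChunkRequestTimeout time.Duration
	ChunkFetchers       int32
}

func (c snapshotTrust) validate() error {
	if len(c.TrustHash) == 0 {
		return errors.New("trusted_hash is required")
	}
	if _, err := hex.DecodeString(c.TrustHash); err != nil {
		return fmt.Errorf("invalid trusted_hash: %w", err)
	}
	if c.ChunkRequestTimeout < 5*time.Second {
		return errors.New("chunk_request_timeout must be at least 5 seconds")
	}
	if c.ChunkFetchers <= 0 {
		return errors.New("chunk_fetchers is required")
	}
	return nil
}

func main() {
	cfg := snapshotTrust{
		TrustHash:           "deadbeef",       // must be valid hex
		ChunkRequestTimeout: 10 * time.Second, // matches the new default
		ChunkFetchers:       4,                // matches the new default
	}
	fmt.Println(cfg.validate()) // <nil>
}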
@@ -829,6 +826,8 @@ func (cfg *FastSyncConfig) ValidateBasic() error {
switch cfg.Version {
case "v0":
return nil
case "v1":
return nil
case "v2":
return nil
default:
@@ -843,38 +842,39 @@ func (cfg *FastSyncConfig) ValidateBasic() error {
// including timeouts and details about the WAL and the block structure.
type ConsensusConfig struct {
RootDir string `mapstructure:"home"`
WalPath string `mapstructure:"wal-file"`
WalPath string `mapstructure:"wal_file"`
walFile string // overrides WalPath if set
// How long we wait for a proposal block before prevoting nil
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
// How much timeout-propose increases with each round
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
TimeoutPropose time.Duration `mapstructure:"timeout_propose"`
// How much timeout_propose increases with each round
TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"`
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
// How much the timeout-prevote increases with each round
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"`
// How much the timeout_prevote increases with each round
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"`
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
// How much the timeout-precommit increases with each round
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"`
// How much the timeout_precommit increases with each round
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"`
// How long we wait after committing a block, before starting on the new
// height (this gives us a chance to receive some more precommits, even
// though we already have +2/3).
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
// NOTE: when modifying, make sure to update time_iota_ms genesis parameter
TimeoutCommit time.Duration `mapstructure:"timeout_commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
// EmptyBlocks mode and possible interval between empty blocks
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"`
// Reactor sleep duration parameters
PeerGossipSleepDuration time.Duration `mapstructure:"peer-gossip-sleep-duration"`
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"`
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"`
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
DoubleSignCheckHeight int64 `mapstructure:"double_sign_check_height"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
@@ -906,6 +906,7 @@ func TestConsensusConfig() *ConsensusConfig {
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
cfg.TimeoutPrecommit = 10 * time.Millisecond
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
// NOTE: when modifying, make sure to update time_iota_ms (testGenesisFmt) in toml.go
cfg.TimeoutCommit = 10 * time.Millisecond
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
@@ -963,37 +964,37 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
if cfg.TimeoutPropose < 0 {
return errors.New("timeout-propose can't be negative")
return errors.New("timeout_propose can't be negative")
}
if cfg.TimeoutProposeDelta < 0 {
return errors.New("timeout-propose-delta can't be negative")
return errors.New("timeout_propose_delta can't be negative")
}
if cfg.TimeoutPrevote < 0 {
return errors.New("timeout-prevote can't be negative")
return errors.New("timeout_prevote can't be negative")
}
if cfg.TimeoutPrevoteDelta < 0 {
return errors.New("timeout-prevote-delta can't be negative")
return errors.New("timeout_prevote_delta can't be negative")
}
if cfg.TimeoutPrecommit < 0 {
return errors.New("timeout-precommit can't be negative")
return errors.New("timeout_precommit can't be negative")
}
if cfg.TimeoutPrecommitDelta < 0 {
return errors.New("timeout-precommit-delta can't be negative")
return errors.New("timeout_precommit_delta can't be negative")
}
if cfg.TimeoutCommit < 0 {
return errors.New("timeout-commit can't be negative")
return errors.New("timeout_commit can't be negative")
}
if cfg.CreateEmptyBlocksInterval < 0 {
return errors.New("create-empty-blocks-interval can't be negative")
return errors.New("create_empty_blocks_interval can't be negative")
}
if cfg.PeerGossipSleepDuration < 0 {
return errors.New("peer-gossip-sleep-duration can't be negative")
return errors.New("peer_gossip_sleep_duration can't be negative")
}
if cfg.PeerQueryMaj23SleepDuration < 0 {
return errors.New("peer-query-maj23-sleep-duration can't be negative")
return errors.New("peer_query_maj23_sleep_duration can't be negative")
}
if cfg.DoubleSignCheckHeight < 0 {
return errors.New("double-sign-check-height can't be negative")
return errors.New("double_sign_check_height can't be negative")
}
return nil
}
@@ -1042,13 +1043,13 @@ type InstrumentationConfig struct {
Prometheus bool `mapstructure:"prometheus"`
// Address to listen for Prometheus collector(s) connections.
PrometheusListenAddr string `mapstructure:"prometheus-listen-addr"`
PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
// Maximum number of simultaneous connections.
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
MaxOpenConnections int `mapstructure:"max-open-connections"`
MaxOpenConnections int `mapstructure:"max_open_connections"`
// Instrumentation namespace.
Namespace string `mapstructure:"namespace"`
@@ -1075,7 +1076,7 @@ func TestInstrumentationConfig() *InstrumentationConfig {
// returns an error if any check fails.
func (cfg *InstrumentationConfig) ValidateBasic() error {
if cfg.MaxOpenConnections < 0 {
return errors.New("max-open-connections can't be negative")
return errors.New("max_open_connections can't be negative")
}
return nil
}

View File

@@ -133,7 +133,7 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {
assert.NoError(t, cfg.ValidateBasic())
// tamper with version
cfg.Version = "v2"
cfg.Version = "v1"
assert.NoError(t, cfg.ValidateBasic())
cfg.Version = "invalid"

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"text/template"
@@ -64,7 +63,7 @@ func WriteConfigFile(configFilePath string, config *Config) {
panic(err)
}
mustWriteFile(configFilePath, buffer.Bytes(), 0644)
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
}
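The config template below relies on two template functions: js, a text/template builtin that escapes strings, and StringsJoin, which must be registered when the template is parsed (in Tendermint's toml.go it is bound to strings.Join). A hedged standalone sketch of that wiring (the two-line template is a fragment, not the full defaultConfigTemplate):

package main

import (
	"os"
	"strings"
	"text/template"
)

const fragment = `rpc_servers = "{{ StringsJoin .RPCServers "," }}"
db_dir = "{{ js .DBPath }}"
`

func main() {
	t := template.Must(template.New("cfg").
		Funcs(template.FuncMap{"StringsJoin": strings.Join}).
		Parse(fragment))

	data := struct {
		RPCServers []string
		DBPath     string
	}{
		RPCServers: []string{"tcp://a:26657", "tcp://b:26657"},
		DBPath:     "data",
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}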
// Note: any changes to the comments/variables/mapstructure
@@ -83,7 +82,7 @@ const defaultConfigTemplate = `# This is a TOML config file.
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy-app = "{{ .BaseConfig.ProxyApp }}"
proxy_app = "{{ .BaseConfig.ProxyApp }}"
# A custom human readable name for this node
moniker = "{{ .BaseConfig.Moniker }}"
@@ -91,7 +90,7 @@ moniker = "{{ .BaseConfig.Moniker }}"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = {{ .BaseConfig.FastSyncMode }}
fast_sync = {{ .BaseConfig.FastSyncMode }}
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
@@ -112,52 +111,41 @@ fast-sync = {{ .BaseConfig.FastSyncMode }}
# * badgerdb (uses github.com/dgraph-io/badger)
# - EXPERIMENTAL
# - use badgerdb build tag (go build -tags badgerdb)
db-backend = "{{ .BaseConfig.DBBackend }}"
db_backend = "{{ .BaseConfig.DBBackend }}"
# Database directory
db-dir = "{{ js .BaseConfig.DBPath }}"
db_dir = "{{ js .BaseConfig.DBPath }}"
# Output level for logging, including package level options
log-level = "{{ .BaseConfig.LogLevel }}"
log_level = "{{ .BaseConfig.LogLevel }}"
# Output format: 'plain' (colored text) or 'json'
log-format = "{{ .BaseConfig.LogFormat }}"
log_format = "{{ .BaseConfig.LogFormat }}"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "{{ js .BaseConfig.Genesis }}"
genesis_file = "{{ js .BaseConfig.Genesis }}"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv-validator-key-file = "{{ js .BaseConfig.PrivValidatorKey }}"
priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}"
# Path to the JSON file containing the last sign state of a validator
priv-validator-state-file = "{{ js .BaseConfig.PrivValidatorState }}"
priv_validator_state_file = "{{ js .BaseConfig.PrivValidatorState }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
priv-validator-laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
priv-validator-client-certificate-file = "{{ js .BaseConfig.PrivValidatorClientCertificate }}"
# Client key generated while creating certificates for secure connection
priv-validator-client-key-file = "{{ js .BaseConfig.PrivValidatorClientKey }}"
# Path Root Certificate Authority used to sign both client and server certificates
priv-validator-certificate-authority = "{{ js .BaseConfig.PrivValidatorRootCA }}"
priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "{{ js .BaseConfig.NodeKey }}"
node_key_file = "{{ js .BaseConfig.NodeKey }}"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "{{ .BaseConfig.ABCI }}"
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter-peers = {{ .BaseConfig.FilterPeers }}
filter_peers = {{ .BaseConfig.FilterPeers }}
#######################################################################
@@ -175,78 +163,78 @@ laddr = "{{ .RPC.ListenAddress }}"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors-allowed-origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}]
cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}]
# A list of methods the client is allowed to use with cross-domain requests
cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}]
cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}]
# A list of non-simple headers the client is allowed to use with cross-domain requests
cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]
cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = {{ .RPC.Unsafe }}
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc-max-open-connections
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max-open-connections = {{ .RPC.MaxOpenConnections }}
max_open_connections = {{ .RPC.MaxOpenConnections }}
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
max_subscription_clients = {{ .RPC.MaxSubscriptionClients }}
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}
max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout-broadcast-tx-commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"
timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"
# Maximum size of request body, in bytes
max-body-bytes = {{ .RPC.MaxBodyBytes }}
max_body_bytes = {{ .RPC.MaxBodyBytes }}
# Maximum size of request header, in bytes
max-header-bytes = {{ .RPC.MaxHeaderBytes }}
max_header_bytes = {{ .RPC.MaxHeaderBytes }}
# The path to a file containing certificate that is used to create the HTTPS server.
# Might be either absolute path or path related to Tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-cert-file = "{{ .RPC.TLSCertFile }}"
tls_cert_file = "{{ .RPC.TLSCertFile }}"
# The path to a file containing matching private key that is used to create the HTTPS server.
# Might be either absolute path or path related to Tendermint's config directory.
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls-key-file = "{{ .RPC.TLSKeyFile }}"
tls_key_file = "{{ .RPC.TLSKeyFile }}"
# pprof listen address (https://golang.org/pkg/net/http/pprof)
pprof-laddr = "{{ .RPC.PprofListenAddress }}"
pprof_laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
### P2P Configuration Options ###
@@ -259,48 +247,49 @@ laddr = "{{ .P2P.ListenAddress }}"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external-address = "{{ .P2P.ExternalAddress }}"
# to figure out the address. IP and port are required
# example: 159.89.10.97:26656
external_address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
seeds = "{{ .P2P.Seeds }}"
# Comma separated list of nodes to keep persistent connections to
persistent-peers = "{{ .P2P.PersistentPeers }}"
persistent_peers = "{{ .P2P.PersistentPeers }}"
# UPNP port forwarding
upnp = {{ .P2P.UPNP }}
# Path to address book
addr-book-file = "{{ js .P2P.AddrBook }}"
addr_book_file = "{{ js .P2P.AddrBook }}"
# Set true for strict address routability rules
# Set false for private or local networks
addr-book-strict = {{ .P2P.AddrBookStrict }}
addr_book_strict = {{ .P2P.AddrBookStrict }}
# Maximum number of inbound peers
max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}
max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}
# Maximum number of outbound peers to connect to, excluding persistent peers
max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
unconditional_peer_ids = "{{ .P2P.UnconditionalPeerIDs }}"
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}"
persistent_peers_max_dial_period = "{{ .P2P.PersistentPeersMaxDialPeriod }}"
# Time to wait before flushing messages out on the connection
flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}"
flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}"
# Maximum size of a message packet payload, in bytes
max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }}
max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
# Rate at which packets can be sent, in bytes/second
send-rate = {{ .P2P.SendRate }}
send_rate = {{ .P2P.SendRate }}
# Rate at which packets can be received, in bytes/second
recv-rate = {{ .P2P.RecvRate }}
recv_rate = {{ .P2P.RecvRate }}
# Set true to enable the peer-exchange reactor
pex = {{ .P2P.PexReactor }}
@@ -309,17 +298,17 @@ pex = {{ .P2P.PexReactor }}
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed-mode = {{ .P2P.SeedMode }}
seed_mode = {{ .P2P.SeedMode }}
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private-peer-ids = "{{ .P2P.PrivatePeerIDs }}"
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
# Toggle to disable guard against peers connecting from the same ip.
allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }}
allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }}
# Peer connection configuration.
handshake-timeout = "{{ .P2P.HandshakeTimeout }}"
dial-timeout = "{{ .P2P.DialTimeout }}"
handshake_timeout = "{{ .P2P.HandshakeTimeout }}"
dial_timeout = "{{ .P2P.DialTimeout }}"
#######################################################
### Mempool Configuration Option ###
@@ -328,18 +317,18 @@ dial-timeout = "{{ .P2P.DialTimeout }}"
recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
wal-dir = "{{ js .Mempool.WalPath }}"
wal_dir = "{{ js .Mempool.WalPath }}"
# Maximum number of transactions in the mempool
size = {{ .Mempool.Size }}
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max-txs-bytes=5MB, mempool will only accept 5 transactions).
max-txs-bytes = {{ .Mempool.MaxTxsBytes }}
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = {{ .Mempool.MaxTxsBytes }}
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache-size = {{ .Mempool.CacheSize }}
cache_size = {{ .Mempool.CacheSize }}
# Do not remove invalid transactions from the cache (default: false)
# Set to true if it's not possible for any invalid transaction to become valid
@@ -347,13 +336,13 @@ cache-size = {{ .Mempool.CacheSize }}
keep-invalid-txs-in-cache = {{ .Mempool.KeepInvalidTxsInCache }}
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
max_tx_bytes = {{ .Mempool.MaxTxBytes }}
# Maximum size of a batch of transactions to send to a peer
# Including space needed by encoding (one varint per transaction).
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
max_batch_bytes = {{ .Mempool.MaxBatchBytes }}
#######################################################
### State Sync Configuration Options ###
@@ -370,19 +359,26 @@ enable = {{ .StateSync.Enable }}
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
# header hash obtained from a trusted source, and a period during which validators can be trusted.
#
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
trust-height = {{ .StateSync.TrustHeight }}
trust-hash = "{{ .StateSync.TrustHash }}"
trust-period = "{{ .StateSync.TrustPeriod }}"
rpc_servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
trust_height = {{ .StateSync.TrustHeight }}
trust_hash = "{{ .StateSync.TrustHash }}"
trust_period = "{{ .StateSync.TrustPeriod }}"
 # Time to spend discovering snapshots before initiating a restore.
-discovery-time = "{{ .StateSync.DiscoveryTime }}"
+discovery_time = "{{ .StateSync.DiscoveryTime }}"
 # Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
 # Will create a new, randomly named directory within, and remove it when done.
-temp-dir = "{{ .StateSync.TempDir }}"
+temp_dir = "{{ .StateSync.TempDir }}"
+# The timeout duration before re-requesting a chunk, possibly from a different
+# peer (default: 1 minute).
+chunk_request_timeout = "{{ .StateSync.ChunkRequestTimeout }}"
+# The number of concurrent chunk fetchers to run (default: 1).
+chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
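The two new keys tune the chunk pipeline: chunk_fetchers caps how many chunks are requested concurrently, and chunk_request_timeout decides when a stalled request is retried against another peer. A sketch of that fetch/retry pattern under assumed names (not the state sync reactor's actual code):

```go
package statesyncsketch

import (
	"sync"
	"time"
)

// fetchChunks sketches the behavior the two keys control: `fetchers`
// (chunk_fetchers) bounds concurrency, while `timeout`
// (chunk_request_timeout) bounds how long a worker waits for a chunk
// before re-requesting it, possibly from a different peer.
func fetchChunks(
	chunks <-chan int,
	fetchers int,
	timeout time.Duration,
	request func(chunk int) <-chan struct{},
) {
	var wg sync.WaitGroup
	for i := 0; i < fetchers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for c := range chunks {
			retry:
				for {
					select {
					case <-request(c): // chunk delivered
						break retry
					case <-time.After(timeout):
						// stalled: loop and re-request, ideally from
						// a different peer
					}
				}
			}
		}()
	}
	wg.Wait()
}
```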
 #######################################################
 ### Fast Sync Configuration Options ###
@@ -391,6 +387,7 @@ temp-dir = "{{ .StateSync.TempDir }}"
 # Fast Sync version to use:
 # 1) "v0" (default) - the legacy fast sync implementation
 # 2) "v1" - refactor of v0 version for better testability
+# 3) "v2" - complete redesign of v0, optimized for testability & readability
 version = "{{ .FastSync.Version }}"
@@ -399,46 +396,46 @@ version = "{{ .FastSync.Version }}"
 #######################################################
 ### Consensus Configuration Options ###
 #######################################################
 [consensus]
-wal-file = "{{ js .Consensus.WalPath }}"
+wal_file = "{{ js .Consensus.WalPath }}"
 # How long we wait for a proposal block before prevoting nil
-timeout-propose = "{{ .Consensus.TimeoutPropose }}"
-# How much timeout-propose increases with each round
-timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}"
+timeout_propose = "{{ .Consensus.TimeoutPropose }}"
+# How much timeout_propose increases with each round
+timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}"
 # How long we wait after receiving +2/3 prevotes for “anything” (i.e. not a single block or nil)
-timeout-prevote = "{{ .Consensus.TimeoutPrevote }}"
-# How much the timeout-prevote increases with each round
-timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
+timeout_prevote = "{{ .Consensus.TimeoutPrevote }}"
+# How much the timeout_prevote increases with each round
+timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
 # How long we wait after receiving +2/3 precommits for “anything” (i.e. not a single block or nil)
-timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}"
-# How much the timeout-precommit increases with each round
-timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
+timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}"
+# How much the timeout_precommit increases with each round
+timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
 # How long we wait after committing a block, before starting on the new
 # height (this gives us a chance to receive some more precommits, even
 # though we already have +2/3).
-timeout-commit = "{{ .Consensus.TimeoutCommit }}"
+timeout_commit = "{{ .Consensus.TimeoutCommit }}"
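Each timeout_* key pairs with a timeout_*_delta: per the comments above, the effective timeout grows by one delta for every failed round, so consensus waits progressively longer as rounds accumulate. A sketch of that linear escalation with illustrative values (the helper name and the durations are assumptions, not the project's code):

```go
package main

import (
	"fmt"
	"time"
)

// proposeTimeout returns how long to wait for a proposal in the given
// round: the base timeout plus one delta per round already failed.
func proposeTimeout(base, delta time.Duration, round int64) time.Duration {
	return base + time.Duration(round)*delta
}

func main() {
	base := 3 * time.Second        // stand-in for timeout_propose
	delta := 500 * time.Millisecond // stand-in for timeout_propose_delta
	for round := int64(0); round < 3; round++ {
		fmt.Printf("round %d: wait %s\n", round, proposeTimeout(base, delta, round))
	}
	// round 0: wait 3s
	// round 1: wait 3.5s
	// round 2: wait 4s
}
```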
 # How many blocks to look back to check existence of the node's consensus votes before joining consensus
 # When non-zero, the node will panic upon restart
-# if the same consensus key was used to sign {double-sign-check-height} last blocks.
+# if the same consensus key was used to sign {double_sign_check_height} last blocks.
 # So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
-double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }}
+double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }}
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
-skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }}
+skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
 # EmptyBlocks mode and possible interval between empty blocks
-create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }}
-create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
+create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
+create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
 # Reactor sleep duration parameters
-peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}"
-peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
+peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
+peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
 #######################################################
 ### Transaction Indexer Configuration Options ###
 #######################################################
-[tx-index]
+[tx_index]
 # What indexer to use for transactions
 #
@@ -462,13 +459,13 @@ indexer = "{{ .TxIndex.Indexer }}"
 prometheus = {{ .Instrumentation.Prometheus }}
 # Address to listen for Prometheus collector(s) connections
-prometheus-listen-addr = "{{ .Instrumentation.PrometheusListenAddr }}"
+prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
 # Maximum number of simultaneous connections.
 # If you want to accept a larger number than the default, make sure
 # you increase your OS limits.
 # 0 - unlimited.
-max-open-connections = {{ .Instrumentation.MaxOpenConnections }}
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
 # Instrumentation namespace
 namespace = "{{ .Instrumentation.Namespace }}"
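A max_open_connections cap is enforced at the listener, so excess scrapes queue or fail instead of exhausting file descriptors. A sketch of the usual Go pattern for such a cap, assuming the golang.org/x/net/netutil and Prometheus client packages (illustrative wiring, not necessarily the node's own):

```go
package main

import (
	"net"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"golang.org/x/net/netutil"
)

func main() {
	// Illustrative stand-ins for prometheus_listen_addr and
	// max_open_connections from the rendered config.
	addr := ":26660"
	maxOpenConnections := 3

	ln, err := net.Listen("tcp", addr)
	if err != nil {
		panic(err)
	}
	if maxOpenConnections > 0 { // 0 means unlimited
		// netutil.LimitListener caps concurrently accepted connections.
		ln = netutil.LimitListener(ln, maxOpenConnections)
	}
	// Serve the Prometheus metrics endpoint over the capped listener.
	if err := http.Serve(ln, promhttp.Handler()); err != nil {
		panic(err)
	}
}
```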
@@ -509,22 +506,16 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
             chainID = "tendermint_test"
         }
         testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
-        mustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
+        tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
     }
     // we always overwrite the priv val
-    mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
-    mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
+    tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
+    tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
     config := TestConfig().SetRoot(rootDir)
     return config
 }
-func mustWriteFile(filePath string, contents []byte, mode os.FileMode) {
-    if err := ioutil.WriteFile(filePath, contents, mode); err != nil {
-        tmos.Exit(fmt.Sprintf("failed to write file: %v", err))
-    }
-}
 var testGenesisFmt = `{
   "genesis_time": "2018-10-10T08:20:13.695936996Z",
   "chain_id": "%s",


@@ -71,7 +71,7 @@ func checkConfig(configFile string) bool {
     var elems = []string{
         "moniker",
         "seeds",
-        "proxy-app",
+        "proxy_app",
         "fast_sync",
         "create_empty_blocks",
         "peer",

Some files were not shown because too many files have changed in this diff.