From bf9232e99f9ad2b5059924479da08ad703f37008 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Fri, 17 Sep 2021 04:35:49 -0400
Subject: [PATCH 001/174] e2e: cleanup on all errors if preserve not specified
 (#6950)

If the e2e tests error, they leave all of the e2e state around, including
containers, networks, etc. We should clean this up when the test run shuts
down, even if it exits with an error.
---
 test/e2e/runner/main.go | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go
index f65b6d0b1..8de182f3a 100644
--- a/test/e2e/runner/main.go
+++ b/test/e2e/runner/main.go
@@ -52,6 +52,13 @@ func NewCLI() *CLI {
 			if err := Cleanup(cli.testnet); err != nil {
 				return err
 			}
+			defer func() {
+				if cli.preserve {
+					logger.Info("Preserving testnet contents because -preserve=true")
+				} else if err := Cleanup(cli.testnet); err != nil {
+					logger.Error("Error cleaning up testnet contents", "err", err)
+				}
+			}()
 			if err := Setup(cli.testnet); err != nil {
 				return err
 			}
@@ -103,11 +110,6 @@ func NewCLI() *CLI {
 			if err := Test(cli.testnet); err != nil {
 				return err
 			}
-			if !cli.preserve {
-				if err := Cleanup(cli.testnet); err != nil {
-					return err
-				}
-			}
 			return nil
 		},
 	}
@@ -269,6 +271,12 @@ Does not run any perbutations.
 		if err := Cleanup(cli.testnet); err != nil {
 			return err
 		}
+		defer func() {
+			if err := Cleanup(cli.testnet); err != nil {
+				logger.Error("Error cleaning up testnet contents", "err", err)
+			}
+		}()
+
 		if err := Setup(cli.testnet); err != nil {
 			return err
 		}
@@ -302,10 +310,6 @@ Does not run any perbutations.
 			return err
 		}
 
-		if err := Cleanup(cli.testnet); err != nil {
-			return err
-		}
-
 		return nil
 	},
 })

From ea6eecbb91f46784432963d895f78bb829b184af Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 17 Sep 2021 10:06:40 -0400
Subject: [PATCH 002/174] build(deps): Bump github.com/vektra/mockery/v2 from
 2.9.3 to 2.9.4 (#6956)

Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.9.3 to 2.9.4.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.9.3...v2.9.4)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0c90a7144..8c76f462e 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 - github.com/vektra/mockery/v2 v2.9.3 + github.com/vektra/mockery/v2 v2.9.4 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/go.sum b/go.sum index c00fc24ea..d36dd8988 100644 --- a/go.sum +++ b/go.sum @@ -895,8 +895,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektra/mockery/v2 v2.9.3 h1:ma6hcGQw4q/lhFUTJ+E9V8/5tsIcht9i2Q4d1qo26SQ= -github.com/vektra/mockery/v2 v2.9.3/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= +github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g= +github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= From ad067d73b96bb1b5f958828f9368e827864f49eb Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Mon, 20 Sep 2021 15:02:29 +0200 Subject: [PATCH 003/174] rfc: Fix a few typos and formatting glitches p2p roadmap (#6960) --- docs/rfc/rfc-000-p2p-roadmap.rst | 60 ++++++++++++++++---------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/docs/rfc/rfc-000-p2p-roadmap.rst b/docs/rfc/rfc-000-p2p-roadmap.rst index 64dda773e..dc9b54c7f 100644 --- a/docs/rfc/rfc-000-p2p-roadmap.rst +++ b/docs/rfc/rfc-000-p2p-roadmap.rst @@ -40,15 +40,15 @@ Critique of Current Peer-to-Peer Infrastructure The current (refactored) P2P stack is an improvement on the previous iteration (legacy), but as of 0.35, there remains room for improvement in the design and -implementation of the P2P layer. +implementation of the P2P layer. Some limitations of the current stack include: - heavy reliance on buffering to avoid backups in the flow of components, which is fragile to maintain and can lead to unexpected memory usage patterns and forces the routing layer to make decisions about when messages - should be discarded. - + should be discarded. + - the current p2p stack relies on convention (rather than the compiler) to enforce the API boundaries and conventions between reactors and the router, making it very easy to write "wrong" reactor code or introduce a bad @@ -64,7 +64,7 @@ Some limitations of the current stack include: difficult to expose that information to monitoring/observability tools. This general opacity also makes it difficult to interact with the peer system from other areas of the code base (e.g. tests, reactors). 
- the legacy stack provided some control to operators to force the system to
  dial new peers or seed nodes or manipulate the topology of the system _in
  situ_. The current stack can't easily provide this, and while the new stack

blocksync and consensus) from procedure calls to message passing. This is a
relatively simple change and could be implemented with the following
components:

- a constant to represent "local" delivery as the ``To`` field on
  ``p2p.Envelope``.

- a special path for routing local messages that doesn't require message
  serialization (protobuf marshalling/unmarshaling).

Adding these semantics, particularly in conjunction with synchronous
semantics, provides a solution to dependency graph problems currently present
in the Tendermint codebase, which will simplify development and make it
possible to isolate components for testing.

Eventually, this will also make it possible to have a logical Tendermint node
running in multiple processes or in a collection of containers, although the

of request/response ID to allow identifying out-of-order responses over a
single connection. Additionally, the programming model of the ``p2p.Channel``
could be expanded to accommodate some kind of _future_ or similar paradigm,
making it viable to write reactor code without requiring the reactor
developer to wrestle with lower level concurrency constructs.

Timeout Handling (QoS)
~~~~~~~~~~~~~~~~~~~~~~

detect or attribute. Additionally, the current system provides three main
parameters to control quality of service:

- buffer sizes for channels and queues.

- priorities for channels.

- queue implementation details for shedding load.

These end up being quite coarse controls, and changing the settings is
difficult: because the queues and channels are able to buffer large numbers
of messages, it can be hard to see the impact of a given change, particularly
in our extant test environment. In general, we should endeavor to:

- set real timeouts, via contexts, on most message send operations, so that
  senders rather than queues can be responsible for timeout logic.
  Additionally, this will make it possible to avoid sending messages during
  shutdown.

- reduce (to the greatest extent possible) the amount of buffering in
  channels and the queues, to more readily surface backpressure and reduce
  the potential for buildup of stale messages.

transport types and makes it more likely that message-based caching and rate
limiting will be implemented at the transport layer rather than at a more
appropriate level.

The transport, then, would be responsible for negotiating the connection and
the handshake and otherwise behave like a socket/file descriptor with
``Read`` and ``Write`` methods.
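As a rough sketch (the names here are illustrative, not the actual ``p2p``
API), a transport along these lines could be as small as:

.. code-block:: go

   package p2psketch // hypothetical; not part of the tendermint tree

   import (
       "context"
       "io"
   )

   // NodeID and NodeAddress mirror the identifiers the p2p package already
   // defines; they are declared here only to keep the sketch self-contained.
   type (
       NodeID      string
       NodeAddress string
   )

   // A handshake-owning transport: Dial and Accept return fully established
   // connections, and a Connection is just a byte stream bound to an
   // authenticated peer identity.
   type Transport interface {
       Dial(ctx context.Context, addr NodeAddress) (Connection, error)
       Accept(ctx context.Context) (Connection, error)
       Close() error
   }

   type Connection interface {
       io.ReadWriteCloser // socket/file-descriptor semantics
       PeerID() NodeID    // identity established during the handshake
   }

With framing, multiplexing, and prioritization handled above this seam,
supporting another protocol (e.g. QUIC) reduces to providing another
``Transport`` implementation.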
While this was included in the initial design for the new P2P layer, it may be

Service Discovery
~~~~~~~~~~~~~~~~~

In the current system, Tendermint assumes that all nodes in a network are
largely equivalent, and nodes tend to be "chatty", making many requests of
large numbers of peers and waiting for peers to (hopefully) respond. While
this works and has allowed Tendermint to get to a certain point, this both
produces a theoretical scaling bottleneck and makes it harder to test and
verify components of the system.

In addition to a peer's identity and connection information, peers should be
able to advertise a number of services or capabilities, and node operators or
developers should be able to specify peer capability requirements (e.g. target
at least -percent of peers with capability.)

These capabilities may be useful in selecting peers to send messages to; it
may make sense to extend Tendermint's message addressing capability to allow

Solutions
---------

Continued Homegrown Implementation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The current peer system is homegrown and is conceptually compatible with the
needs of the project, and while there are limitations to the system, the p2p
layer is not (currently, as of 0.35) a major source of bugs or friction during
development.

However, the current implementation makes a number of allowances for
interoperability, and there is a collection of iterative improvements that

implementation, upcoming work would include:

connections using different protocols (e.g. QUIC, etc.)

- entirely remove the constructs and implementations of the legacy peer
  implementation.

- establish and enforce clearer chains of responsibility for connection
  establishment (e.g. handshaking, setup), which is currently shared between
  three components.

- report better metrics regarding the state of peers and network
  connectivity, which are opaque outside of the system. This is constrained
  at the moment as a side effect of the split responsibility for connection
  establishment.

- extend the PEX system to include service information so that nodes in the
  network aren't necessarily homogeneous.

While maintaining a bespoke peer management layer would seem to distract from

case that our internal systems need to know much less about peers than
otherwise specified. Similarly, the current system has a notion of peer
scoring that cannot be communicated to libp2p, which may be fine as this is
only used to support peer exchange (PEX), which would become a property of
libp2p and not expressed in its current higher-level form.

In general, the effort to switch to libp2p would involve:

- timing it during an appropriate protocol-breaking window, as it doesn't seem
  viable to support both libp2p *and* the current p2p protocol.

- providing some in-memory testing network to support the use case that the
  current ``p2p.MemoryNetwork`` provides.

- re-homing the ``p2p.Router`` implementation on top of libp2p components to
  be able to maintain the current reactor implementations (sketched below).
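A rough sketch of that re-homing seam (illustrative only; ``Host`` stands in
for a go-libp2p host, whose real interfaces differ in detail and vary across
versions):

.. code-block:: go

   package libp2psketch // hypothetical adapter layer

   import (
       "context"
       "fmt"
       "io"
   )

   type (
       NodeID    string
       ChannelID uint16
   )

   // Envelope is a stand-in for p2p.Envelope.
   type Envelope struct {
       From    NodeID
       Payload []byte
   }

   // Host stands in for a libp2p-style host: it opens outbound streams and
   // registers handlers for inbound streams.
   type Host interface {
       NewStream(ctx context.Context, peer NodeID, protocolID string) (io.ReadWriteCloser, error)
       SetStreamHandler(protocolID string, handler func(io.ReadWriteCloser))
   }

   // protocolIDFor maps a reactor channel onto a per-channel protocol ID.
   func protocolIDFor(chID ChannelID) string {
       return fmt.Sprintf("/tendermint/channel/%d", chID)
   }

   // registerChannel wires inbound streams for one channel into the
   // channel's queue; reactors keep using the existing Channel API.
   func registerChannel(h Host, chID ChannelID, inbound chan<- Envelope) {
       h.SetStreamHandler(protocolIDFor(chID), func(s io.ReadWriteCloser) {
           // Decode envelopes from s and send them to inbound, as the
           // current router's receive path does.
       })
   }

Dialing, peer lifecycle, and transport security would then live behind the
host, which is where several of the open questions below originate.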
- -Open question include: + +Open question include: - how much local buffering should we be doing? It sort of seems like we should figure out what the expected behavior is for libp2p for QoS-type @@ -302,7 +302,7 @@ Open question include: - how do efforts to select "the best" (healthy, close, well-behaving, etc.) peers work out if Tendermint is not maintaining a local peer database? - + - would adding additional higher level semantics (internal message passing, request/response pairs, service discovery, etc.) facilitate removing some of the direct linkages between constructs/components in the system and reduce @@ -311,6 +311,6 @@ Open question include: References ---------- -- `Tracking Ticket for P2P Refactor Project `_ +- `Tracking Ticket for P2P Refactor Project `_ - `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_ - `ADR 62: P2P Architecture and Abstraction <../architecture/adr-061-p2p-architecture.md>`_ From 87b876a73b0d44094fff38ad970b8f052e271384 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 20 Sep 2021 12:30:26 -0400 Subject: [PATCH 004/174] crypto/armor: remove unused package (#6963) --- CHANGELOG_PENDING.md | 4 ++++ crypto/armor/armor.go | 39 -------------------------------------- crypto/armor/armor_test.go | 20 ------------------- 3 files changed, 4 insertions(+), 59 deletions(-) delete mode 100644 crypto/armor/armor.go delete mode 100644 crypto/armor/armor_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index af278d3fb..1c8c2b81e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -16,6 +16,10 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi - Go API + - [crypto/armor]: \#6963 remove package which is unused, and based on + deprecated fundamentals. Downstream users should maintain this + library. (@tychoish) + - Blockchain Protocol ### FEATURES diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go deleted file mode 100644 index c15d070e6..000000000 --- a/crypto/armor/armor.go +++ /dev/null @@ -1,39 +0,0 @@ -package armor - -import ( - "bytes" - "fmt" - "io/ioutil" - - "golang.org/x/crypto/openpgp/armor" -) - -func EncodeArmor(blockType string, headers map[string]string, data []byte) string { - buf := new(bytes.Buffer) - w, err := armor.Encode(buf, blockType, headers) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - _, err = w.Write(data) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - err = w.Close() - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - return buf.String() -} - -func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) { - buf := bytes.NewBufferString(armorStr) - block, err := armor.Decode(buf) - if err != nil { - return "", nil, nil, err - } - data, err = ioutil.ReadAll(block.Body) - if err != nil { - return "", nil, nil, err - } - return block.Type, block.Header, data, nil -} diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go deleted file mode 100644 index 8ecfaa0e1..000000000 --- a/crypto/armor/armor_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package armor - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestArmor(t *testing.T) { - blockType := "MINT TEST" - data := []byte("somedata") - armorStr := EncodeArmor(blockType, nil, data) - - // Decode armorStr and test for equivalence. 
- blockType2, _, data2, err := DecodeArmor(armorStr) - require.Nil(t, err, "%+v", err) - assert.Equal(t, blockType, blockType2) - assert.Equal(t, data, data2) -} From cf59b8b38e5677ff4709104d72d50c49ae843b53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 13:03:10 -0400 Subject: [PATCH 005/174] build(deps): Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#6961) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.8.1 to 1.9.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.8.1...v1.9.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sam Kleinman --- go.mod | 6 ++-- go.sum | 86 +++++++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 73 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index 8c76f462e..4c6bbdc99 100644 --- a/go.mod +++ b/go.mod @@ -31,12 +31,12 @@ require ( github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.8.1 + github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 github.com/vektra/mockery/v2 v2.9.4 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a - golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 + golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 + golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.40.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect diff --git a/go.sum b/go.sum index d36dd8988..d9385a995 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,11 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -31,6 +36,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -236,8 +242,9 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= @@ -311,6 +318,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -331,8 +339,9 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= @@ -368,12 +377,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -388,6 +399,9 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -399,8 +413,8 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI= @@ -442,8 +456,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqC github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/api v1.10.1/go.mod 
h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -509,7 +525,6 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -618,8 +633,9 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -695,8 +711,9 @@ github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnh github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 
h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= @@ -776,6 +793,7 @@ github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoL github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= @@ -797,9 +815,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= @@ -816,8 +832,9 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -834,8 +851,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= +github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/ssgreg/nlreturn/v2 v2.1.0 
h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -960,8 +978,9 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1048,8 +1067,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1062,6 +1082,10 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1146,9 +1170,15 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1261,6 +1291,7 @@ golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1294,6 +1325,12 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod 
h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1348,8 +1385,19 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1374,9 +1422,14 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= 
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1406,8 +1459,9 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= From 9dfdc62eb7eca771c11cce466926265f3cf4a85d Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 20 Sep 2021 15:18:48 -0400 Subject: [PATCH 006/174] proxy: move proxy package to internal (#6953) --- abci/client/client.go | 2 +- abci/client/creators.go | 35 ++ abci/client/doc.go | 4 +- abci/client/grpc_client.go | 2 +- abci/client/local_client.go | 2 +- abci/client/mocks/client.go | 116 +++--- abci/client/socket_client.go | 2 +- abci/client/socket_client_test.go | 8 +- abci/cmd/abci-cli/abci-cli.go | 6 +- abci/example/example_test.go | 4 +- abci/example/kvstore/kvstore_test.go | 14 +- abci/tests/client_server_test.go | 4 +- abci/tests/server/client.go | 10 +- abci/types/client.go | 1 + docs/tutorials/go-built-in.md | 6 +- internal/blocksync/v0/reactor_test.go | 5 +- internal/blocksync/v2/reactor_test.go | 335 +++++++++--------- internal/consensus/byzantine_test.go | 6 +- internal/consensus/common_test.go | 6 +- internal/consensus/reactor_test.go | 6 +- internal/consensus/replay.go | 2 +- internal/consensus/replay_file.go | 2 +- internal/consensus/replay_stubs.go | 7 +- internal/consensus/replay_test.go | 13 +- internal/consensus/wal_generator.go | 5 +- internal/mempool/v0/bench_test.go | 10 +- internal/mempool/v0/cache_test.go | 4 +- internal/mempool/v0/clist_mempool.go | 2 +- internal/mempool/v0/clist_mempool_test.go | 34 +- internal/mempool/v0/reactor_test.go | 4 +- internal/mempool/v1/mempool.go | 2 +- internal/mempool/v1/mempool_test.go | 7 +- {proxy => internal/proxy}/app_conn.go | 49 +-- {proxy => internal/proxy}/app_conn_test.go | 22 +- internal/proxy/client.go | 34 ++ .../proxy}/mocks/app_conn_consensus.go | 12 +- .../proxy}/mocks/app_conn_mempool.go | 20 +- .../proxy}/mocks/app_conn_query.go | 0 .../proxy}/mocks/app_conn_snapshot.go | 0 {proxy => internal/proxy}/multi_app_conn.go | 20 +- .../proxy}/multi_app_conn_test.go | 22 +- {proxy => internal/proxy}/version.go | 0 internal/statesync/reactor.go | 2 +- 
internal/statesync/reactor_test.go | 4 +- internal/statesync/syncer.go | 2 +- internal/statesync/syncer_test.go | 4 +- node/node.go | 5 +- node/node_test.go | 10 +- node/public.go | 4 +- node/setup.go | 5 +- proxy/client.go | 94 ----- proxy/mocks/client_creator.go | 36 -- rpc/client/mock/abci.go | 2 +- rpc/core/abci.go | 2 +- rpc/core/env.go | 2 +- rpc/grpc/types.pb.go | 7 +- rpc/test/helpers.go | 4 +- state/execution.go | 2 +- state/execution_test.go | 13 +- state/helpers_test.go | 5 +- test/e2e/app/main.go | 4 +- test/fuzz/mempool/v0/checktx.go | 6 +- test/fuzz/mempool/v1/checktx.go | 6 +- 63 files changed, 511 insertions(+), 553 deletions(-) create mode 100644 abci/client/creators.go create mode 100644 abci/types/client.go rename {proxy => internal/proxy}/app_conn.go (80%) rename {proxy => internal/proxy}/app_conn_test.go (88%) create mode 100644 internal/proxy/client.go rename {proxy => internal/proxy}/mocks/app_conn_consensus.go (92%) rename {proxy => internal/proxy}/mocks/app_conn_mempool.go (83%) rename {proxy => internal/proxy}/mocks/app_conn_query.go (100%) rename {proxy => internal/proxy}/mocks/app_conn_snapshot.go (100%) rename {proxy => internal/proxy}/multi_app_conn.go (90%) rename {proxy => internal/proxy}/multi_app_conn_test.go (80%) rename {proxy => internal/proxy}/version.go (100%) delete mode 100644 proxy/client.go delete mode 100644 proxy/mocks/client_creator.go diff --git a/abci/client/client.go b/abci/client/client.go index b6d34e422..93f8b9293 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" diff --git a/abci/client/creators.go b/abci/client/creators.go new file mode 100644 index 000000000..5a71becaf --- /dev/null +++ b/abci/client/creators.go @@ -0,0 +1,35 @@ +package abciclient + +import ( + "fmt" + + "github.com/tendermint/tendermint/abci/types" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" +) + +// Creator creates new ABCI clients. +type Creator func() (Client, error) + +// NewLocalCreator returns a Creator for the given app, +// which will be running locally. +func NewLocalCreator(app types.Application) Creator { + mtx := new(tmsync.RWMutex) + + return func() (Client, error) { + return NewLocalClient(mtx, app), nil + } +} + +// NewRemoteCreator returns a Creator for the given address (e.g. +// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you +// want the client to connect before reporting success. +func NewRemoteCreator(addr, transport string, mustConnect bool) Creator { + return func() (Client, error) { + remoteApp, err := NewClient(addr, transport, mustConnect) + if err != nil { + return nil, fmt.Errorf("failed to connect to proxy: %w", err) + } + + return remoteApp, nil + } +} diff --git a/abci/client/doc.go b/abci/client/doc.go index eac40fe11..fd5a17075 100644 --- a/abci/client/doc.go +++ b/abci/client/doc.go @@ -1,4 +1,4 @@ -// Package abcicli provides an ABCI implementation in Go. +// Package abciclient provides an ABCI implementation in Go. // // There are 3 clients available: // 1. socket (unix or TCP) @@ -26,4 +26,4 @@ // // sync: waits for all Async calls to complete (essentially what Flush does in // the socket client) and calls Sync method. 
-package abcicli +package abciclient diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 31bd6fae1..2bfa047bd 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 69457b5b0..06ff8171c 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 6726ce95e..664646e61 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" log "github.com/tendermint/tendermint/libs/log" @@ -20,15 +20,15 @@ type Client struct { } // ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -66,15 +66,15 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA } // BeginBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) { +func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -112,15 +112,15 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -158,15 +158,15 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t } // CommitAsync provides a mock function with given fields: _a0 -func (_m *Client) CommitAsync(_a0 context.Context) 
(*abcicli.ReqRes, error) { +func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -204,15 +204,15 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -250,15 +250,15 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) } // EchoAsync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { ret := _m.Called(ctx, msg) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok { r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -296,15 +296,15 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho } // EndBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) { +func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -356,15 +356,15 @@ func (_m *Client) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -393,15 +393,15 @@ func (_m *Client) FlushSync(_a0 context.Context) error { } // InfoAsync provides a mock function with given fields: _a0, _a1 
-func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) { +func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -439,15 +439,15 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R } // InitChainAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) { +func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -499,15 +499,15 @@ func (_m *Client) IsRunning() bool { } // ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) { +func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -545,15 +545,15 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn } // LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -591,15 +591,15 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo } // OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) { +func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := 
ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -670,15 +670,15 @@ func (_m *Client) OnStop() { } // QueryAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) { +func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -751,7 +751,7 @@ func (_m *Client) SetLogger(_a0 log.Logger) { } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 3fef8540d..d6477062c 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "bufio" diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index d61d729e1..53ba7b672 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -1,4 +1,4 @@ -package abcicli_test +package abciclient_test import ( "context" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" @@ -100,7 +100,7 @@ func TestHangingSyncCalls(t *testing.T) { } func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abcicli.Client) { + service.Service, abciclient.Client) { // some port between 20k and 30k port := 20000 + rand.Int31()%10000 addr := fmt.Sprintf("localhost:%d", port) @@ -110,7 +110,7 @@ func setupClientServer(t *testing.T, app types.Application) ( err = s.Start() require.NoError(t, err) - c := abcicli.NewSocketClient(addr, true) + c := abciclient.NewSocketClient(addr, true) err = c.Start() require.NoError(t, err) diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b9af27e22..9fae6fc05 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" @@ -27,7 +27,7 @@ import ( // client is a global variable so it can be reused by the console var ( - client abcicli.Client + client abciclient.Client logger log.Logger ctx = 
context.Background() @@ -67,7 +67,7 @@ var RootCmd = &cobra.Command{ if client == nil { var err error - client, err = abcicli.NewClient(flagAddress, flagAbci, false) + client, err = abciclient.NewClient(flagAddress, flagAbci, false) if err != nil { return err } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index fdfc5515e..cde8a15b1 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" @@ -61,7 +61,7 @@ func testStream(t *testing.T, app types.Application) { }) // Connect to the socket - client := abcicli.NewSocketClient(socket, false) + client := abciclient.NewSocketClient(socket, false) client.SetLogger(log.TestingLogger().With("module", "abci-client")) err = client.Start() require.NoError(t, err) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a52312a00..9d026bd87 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -241,7 +241,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, } // Connect to the socket - client := abcicli.NewSocketClient(socket, false) + client := abciclient.NewSocketClient(socket, false) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { if err = server.Stop(); err != nil { @@ -253,7 +253,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -265,7 +265,7 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, s return nil, nil, err } - client := abcicli.NewGRPCClient(socket, true) + client := abciclient.NewGRPCClient(socket, true) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { if err := server.Stop(); err != nil { @@ -313,7 +313,7 @@ func TestClientServer(t *testing.T) { runClientTests(t, gclient) } -func runClientTests(t *testing.T, client abcicli.Client) { +func runClientTests(t *testing.T, client abciclient.Client) { // run 
some tests....
 	key := testKey
 	value := key
@@ -325,7 +325,7 @@ func runClientTests(t *testing.T, client abcicli.Client) {
 	testClient(t, client, tx, key, value)
 }
 
-func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
+func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) {
 	ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
 	require.NoError(t, err)
 	require.False(t, ar.IsErr(), ar)
diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go
index 10d4a3e58..23adbe80d 100644
--- a/abci/tests/server/client.go
+++ b/abci/tests/server/client.go
@@ -7,14 +7,14 @@ import (
 	"fmt"
 	mrand "math/rand"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/types"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 )
 
 var ctx = context.Background()
 
-func InitChain(client abcicli.Client) error {
+func InitChain(client abciclient.Client) error {
 	total := 10
 	vals := make([]types.ValidatorUpdate, total)
 	for i := 0; i < total; i++ {
@@ -34,7 +34,7 @@ func InitChain(client abcicli.Client) error {
 	return nil
 }
 
-func Commit(client abcicli.Client, hashExp []byte) error {
+func Commit(client abciclient.Client, hashExp []byte) error {
 	res, err := client.CommitSync(ctx)
 	data := res.Data
 	if err != nil {
@@ -51,7 +51,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
 	return nil
 }
 
-func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
+func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
 	res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
 	code, data, log := res.Code, res.Data, res.Log
 	if code != codeExp {
@@ -70,7 +70,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
 	return nil
 }
 
-func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
+func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
 	res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
 	code, data, log := res.Code, res.Data, res.Log
 	if code != codeExp {
diff --git a/abci/types/client.go b/abci/types/client.go
new file mode 100644
index 000000000..ab1254f4c
--- /dev/null
+++ b/abci/types/client.go
@@ -0,0 +1 @@
+package types
diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md
index e94fe171e..befcef778 100644
--- a/docs/tutorials/go-built-in.md
+++ b/docs/tutorials/go-built-in.md
@@ -430,7 +430,7 @@ func newTendermint(app 
abci.Application, configFile string) (*nm.Node, error) {
 		config,
 		pv,
 		nodeKey,
-		proxy.NewLocalClientCreator(app),
+		abciclient.NewLocalCreator(app),
 		nm.DefaultGenesisDocProviderFunc(config),
 		nm.DefaultDBProvider,
 		nm.DefaultMetricsProvider(config.Instrumentation),
@@ -482,7 +482,7 @@ node, err := nm.NewNode(
 	config,
 	pv,
 	nodeKey,
-	proxy.NewLocalClientCreator(app),
+	abciclient.NewLocalCreator(app),
 	nm.DefaultGenesisDocProviderFunc(config),
 	nm.DefaultDBProvider,
 	nm.DefaultMetricsProvider(config.Instrumentation),
@@ -495,7 +495,7 @@ if err != nil {
 `NewNode` requires a few things including a configuration file, a private
 validator, a node key and a few others in order to construct the full node.
-Note we use `proxy.NewLocalClientCreator` here to create a local client instead
+Note we use `abciclient.NewLocalCreator` here to create a local client instead
 of one communicating through a socket or gRPC.
 
 [viper](https://github.com/spf13/viper) is being used for reading the config,
diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go
index a1ddc02cd..0d6bc9b18 100644
--- a/internal/blocksync/v0/reactor_test.go
+++ b/internal/blocksync/v0/reactor_test.go
@@ -7,16 +7,17 @@ import (
 
 	"github.com/stretchr/testify/require"
 
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
 	cfg "github.com/tendermint/tendermint/config"
 	cons "github.com/tendermint/tendermint/internal/consensus"
 	"github.com/tendermint/tendermint/internal/mempool/mock"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
+	"github.com/tendermint/tendermint/internal/proxy"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
-	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	sf "github.com/tendermint/tendermint/state/test/factory"
 	"github.com/tendermint/tendermint/store"
@@ -97,7 +98,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
 	t.Helper()
 
 	rts.nodes = append(rts.nodes, nodeID)
-	rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{}))
+	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}))
 	require.NoError(t, rts.app[nodeID].Start())
 
 	blockDB := dbm.NewMemDB()
diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go
index 4120b3942..3e8b03ebb 100644
--- a/internal/blocksync/v2/reactor_test.go
+++ b/internal/blocksync/v2/reactor_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/stretchr/testify/require"
 	dbm "github.com/tendermint/tm-db"
 
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
@@ -20,11 +21,11 @@ import (
 	"github.com/tendermint/tendermint/internal/mempool/mock"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/conn"
+	"github.com/tendermint/tendermint/internal/proxy"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
-	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	sf 
"github.com/tendermint/tendermint/state/test/factory" tmstore "github.com/tendermint/tendermint/store" @@ -163,7 +164,7 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { appl = &mockBlockApplier{} } else { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.NoError(t, err) @@ -187,176 +188,176 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { // future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). // func TestReactorTerminationScenarios(t *testing.T) { -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) +// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") +// defer os.RemoveAll(config.RootDir) +// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) +// refStore, _, _ := newReactorStore(genDoc, privVals, 20) -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } +// params := testReactorParams{ +// logger: log.TestingLogger(), +// genDoc: genDoc, +// privVals: privVals, +// startHeight: 10, +// bufferSize: 100, +// mockA: true, +// } -// type testEvent struct { -// evType string -// peer string -// height int64 -// } +// type testEvent struct { +// evType string +// peer string +// height int64 +// } -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: 
"Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } +// tests := []struct { +// name string +// params testReactorParams +// msgs []testEvent +// }{ +// { +// name: "simple termination on max peer height - one peer", +// params: params, +// msgs: []testEvent{ +// {evType: "AddPeer", peer: "P1"}, +// {evType: "ReceiveS", peer: "P1", height: 13}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P1", height: 11}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P1", height: 12}, +// {evType: "Process"}, +// {evType: "ReceiveB", peer: "P1", height: 13}, +// {evType: "Process"}, +// }, +// }, +// { +// name: "simple termination on max peer height - two peers", +// params: params, +// msgs: []testEvent{ +// {evType: "AddPeer", peer: "P1"}, +// {evType: "AddPeer", peer: "P2"}, +// {evType: "ReceiveS", peer: "P1", height: 13}, +// {evType: "ReceiveS", peer: "P2", height: 15}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P1", height: 11}, +// {evType: "ReceiveB", peer: "P2", height: 12}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P1", height: 13}, +// {evType: "Process"}, +// {evType: "ReceiveB", peer: "P2", height: 14}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 15}, +// {evType: "Process"}, +// }, +// }, +// { +// name: "termination on max peer height - two peers, noBlock error", +// params: params, +// msgs: []testEvent{ +// {evType: "AddPeer", peer: "P1"}, +// {evType: "AddPeer", peer: "P2"}, +// {evType: "ReceiveS", peer: "P1", height: 13}, +// {evType: "ReceiveS", peer: "P2", height: 15}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveNB", peer: "P1", height: 11}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 12}, +// {evType: "ReceiveB", peer: "P2", height: 11}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 13}, +// {evType: "Process"}, +// {evType: "ReceiveB", peer: "P2", height: 14}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 15}, +// {evType: "Process"}, +// }, +// }, +// { +// name: "termination on max peer height - two peers, remove one peer", +// params: params, +// msgs: []testEvent{ +// 
{evType: "AddPeer", peer: "P1"}, +// {evType: "AddPeer", peer: "P2"}, +// {evType: "ReceiveS", peer: "P1", height: 13}, +// {evType: "ReceiveS", peer: "P2", height: 15}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "RemovePeer", peer: "P1"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 12}, +// {evType: "ReceiveB", peer: "P2", height: 11}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 13}, +// {evType: "Process"}, +// {evType: "ReceiveB", peer: "P2", height: 14}, +// {evType: "Process"}, +// {evType: "BlockReq"}, +// {evType: "ReceiveB", peer: "P2", height: 15}, +// {evType: "Process"}, +// }, +// }, +// } -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behavior.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) +// for _, tt := range tests { +// tt := tt +// t.Run(tt.name, func(t *testing.T) { +// reactor := newTestReactor(params) +// reactor.Start() +// reactor.reporter = behavior.NewMockReporter() +// mockSwitch := &mockSwitchIo{switchedToConsensus: false} +// reactor.io = mockSwitch +// // time for go routines to start +// time.Sleep(time.Millisecond) -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: p2p.ID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } +// for _, step := range tt.msgs { +// switch step.evType { +// case "AddPeer": +// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) +// case "RemovePeer": +// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) +// case "ReceiveS": +// reactor.scheduler.send(bcStatusResponse{ +// peerID: p2p.ID(step.peer), +// height: step.height, +// time: time.Now(), +// }) +// case "ReceiveB": +// reactor.scheduler.send(bcBlockResponse{ +// peerID: p2p.ID(step.peer), +// block: refStore.LoadBlock(step.height), +// size: 10, +// time: time.Now(), +// }) +// case "ReceiveNB": +// reactor.scheduler.send(bcNoBlockResponse{ +// peerID: p2p.ID(step.peer), +// height: step.height, +// time: time.Now(), +// }) +// case "BlockReq": +// reactor.scheduler.send(rTrySchedule{time: time.Now()}) +// case "Process": +// reactor.processor.send(rProcessBlock{}) +// } +// // give time for messages to propagate between routines +// time.Sleep(time.Millisecond) +// } -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// 
reactor.Stop() -// }) -// } +// // time for processor to finish and reactor to switch to consensus +// time.Sleep(20 * time.Millisecond) +// assert.True(t, mockSwitch.hasSwitchedToConsensus()) +// reactor.Stop() +// }) +// } // } func TestReactorHelperMode(t *testing.T) { @@ -482,7 +483,7 @@ func newReactorStore( require.Len(t, privVals, 1) app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 1c6ec858b..f4a74ea6f 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/evidence" tmsync "github.com/tendermint/tendermint/internal/libs/sync" @@ -62,8 +62,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // one for mempool, one for consensus mtx := new(tmsync.RWMutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(mtx, app) // Make Mempool mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 17ba1ce2e..f5c31d02a 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -18,7 +18,7 @@ import ( dbm "github.com/tendermint/tm-db" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -410,8 +410,8 @@ func newStateWithConfigAndBlockStore( ) *State { // one for mempool, one for consensus mtx := new(tmsync.RWMutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(mtx, app) // Make Mempool mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 8c70ca1d5..571b6d832 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -347,8 +347,8 @@ func TestReactorWithEvidence(t *testing.T) { // one for mempool, one for consensus mtx := new(tmsync.RWMutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(mtx, app) mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) 
mempool.SetLogger(log.TestingLogger().With("module", "mempool")) diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 9b22f4631..2660797c8 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -11,8 +11,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 51cb090d7..b59024ae1 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -13,10 +13,10 @@ import ( dbm "github.com/tendermint/tm-db" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index c79340a0c..7a3d2c34a 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -3,11 +3,12 @@ package consensus import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -53,11 +54,11 @@ func (emptyMempool) CloseWAL() {} // the real app. 
func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { - clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ appHash: appHash, abciResponses: abciResponses, }) - cli, _ := clientCreator.NewABCIClient() + cli, _ := clientCreator() err := cli.Start() if err != nil { panic(err) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 4d1c9c6b2..591796245 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -19,19 +19,20 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" @@ -740,7 +741,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) - clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) + clientCreator2 := abciclient.NewLocalCreator(kvstoreApp) if nBlocks > 0 { // run nBlocks against a new client to build up the app state. 
// use a throwaway tendermint state @@ -890,7 +891,7 @@ func buildTMStateFromChain( kvstoreApp := kvstore.NewPersistentKVStoreApplication( filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() - clientCreator := proxy.NewLocalClientCreator(kvstoreApp) + clientCreator := abciclient.NewLocalCreator(kvstoreApp) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { @@ -958,7 +959,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x03 { app := &badApp{numBlocks: 3, allHashesAreWrong: true} - clientCreator := proxy.NewLocalClientCreator(app) + clientCreator := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() require.NoError(t, err) @@ -982,7 +983,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - RANDOM HASH { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} - clientCreator := proxy.NewLocalClientCreator(app) + clientCreator := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() require.NoError(t, err) @@ -1225,7 +1226,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { val, _ := factory.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} - clientCreator := proxy.NewLocalClientCreator(app) + clientCreator := abciclient.NewLocalCreator(app) config := ResetConfig("handshake_test_") t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 81c2125ca..4a47c7cc5 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -13,11 +13,12 @@ import ( "github.com/stretchr/testify/require" db "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -64,7 +65,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go index 45123c9f6..acfaec283 100644 --- a/internal/mempool/v0/bench_test.go +++ b/internal/mempool/v0/bench_test.go @@ -6,14 +6,14 @@ import ( "sync/atomic" "testing" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" ) func BenchmarkReap(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() mp.config.Size = 100000 @@ -34,7 +34,7 @@ func BenchmarkReap(b *testing.B) { func 
BenchmarkCheckTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -56,7 +56,7 @@ func BenchmarkCheckTx(b *testing.B) { func BenchmarkParallelCheckTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -81,7 +81,7 @@ func BenchmarkParallelCheckTx(b *testing.B) { func BenchmarkCheckDuplicateTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go index fbb719231..389c0806c 100644 --- a/internal/mempool/v0/cache_test.go +++ b/internal/mempool/v0/cache_test.go @@ -7,16 +7,16 @@ import ( "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) func TestCacheAfterUpdate(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 167fe0410..9edf067de 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -12,9 +12,9 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 6f3e6ebc0..6d023b5f6 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" @@ -23,7 +24,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -31,12 +31,12 @@ import ( // test. 
type cleanupFunc func() -func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { +func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) { return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) } -func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc.NewABCIClient() +func newMempoolWithAppAndConfig(cc abciclient.Creator, config *cfg.Config) (*CListMempool, cleanupFunc) { + appConnMem, _ := cc() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() if err != nil { @@ -92,7 +92,7 @@ func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types. func TestReapMaxBytesMaxGas(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -141,7 +141,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { func TestMempoolFilters(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() emptyTxArr := []types.Tx{[]byte{}} @@ -180,7 +180,7 @@ func TestMempoolFilters(t *testing.T) { func TestMempoolUpdate(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -216,7 +216,7 @@ func TestMempoolUpdate(t *testing.T) { func TestMempool_KeepInvalidTxsInCache(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) wcfg := cfg.DefaultConfig() wcfg.Mempool.KeepInvalidTxsInCache = true mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) @@ -264,7 +264,7 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { func TestTxsAvailable(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() mp.EnableTxsAvailable() @@ -308,12 +308,12 @@ func TestTxsAvailable(t *testing.T) { func TestSerialReap(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() - appConnCon, _ := cc.NewABCIClient() + appConnCon, _ := cc() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) err := appConnCon.Start() require.Nil(t, err) @@ -419,7 +419,7 @@ func TestSerialReap(t *testing.T) { func TestMempool_CheckTxChecksTxSize(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mempl, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -464,7 +464,7 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { func TestMempoolTxsBytes(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) config := cfg.ResetTestRoot("mempool_test") config.Mempool.MaxTxsBytes = 10 mp, cleanup := newMempoolWithAppAndConfig(cc, config) @@ -507,7 +507,7 @@ func TestMempoolTxsBytes(t *testing.T) { // 6. 
zero after tx is rechecked and removed due to not being valid anymore app2 := kvstore.NewApplication() - cc = proxy.NewLocalClientCreator(app2) + cc = abciclient.NewLocalCreator(app2) mp, cleanup = newMempoolWithApp(cc) defer cleanup() @@ -518,7 +518,7 @@ func TestMempoolTxsBytes(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, 8, mp.SizeBytes()) - appConnCon, _ := cc.NewABCIClient() + appConnCon, _ := cc() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) err = appConnCon.Start() require.Nil(t, err) @@ -597,10 +597,10 @@ func newRemoteApp( addr string, app abci.Application, ) ( - clientCreator proxy.ClientCreator, + clientCreator abciclient.Creator, server service.Service, ) { - clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) + clientCreator = abciclient.NewRemoteCreator(addr, "socket", true) // Start server server = abciserver.NewSocketServer(addr, app) diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 91729b37c..8bab45dd0 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -17,7 +18,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -55,7 +55,7 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(rts.kvstores[nodeID]) + cc := abciclient.NewLocalCreator(rts.kvstores[nodeID]) mempool, memCleanup := newMempoolWithApp(cc) t.Cleanup(memCleanup) diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index 46e7f5fcc..34e524159 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -12,9 +12,9 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go index df26be726..3b7eb02d0 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/v1/mempool_test.go @@ -15,13 +15,14 @@ import ( "time" "github.com/stretchr/testify/require" + + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -76,12 +77,12 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { t.Helper() app := 
&application{kvstore.NewApplication()} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) cfg.Mempool.CacheSize = cacheSize - appConnMem, err := cc.NewABCIClient() + appConnMem, err := cc() require.NoError(t, err) require.NoError(t, appConnMem.Start()) diff --git a/proxy/app_conn.go b/internal/proxy/app_conn.go similarity index 80% rename from proxy/app_conn.go rename to internal/proxy/app_conn.go index 8eb90daf3..39860e2a2 100644 --- a/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -3,7 +3,7 @@ package proxy import ( "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" ) @@ -13,25 +13,25 @@ import ( // Enforce which abci msgs can be sent on a connection at the type level type AppConnConsensus interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error) + DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) CommitSync(context.Context) (*types.ResponseCommit, error) } type AppConnMempool interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error - CheckTxAsync(context.Context, types.RequestCheckTx) (*abcicli.ReqRes, error) + CheckTxAsync(context.Context, types.RequestCheckTx) (*abciclient.ReqRes, error) CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - FlushAsync(context.Context) (*abcicli.ReqRes, error) + FlushAsync(context.Context) (*abciclient.ReqRes, error) FlushSync(context.Context) error } @@ -53,19 +53,19 @@ type AppConnSnapshot interface { } //----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abcicli.Client) +// Implements AppConnConsensus (subset of abciclient.Client) type appConnConsensus struct { - appConn abcicli.Client + appConn abciclient.Client } -func NewAppConnConsensus(appConn abcicli.Client) AppConnConsensus { +func NewAppConnConsensus(appConn abciclient.Client) AppConnConsensus { return &appConnConsensus{ appConn: appConn, } } -func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnConsensus) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -87,7 +87,10 @@ func (app *appConnConsensus) BeginBlockSync( return app.appConn.BeginBlockSync(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (app *appConnConsensus) DeliverTxAsync( + ctx context.Context, + req types.RequestDeliverTx, +) (*abciclient.ReqRes, error) { return app.appConn.DeliverTxAsync(ctx, req) } @@ -103,19 +106,19 @@ func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCom } //------------------------------------------------ -// Implements AppConnMempool (subset of abcicli.Client) +// Implements AppConnMempool (subset of abciclient.Client) type appConnMempool struct { - appConn abcicli.Client + appConn abciclient.Client } 
-func NewAppConnMempool(appConn abcicli.Client) AppConnMempool { +func NewAppConnMempool(appConn abciclient.Client) AppConnMempool { return &appConnMempool{ appConn: appConn, } } -func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnMempool) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -123,7 +126,7 @@ func (app *appConnMempool) Error() error { return app.appConn.Error() } -func (app *appConnMempool) FlushAsync(ctx context.Context) (*abcicli.ReqRes, error) { +func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, error) { return app.appConn.FlushAsync(ctx) } @@ -131,7 +134,7 @@ func (app *appConnMempool) FlushSync(ctx context.Context) error { return app.appConn.FlushSync(ctx) } -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) { return app.appConn.CheckTxAsync(ctx, req) } @@ -140,13 +143,13 @@ func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestChe } //------------------------------------------------ -// Implements AppConnQuery (subset of abcicli.Client) +// Implements AppConnQuery (subset of abciclient.Client) type appConnQuery struct { - appConn abcicli.Client + appConn abciclient.Client } -func NewAppConnQuery(appConn abcicli.Client) AppConnQuery { +func NewAppConnQuery(appConn abciclient.Client) AppConnQuery { return &appConnQuery{ appConn: appConn, } @@ -169,13 +172,13 @@ func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQu } //------------------------------------------------ -// Implements AppConnSnapshot (subset of abcicli.Client) +// Implements AppConnSnapshot (subset of abciclient.Client) type appConnSnapshot struct { - appConn abcicli.Client + appConn abciclient.Client } -func NewAppConnSnapshot(appConn abcicli.Client) AppConnSnapshot { +func NewAppConnSnapshot(appConn abciclient.Client) AppConnSnapshot { return &appConnSnapshot{ appConn: appConn, } diff --git a/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go similarity index 88% rename from proxy/app_conn_test.go rename to internal/proxy/app_conn_test.go index 458088635..f1ae7fe1a 100644 --- a/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -17,20 +17,20 @@ import ( //---------------------------------------- type appConnTestI interface { - EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) + EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) FlushSync(context.Context) error InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { - appConn abcicli.Client + appConn abciclient.Client } -func newAppConnTest(appConn abcicli.Client) appConnTestI { +func newAppConnTest(appConn abciclient.Client) appConnTestI { return &appConnTest{appConn} } -func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { return app.appConn.EchoAsync(ctx, msg) } @@ -48,7 +48,7 @@ var 
SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -63,7 +63,7 @@ func TestEcho(t *testing.T) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } @@ -96,7 +96,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -111,7 +111,7 @@ func BenchmarkEcho(b *testing.B) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { b.Fatalf("Error creating ABCI client: %v", err.Error()) } @@ -149,7 +149,7 @@ func BenchmarkEcho(b *testing.B) { func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -164,7 +164,7 @@ func TestInfo(t *testing.T) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } diff --git a/internal/proxy/client.go b/internal/proxy/client.go new file mode 100644 index 000000000..072ddf3e0 --- /dev/null +++ b/internal/proxy/client.go @@ -0,0 +1,34 @@ +package proxy + +import ( + "io" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/types" +) + +// DefaultClientCreator returns a default ClientCreator, which will create a +// local client if addr is one of: 'kvstore', +// 'persistent_kvstore' or 'noop', otherwise - a remote client. +// +// The Closer is a noop except for persistent_kvstore applications, +// which will clean up the store. 
+func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io.Closer) { + switch addr { + case "kvstore": + return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} + case "persistent_kvstore": + app := kvstore.NewPersistentKVStoreApplication(dbDir) + return abciclient.NewLocalCreator(app), app + case "noop": + return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} + default: + mustConnect := false // loop retrying + return abciclient.NewRemoteCreator(addr, transport, mustConnect), noopCloser{} + } +} + +type noopCloser struct{} + +func (noopCloser) Close() error { return nil } diff --git a/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go similarity index 92% rename from proxy/mocks/app_conn_consensus.go rename to internal/proxy/mocks/app_conn_consensus.go index 03207706e..fa93b0931 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -64,15 +64,15 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -147,6 +147,6 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go similarity index 83% rename from proxy/mocks/app_conn_mempool.go rename to internal/proxy/mocks/app_conn_mempool.go index 2505160d6..5429d8f90 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/internal/proxy/mocks/app_conn_mempool.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -18,15 +18,15 @@ type AppConnMempool struct { } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) 
!= nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -78,15 +78,15 @@ func (_m *AppConnMempool) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -115,6 +115,6 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go similarity index 100% rename from proxy/mocks/app_conn_query.go rename to internal/proxy/mocks/app_conn_query.go diff --git a/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go similarity index 100% rename from proxy/mocks/app_conn_snapshot.go rename to internal/proxy/mocks/app_conn_snapshot.go diff --git a/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go similarity index 90% rename from proxy/multi_app_conn.go rename to internal/proxy/multi_app_conn.go index 369b685ba..df49df287 100644 --- a/proxy/multi_app_conn.go +++ b/internal/proxy/multi_app_conn.go @@ -5,7 +5,7 @@ import ( "os" "syscall" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" tmlog "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -33,7 +33,7 @@ type AppConns interface { } // NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator ClientCreator) AppConns { +func NewAppConns(clientCreator abciclient.Creator) AppConns { return NewMultiAppConn(clientCreator) } @@ -50,16 +50,16 @@ type multiAppConn struct { queryConn AppConnQuery snapshotConn AppConnSnapshot - consensusConnClient abcicli.Client - mempoolConnClient abcicli.Client - queryConnClient abcicli.Client - snapshotConnClient abcicli.Client + consensusConnClient abciclient.Client + mempoolConnClient abciclient.Client + queryConnClient abciclient.Client + snapshotConnClient abciclient.Client - clientCreator ClientCreator + clientCreator abciclient.Creator } // NewMultiAppConn makes all necessary abci connections to the application. 
-func NewMultiAppConn(clientCreator ClientCreator) AppConns { +func NewMultiAppConn(clientCreator abciclient.Creator) AppConns { multiAppConn := &multiAppConn{ clientCreator: clientCreator, } @@ -178,8 +178,8 @@ func (app *multiAppConn) stopAllClients() { } } -func (app *multiAppConn) abciClientFor(conn string) (abcicli.Client, error) { - c, err := app.clientCreator.NewABCIClient() +func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { + c, err := app.clientCreator() if err != nil { return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) } diff --git a/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go similarity index 80% rename from proxy/multi_app_conn_test.go rename to internal/proxy/multi_app_conn_test.go index 34b0d0830..55bcc7524 100644 --- a/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -8,27 +8,30 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" abcimocks "github.com/tendermint/tendermint/abci/client/mocks" - "github.com/tendermint/tendermint/proxy/mocks" ) func TestAppConns_Start_Stop(t *testing.T) { quitCh := make(<-chan struct{}) - clientCreatorMock := &mocks.ClientCreator{} - clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return().Times(4) clientMock.On("Start").Return(nil).Times(4) clientMock.On("Stop").Return(nil).Times(4) clientMock.On("Quit").Return(quitCh).Times(4) - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil).Times(4) + creatorCallCount := 0 + creator := func() (abciclient.Client, error) { + creatorCallCount++ + return clientMock, nil + } - appConns := NewAppConns(clientCreatorMock) + appConns := NewAppConns(creator) err := appConns.Start() require.NoError(t, err) @@ -39,6 +42,7 @@ func TestAppConns_Start_Stop(t *testing.T) { require.NoError(t, err) clientMock.AssertExpectations(t) + assert.Equal(t, 4, creatorCallCount) } // Upon failure, we call tmos.Kill @@ -56,8 +60,6 @@ func TestAppConns_Failure(t *testing.T) { var recvQuitCh <-chan struct{} // nolint:gosimple recvQuitCh = quitCh - clientCreatorMock := &mocks.ClientCreator{} - clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return() clientMock.On("Start").Return(nil) @@ -66,9 +68,11 @@ func TestAppConns_Failure(t *testing.T) { clientMock.On("Quit").Return(recvQuitCh) clientMock.On("Error").Return(errors.New("EOF")).Once() - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil) + creator := func() (abciclient.Client, error) { + return clientMock, nil + } - appConns := NewAppConns(clientCreatorMock) + appConns := NewAppConns(creator) err := appConns.Start() require.NoError(t, err) diff --git a/proxy/version.go b/internal/proxy/version.go similarity index 100% rename from proxy/version.go rename to internal/proxy/version.go diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 6c0d26812..18c6acc2f 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -14,12 +14,12 @@ import ( "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/light" 
"github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 6373ed6ab..e2f2c8225 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -16,14 +16,14 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 559e98a8f..3591cb6b5 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -11,10 +11,10 @@ import ( "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index c1d6b462a..d965c20be 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -13,10 +13,10 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" diff --git a/node/node.go b/node/node.go index efa69a724..6ec28515d 100644 --- a/node/node.go +++ b/node/node.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" @@ -21,6 +22,7 @@ import ( "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" + 
"github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -30,7 +32,6 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" - "github.com/tendermint/tendermint/proxy" rpccore "github.com/tendermint/tendermint/rpc/core" grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" @@ -119,7 +120,7 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err func makeNode(config *cfg.Config, privValidator types.PrivValidator, nodeKey types.NodeKey, - clientCreator proxy.ClientCreator, + clientCreator abciclient.Creator, genesisDocProvider genesisDocProvider, dbProvider cfg.DBProvider, logger log.Logger) (service.Service, error) { diff --git a/node/node_test.go b/node/node_test.go index 6925008a6..7512c732f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -16,21 +16,21 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" @@ -214,7 +214,7 @@ func testFreeAddr(t *testing.T) string { func TestCreateProposalBlock(t *testing.T) { config := cfg.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -306,7 +306,7 @@ func TestCreateProposalBlock(t *testing.T) { func TestMaxTxsProposalBlockSize(t *testing.T) { config := cfg.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -368,7 +368,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { func TestMaxProposalBlockSize(t *testing.T) { config := cfg.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) diff --git a/node/public.go b/node/public.go index 99a8226d0..c616eebac 100644 --- a/node/public.go +++ b/node/public.go @@ -4,11 +4,11 @@ package node import ( "fmt" + abciclient "github.com/tendermint/tendermint/abci/client" 
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -28,7 +28,7 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) // value of the final argument. func New(conf *config.Config, logger log.Logger, - cf proxy.ClientCreator, + cf abciclient.Creator, gen *types.GenesisDoc, ) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile()) diff --git a/node/setup.go b/node/setup.go index 1a7c1b3b2..a966da85c 100644 --- a/node/setup.go +++ b/node/setup.go @@ -12,6 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" @@ -24,12 +25,12 @@ import ( mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/state/indexer/sink" @@ -50,7 +51,7 @@ func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.B return } -func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) { +func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger) (proxy.AppConns, error) { proxyApp := proxy.NewAppConns(clientCreator) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { diff --git a/proxy/client.go b/proxy/client.go deleted file mode 100644 index 929933e01..000000000 --- a/proxy/client.go +++ /dev/null @@ -1,94 +0,0 @@ -package proxy - -import ( - "fmt" - "io" - - abcicli "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -//go:generate ../scripts/mockery_generate.sh ClientCreator - -// ClientCreator creates new ABCI clients. -type ClientCreator interface { - // NewABCIClient returns a new ABCI client. - NewABCIClient() (abcicli.Client, error) -} - -//---------------------------------------------------- -// local proxy uses a mutex on an in-proc app - -type localClientCreator struct { - mtx *tmsync.RWMutex - app types.Application -} - -// NewLocalClientCreator returns a ClientCreator for the given app, -// which will be running locally. 
-func NewLocalClientCreator(app types.Application) ClientCreator { - return &localClientCreator{ - mtx: new(tmsync.RWMutex), - app: app, - } -} - -func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { - return abcicli.NewLocalClient(l.mtx, l.app), nil -} - -//--------------------------------------------------------------- -// remote proxy opens new connections to an external app process - -type remoteClientCreator struct { - addr string - transport string - mustConnect bool -} - -// NewRemoteClientCreator returns a ClientCreator for the given address (e.g. -// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you -// want the client to connect before reporting success. -func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { - return &remoteClientCreator{ - addr: addr, - transport: transport, - mustConnect: mustConnect, - } -} - -func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { - remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) - if err != nil { - return nil, fmt.Errorf("failed to connect to proxy: %w", err) - } - - return remoteApp, nil -} - -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore' or 'noop', otherwise - a remote client. -// -// The Closer is a noop except for persistent_kvstore applications, -// which will clean up the store. -func DefaultClientCreator(addr, transport, dbDir string) (ClientCreator, io.Closer) { - switch addr { - case "kvstore": - return NewLocalClientCreator(kvstore.NewApplication()), noopCloser{} - case "persistent_kvstore": - app := kvstore.NewPersistentKVStoreApplication(dbDir) - return NewLocalClientCreator(app), app - case "noop": - return NewLocalClientCreator(types.NewBaseApplication()), noopCloser{} - default: - mustConnect := false // loop retrying - return NewRemoteClientCreator(addr, transport, mustConnect), noopCloser{} - } -} - -type noopCloser struct{} - -func (noopCloser) Close() error { return nil } diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go deleted file mode 100644 index 0e4157c2f..000000000 --- a/proxy/mocks/client_creator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - abcicli "github.com/tendermint/tendermint/abci/client" -) - -// ClientCreator is an autogenerated mock type for the ClientCreator type -type ClientCreator struct { - mock.Mock -} - -// NewABCIClient provides a mock function with given fields: -func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { - ret := _m.Called() - - var r0 abcicli.Client - if rf, ok := ret.Get(0).(func() abcicli.Client); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(abcicli.Client) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 0737deec0..960ee6501 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" diff --git a/rpc/core/abci.go b/rpc/core/abci.go index ce705ba90..885ae2c23 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -2,8 +2,8 @@ package core import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/env.go b/rpc/core/env.go index 7069bc4d4..091c8972b 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -10,9 +10,9 @@ import ( "github.com/tendermint/tendermint/internal/consensus" mempl "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index b9cbee03f..f2b92c878 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -6,14 +6,15 @@ package coregrpc import ( context "context" fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + proto "github.com/gogo/protobuf/proto" types "github.com/tendermint/tendermint/abci/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 259450540..3a9a11108 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -6,13 +6,13 @@ import ( "os" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" core_grpc "github.com/tendermint/tendermint/rpc/grpc" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" @@ -100,7 +100,7 @@ func StartTendermint(ctx context.Context, } else { logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) } - papp := proxy.NewLocalClientCreator(app) + papp := abciclient.NewLocalCreator(app) node, err := nm.New(conf, logger, papp, nil) if err != nil { return nil, func(_ context.Context) error { return nil }, err diff --git a/state/execution.go b/state/execution.go index 05d5bdd52..f5380de7c 100644 --- a/state/execution.go +++ b/state/execution.go @@ -10,9 +10,9 @@ import ( cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/fail" mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) diff --git a/state/execution_test.go b/state/execution_test.go index 8e0ec563a..e4d78e1b7 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -9,15 +9,16 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" mmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/mocks" sf "github.com/tendermint/tendermint/state/test/factory" @@ -34,7 +35,7 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -59,7 +60,7 @@ func TestApplyBlock(t *testing.T) { // TestBeginBlockValidators ensures we send absent validators list. func TestBeginBlockValidators(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -122,7 +123,7 @@ func TestBeginBlockValidators(t *testing.T) { // TestBeginBlockByzantineValidators ensures we send byzantine validators list. 
func TestBeginBlockByzantineValidators(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -347,7 +348,7 @@ func TestUpdateValidators(t *testing.T) { // TestEndBlockValidatorUpdates ensures we update validator set and send an event. func TestEndBlockValidatorUpdates(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -420,7 +421,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { // would result in empty set causes no panic, an error is raised and NextValidators is not updated func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) diff --git a/state/helpers_test.go b/state/helpers_test.go index 6d575e147..3590e642d 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -7,16 +7,17 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" @@ -29,7 +30,7 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) return proxy.NewAppConns(cc) } diff --git a/test/e2e/app/main.go b/test/e2e/app/main.go index fd464220d..1d6806524 100644 --- a/test/e2e/app/main.go +++ b/test/e2e/app/main.go @@ -14,6 +14,7 @@ import ( "github.com/spf13/viper" "google.golang.org/grpc" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" @@ -28,7 +29,6 @@ import ( "github.com/tendermint/tendermint/privval" grpcprivval "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" - "github.com/tendermint/tendermint/proxy" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) @@ -130,7 +130,7 @@ func startNode(cfg *Config) error { n, err := node.New(tmcfg, nodeLogger, - proxy.NewLocalClientCreator(app), + abciclient.NewLocalCreator(app), nil, ) if err != nil { diff --git a/test/fuzz/mempool/v0/checktx.go b/test/fuzz/mempool/v0/checktx.go index a90ec2290..62eda9729 100644 --- a/test/fuzz/mempool/v0/checktx.go +++ b/test/fuzz/mempool/v0/checktx.go @@ -3,19 +3,19 @@ package v0 import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" 
"github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" ) var mp mempool.Mempool func init() { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() + cc := abciclient.NewLocalCreator(app) + appConnMem, _ := cc() err := appConnMem.Start() if err != nil { panic(err) diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go index 6194f3bcb..2ed0b97ff 100644 --- a/test/fuzz/mempool/v1/checktx.go +++ b/test/fuzz/mempool/v1/checktx.go @@ -3,19 +3,19 @@ package v1 import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" ) var mp mempool.Mempool func init() { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() + cc := abciclient.NewLocalCreator(app) + appConnMem, _ := cc() err := appConnMem.Start() if err != nil { panic(err) From 84ffaaaf37f2456f37d19e71cd8a311eede40e7c Mon Sep 17 00:00:00 2001 From: JayT106 Date: Tue, 21 Sep 2021 03:22:16 -0400 Subject: [PATCH 007/174] statesync/rpc: metrics for the statesync and the rpc SyncInfo (#6795) --- internal/statesync/chunks.go | 13 ++++ internal/statesync/chunks_test.go | 46 ++++++----- internal/statesync/metrics.go | 91 ++++++++++++++++++++++ internal/statesync/mocks/Metricer.go | 112 +++++++++++++++++++++++++++ internal/statesync/reactor.go | 95 +++++++++++++++++++++++ internal/statesync/reactor_test.go | 14 ++++ internal/statesync/syncer.go | 26 ++++++- internal/statesync/syncer_test.go | 26 ++++++- node/node.go | 49 ++++++++---- rpc/client/mock/status_test.go | 27 +++++-- rpc/core/env.go | 14 ++-- rpc/core/status.go | 10 +++ rpc/core/types/responses.go | 8 ++ rpc/openapi/openapi.yaml | 21 +++++ 14 files changed, 500 insertions(+), 52 deletions(-) create mode 100644 internal/statesync/metrics.go create mode 100644 internal/statesync/mocks/Metricer.go diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 590f128da..84b6971b8 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -355,3 +355,16 @@ func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { return ch } + +func (q *chunkQueue) numChunksReturned() int { + q.Lock() + defer q.Unlock() + + cnt := 0 + for _, b := range q.chunkReturned { + if b { + cnt++ + } + } + return cnt +} diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index ad7f19b3b..e17c170bd 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -421,15 +421,7 @@ func TestChunkQueue_Retry(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - // Allocate and add all chunks to the queue - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - _, err = queue.Next() - require.NoError(t, err) - } + allocateAddChunksToQueue(t, queue) // Retrying a couple of chunks makes Next() return them, but they are not allocatable queue.Retry(3) @@ 
-454,15 +446,7 @@ func TestChunkQueue_RetryAll(t *testing.T) {
 	queue, teardown := setupChunkQueue(t)
 	defer teardown()
 
-	// Allocate and add all chunks to the queue
-	for i := uint32(0); i < queue.Size(); i++ {
-		_, err := queue.Allocate()
-		require.NoError(t, err)
-		_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
-		require.NoError(t, err)
-		_, err = queue.Next()
-		require.NoError(t, err)
-	}
+	allocateAddChunksToQueue(t, queue)
 
 	_, err := queue.Next()
 	assert.Equal(t, errDone, err)
@@ -552,3 +536,29 @@ func TestChunkQueue_WaitFor(t *testing.T) {
 	_, ok = <-w
 	assert.False(t, ok)
 }
+
+func TestNumChunkReturned(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	assert.EqualValues(t, 5, queue.Size())
+
+	allocateAddChunksToQueue(t, queue)
+	assert.EqualValues(t, 5, queue.numChunksReturned())
+
+	err := queue.Close()
+	require.NoError(t, err)
+}
+
+// Allocate and add all chunks to the queue
+func allocateAddChunksToQueue(t *testing.T, q *chunkQueue) {
+	t.Helper()
+	for i := uint32(0); i < q.Size(); i++ {
+		_, err := q.Allocate()
+		require.NoError(t, err)
+		_, err = q.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
+		require.NoError(t, err)
+		_, err = q.Next()
+		require.NoError(t, err)
+	}
+}
diff --git a/internal/statesync/metrics.go b/internal/statesync/metrics.go
new file mode 100644
index 000000000..fb134f580
--- /dev/null
+++ b/internal/statesync/metrics.go
@@ -0,0 +1,91 @@
+package statesync
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+	"github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	// MetricsSubsystem is a subsystem shared by all metrics exposed by this package.
+	MetricsSubsystem = "statesync"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	TotalSnapshots      metrics.Counter
+	ChunkProcessAvgTime metrics.Gauge
+	SnapshotHeight      metrics.Gauge
+	SnapshotChunk       metrics.Counter
+	SnapshotChunkTotal  metrics.Gauge
+	BackFilledBlocks    metrics.Counter
+	BackFillBlocksTotal metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+// Optionally, labels can be provided along with their values ("foo",
+// "fooValue").
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+	labels := []string{}
+	for i := 0; i < len(labelsAndValues); i += 2 {
+		labels = append(labels, labelsAndValues[i])
+	}
+	return &Metrics{
+		TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "total_snapshots",
+			Help:      "The total number of snapshots discovered.",
+		}, labels).With(labelsAndValues...),
+		ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "chunk_process_avg_time",
+			Help:      "The average processing time per chunk.",
+		}, labels).With(labelsAndValues...),
+		SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "snapshot_height",
+			Help:      "The height of the current snapshot that has been processed.",
+		}, labels).With(labelsAndValues...),
+		SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "snapshot_chunk",
+			Help:      "The current number of chunks that have been processed.",
+		}, labels).With(labelsAndValues...),
+		SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "snapshot_chunks_total",
+			Help:      "The total number of chunks in the current snapshot.",
+		}, labels).With(labelsAndValues...),
+		BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "backfilled_blocks",
+			Help:      "The current number of blocks that have been back-filled.",
+		}, labels).With(labelsAndValues...),
+		BackFillBlocksTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "backfilled_blocks_total",
+			Help:      "The total number of blocks that need to be back-filled.",
+		}, labels).With(labelsAndValues...),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		TotalSnapshots:      discard.NewCounter(),
+		ChunkProcessAvgTime: discard.NewGauge(),
+		SnapshotHeight:      discard.NewGauge(),
+		SnapshotChunk:       discard.NewCounter(),
+		SnapshotChunkTotal:  discard.NewGauge(),
+		BackFilledBlocks:    discard.NewCounter(),
+		BackFillBlocksTotal: discard.NewGauge(),
+	}
+}
diff --git a/internal/statesync/mocks/Metricer.go b/internal/statesync/mocks/Metricer.go
new file mode 100644
index 000000000..c4721b304
--- /dev/null
+++ b/internal/statesync/mocks/Metricer.go
@@ -0,0 +1,112 @@
+// Code generated by mockery 2.9.4. DO NOT EDIT.
+
+package mocks
+
+import (
+	mock "github.com/stretchr/testify/mock"
+
+	time "time"
+)
+
+// Metricer is an autogenerated mock type for the Metricer type
+type Metricer struct {
+	mock.Mock
+}
+
+// BackFillBlocksTotal provides a mock function with given fields:
+func (_m *Metricer) BackFillBlocksTotal() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
+
+// BackFilledBlocks provides a mock function with given fields:
+func (_m *Metricer) BackFilledBlocks() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
+
+// ChunkProcessAvgTime provides a mock function with given fields:
+func (_m *Metricer) ChunkProcessAvgTime() time.Duration {
+	ret := _m.Called()
+
+	var r0 time.Duration
+	if rf, ok := ret.Get(0).(func() time.Duration); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(time.Duration)
+	}
+
+	return r0
+}
+
+// SnapshotChunksCount provides a mock function with given fields:
+func (_m *Metricer) SnapshotChunksCount() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
+
+// SnapshotChunksTotal provides a mock function with given fields:
+func (_m *Metricer) SnapshotChunksTotal() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
+
+// SnapshotHeight provides a mock function with given fields:
+func (_m *Metricer) SnapshotHeight() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
+
+// TotalSnapshots provides a mock function with given fields:
+func (_m *Metricer) TotalSnapshots() int64 {
+	ret := _m.Called()
+
+	var r0 int64
+	if rf, ok := ret.Get(0).(func() int64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int64)
+	}
+
+	return r0
+}
diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go
index 18c6acc2f..c2f598a8c 100644
--- a/internal/statesync/reactor.go
+++ b/internal/statesync/reactor.go
@@ -125,6 +125,18 @@ const (
 	maxLightBlockRequestRetries = 20
 )
 
+// Metricer defines an interface used for the rpc sync info query; please see statesync.metrics
+// for the details.
+type Metricer interface {
+	TotalSnapshots() int64
+	ChunkProcessAvgTime() time.Duration
+	SnapshotHeight() int64
+	SnapshotChunksCount() int64
+	SnapshotChunksTotal() int64
+	BackFilledBlocks() int64
+	BackFillBlocksTotal() int64
+}
+
 // Reactor handles state sync, both restoring snapshots for the local node and
 // serving snapshots for other nodes.
type Reactor struct {
@@ -158,6 +170,10 @@ type Reactor struct {
 	syncer        *syncer
 	providers     map[types.NodeID]*BlockProvider
 	stateProvider StateProvider
+
+	metrics            *Metrics
+	backfillBlockTotal int64
+	backfilledBlocks   int64
 }
 
 // NewReactor returns a reference to a new state sync reactor, which implements
@@ -176,6 +192,7 @@ func NewReactor(
 	stateStore sm.Store,
 	blockStore *store.BlockStore,
 	tempDir string,
+	ssMetrics *Metrics,
 ) *Reactor {
 	r := &Reactor{
 		chainID:       chainID,
@@ -195,6 +212,7 @@ func NewReactor(
 		peers:         newPeerList(),
 		dispatcher:    NewDispatcher(blockCh.Out),
 		providers:     make(map[types.NodeID]*BlockProvider),
+		metrics:       ssMetrics,
 	}
 
 	r.BaseService = *service.NewBaseService(logger, "StateSync", r)
@@ -271,6 +289,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) {
 		r.snapshotCh.Out,
 		r.chunkCh.Out,
 		r.tempDir,
+		r.metrics,
 	)
 	r.mtx.Unlock()
 	defer func() {
@@ -347,6 +366,9 @@ func (r *Reactor) backfill(
 	r.Logger.Info("starting backfill process...", "startHeight", startHeight,
 		"stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID)
 
+	r.backfillBlockTotal = startHeight - stopHeight + 1
+	r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal))
+
 	const sleepTime = 1 * time.Second
 	var (
 		lastValidatorSet *types.ValidatorSet
@@ -481,6 +503,16 @@ func (r *Reactor) backfill(
 
 				lastValidatorSet = resp.block.ValidatorSet
 
+				r.backfilledBlocks++
+				r.metrics.BackFilledBlocks.Add(1)
+
+				// The block height might be less than the stopHeight because the stopTime condition
+				// hasn't been fulfilled.
+				if resp.block.Height < stopHeight {
+					r.backfillBlockTotal++
+					r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal))
+				}
+
 			case <-queue.done():
 				if err := queue.error(); err != nil {
 					return err
@@ -1005,3 +1037,66 @@ func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initial
 	}
 	return nil
 }
+
+func (r *Reactor) TotalSnapshots() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	if r.syncer != nil && r.syncer.snapshots != nil {
+		return int64(len(r.syncer.snapshots.snapshots))
+	}
+	return 0
+}
+
+func (r *Reactor) ChunkProcessAvgTime() time.Duration {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	if r.syncer != nil {
+		return time.Duration(r.syncer.avgChunkTime)
+	}
+	return time.Duration(0)
+}
+
+func (r *Reactor) SnapshotHeight() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	if r.syncer != nil {
+		return r.syncer.lastSyncedSnapshotHeight
+	}
+	return 0
+}
+func (r *Reactor) SnapshotChunksCount() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	if r.syncer != nil && r.syncer.chunks != nil {
+		return int64(r.syncer.chunks.numChunksReturned())
+	}
+	return 0
+}
+
+func (r *Reactor) SnapshotChunksTotal() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	if r.syncer != nil && r.syncer.processingSnapshot != nil {
+		return int64(r.syncer.processingSnapshot.Chunks)
+	}
+	return 0
+}
+
+func (r *Reactor) BackFilledBlocks() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	return r.backfilledBlocks
+}
+
+func (r *Reactor) BackFillBlocksTotal() int64 {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	return r.backfillBlockTotal
+}
diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go
index e2f2c8225..6ab41b36c 100644
--- a/internal/statesync/reactor_test.go
+++ b/internal/statesync/reactor_test.go
@@ -29,6 +29,10 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
+var (
+	m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace)
+)
+
 type reactorTestSuite struct {
 	reactor *Reactor
syncer *syncer @@ -156,6 +160,7 @@ func setup( rts.stateStore, rts.blockStore, "", + m, ) rts.syncer = newSyncer( @@ -167,6 +172,7 @@ func setup( rts.snapshotOutCh, rts.chunkOutCh, "", + rts.reactor.metrics, ) require.NoError(t, rts.reactor.Start()) @@ -596,6 +602,9 @@ func TestReactor_Backfill(t *testing.T) { ) if failureRate > 3 { require.Error(t, err) + + require.NotEqual(t, rts.reactor.backfilledBlocks, rts.reactor.backfillBlockTotal) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } else { require.NoError(t, err) @@ -606,7 +615,12 @@ func TestReactor_Backfill(t *testing.T) { require.Nil(t, rts.blockStore.LoadBlockMeta(stopHeight-1)) require.Nil(t, rts.blockStore.LoadBlockMeta(startHeight+1)) + + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfilledBlocks) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } + require.Equal(t, rts.reactor.backfilledBlocks, rts.reactor.BackFilledBlocks()) + require.Equal(t, rts.reactor.backfillBlockTotal, rts.reactor.BackFillBlocksTotal()) }) } } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 3591cb6b5..05cfffe64 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -63,8 +63,13 @@ type syncer struct { fetchers int32 retryTimeout time.Duration - mtx tmsync.RWMutex - chunks *chunkQueue + mtx tmsync.RWMutex + chunks *chunkQueue + metrics *Metrics + + avgChunkTime int64 + lastSyncedSnapshotHeight int64 + processingSnapshot *snapshot } // newSyncer creates a new syncer. @@ -76,6 +81,7 @@ func newSyncer( stateProvider StateProvider, snapshotCh, chunkCh chan<- p2p.Envelope, tempDir string, + metrics *Metrics, ) *syncer { return &syncer{ logger: logger, @@ -88,6 +94,7 @@ func newSyncer( tempDir: tempDir, fetchers: cfg.Fetchers, retryTimeout: cfg.ChunkRequestTimeout, + metrics: metrics, } } @@ -121,6 +128,7 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err return false, err } if added { + s.metrics.TotalSnapshots.Add(1) s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) } @@ -190,9 +198,14 @@ func (s *syncer) SyncAny( defer chunks.Close() // in case we forget to close it elsewhere } + s.processingSnapshot = snapshot + s.metrics.SnapshotChunkTotal.Set(float64(snapshot.Chunks)) + newState, commit, err := s.Sync(ctx, snapshot, chunks) switch { case err == nil: + s.metrics.SnapshotHeight.Set(float64(snapshot.Height)) + s.lastSyncedSnapshotHeight = int64(snapshot.Height) return newState, commit, nil case errors.Is(err, errAbort): @@ -237,6 +250,7 @@ func (s *syncer) SyncAny( } snapshot = nil chunks = nil + s.processingSnapshot = nil } } @@ -286,6 +300,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context canceled. fetchCtx, cancel := context.WithCancel(ctx) defer cancel() + fetchStartTime := time.Now() for i := int32(0); i < s.fetchers; i++ { go s.fetchChunks(fetchCtx, snapshot, chunks) } @@ -324,7 +339,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu } // Restore snapshot - err = s.applyChunks(ctx, chunks) + err = s.applyChunks(ctx, chunks, fetchStartTime) if err != nil { return sm.State{}, nil, err } @@ -381,7 +396,7 @@ func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { // applyChunks applies chunks to the app. 
It returns various errors depending on the app's // response, or nil once the snapshot is fully restored. -func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { +func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time.Time) error { for { chunk, err := chunks.Next() if err == errDone { @@ -423,6 +438,9 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { switch resp.Result { case abci.ResponseApplySnapshotChunk_ACCEPT: + s.metrics.SnapshotChunk.Add(1) + s.avgChunkTime = time.Since(start).Nanoseconds() / int64(chunks.numChunksReturned()) + s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime)) case abci.ResponseApplySnapshotChunk_ABORT: return errAbort case abci.ResponseApplySnapshotChunk_RETRY: diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index d965c20be..867c99b0f 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -70,6 +70,8 @@ func TestSyncer_SyncAny(t *testing.T) { peerCID := types.NodeID("cc") rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts.reactor.syncer = rts.syncer + // Adding a chunk should error when no sync is in progress _, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) require.Error(t, err) @@ -195,6 +197,16 @@ func TestSyncer_SyncAny(t *testing.T) { require.Equal(t, expectState, newState) require.Equal(t, commit, lastCommit) + require.Equal(t, len(chunks), int(rts.syncer.processingSnapshot.Chunks)) + require.Equal(t, expectState.LastBlockHeight, rts.syncer.lastSyncedSnapshotHeight) + require.True(t, rts.syncer.avgChunkTime > 0) + + require.Equal(t, int64(rts.syncer.processingSnapshot.Chunks), rts.reactor.SnapshotChunksTotal()) + require.Equal(t, rts.syncer.lastSyncedSnapshotHeight, rts.reactor.SnapshotHeight()) + require.Equal(t, time.Duration(rts.syncer.avgChunkTime), rts.reactor.ChunkProcessAvgTime()) + require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) + require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) + connSnapshot.AssertExpectations(t) connQuery.AssertExpectations(t) } @@ -448,6 +460,9 @@ func TestSyncer_applyChunks_Results(t *testing.T) { body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) @@ -461,7 +476,7 @@ func TestSyncer_applyChunks_Results(t *testing.T) { Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } - err = rts.syncer.applyChunks(ctx, chunks) + err = rts.syncer.applyChunks(ctx, chunks, fetchStartTime) if tc.expectErr == unknownErr { require.Error(t, err) } else { @@ -498,6 +513,9 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}}) require.True(t, added) require.NoError(t, err) @@ -526,7 +544,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { // check the queue contents, and finally close the queue to end the goroutine. // We don't really care about the result of applyChunks, since it has separate test. 
go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) @@ -588,6 +606,8 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { chunks, err := newChunkQueue(s1, "") require.NoError(t, err) + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID}) require.True(t, added) require.NoError(t, err) @@ -625,7 +645,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // However, it will block on e.g. retry result, so we spawn a goroutine that will // be shut down when the chunk queue closes. go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) diff --git a/node/node.go b/node/node.go index 6ec28515d..fa75a432d 100644 --- a/node/node.go +++ b/node/node.go @@ -240,16 +240,17 @@ func makeNode(config *cfg.Config, return nil, fmt.Errorf("failed to create peer manager: %w", err) } - csMetrics, p2pMetrics, memplMetrics, smMetrics := defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID) + nodeMetrics := + defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID) - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, + router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, peerManager, transport, getRouterConfig(config, proxyApp)) if err != nil { return nil, fmt.Errorf("failed to create router: %w", err) } mpReactorShim, mpReactor, mp, err := createMempoolReactor( - config, proxyApp, state, memplMetrics, peerManager, router, logger, + config, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { return nil, err @@ -270,12 +271,12 @@ func makeNode(config *cfg.Config, mp, evPool, blockStore, - sm.BlockExecutorWithMetrics(smMetrics), + sm.BlockExecutorWithMetrics(nodeMetrics.state), ) csReactorShim, csReactor, csState := createConsensusReactor( config, state, blockExec, blockStore, mp, evPool, - privValidator, csMetrics, stateSync || blockSync, eventBus, + privValidator, nodeMetrics.cs, stateSync || blockSync, eventBus, peerManager, router, consensusLogger, ) @@ -283,7 +284,7 @@ func makeNode(config *cfg.Config, // doing a state sync first. bcReactorShim, bcReactor, err := createBlockchainReactor( logger, config, state, blockExec, blockStore, csReactor, - peerManager, router, blockSync && !stateSync, csMetrics, + peerManager, router, blockSync && !stateSync, nodeMetrics.cs, ) if err != nil { return nil, fmt.Errorf("could not create blockchain reactor: %w", err) @@ -300,9 +301,9 @@ func makeNode(config *cfg.Config, // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { - csMetrics.StateSyncing.Set(1) + nodeMetrics.cs.StateSyncing.Set(1) } else if blockSync { - csMetrics.BlockSyncing.Set(1) + nodeMetrics.cs.BlockSyncing.Set(1) } // Set up state sync reactor, and schedule a sync if requested. 
@@ -342,6 +343,7 @@ func makeNode(config *cfg.Config, stateStore, blockStore, config.StateSync.TempDir, + nodeMetrics.statesync, ) // add the channel descriptors to both the transports @@ -379,7 +381,7 @@ func makeNode(config *cfg.Config, if config.P2P.UseLegacy { // setup Transport and Switch sw = createSwitch( - config, transport, p2pMetrics, mpReactorShim, bcReactorForSwitch, + config, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch, stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, ) @@ -1035,20 +1037,37 @@ func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider { } } -// metricsProvider returns a consensus, p2p and mempool Metrics. -type metricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) +type nodeMetrics struct { + cs *cs.Metrics + p2p *p2p.Metrics + mempool *mempool.Metrics + state *sm.Metrics + statesync *statesync.Metrics +} + +// metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. +type metricsProvider func(chainID string) *nodeMetrics // defaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) { + return func(chainID string) *nodeMetrics { if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), + return &nodeMetrics{ + cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) + sm.PrometheusMetrics(config.Namespace, "chain_id", chainID), + statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID), + } + } + return &nodeMetrics{ + cs.NopMetrics(), + p2p.NopMetrics(), + mempool.NopMetrics(), + sm.NopMetrics(), + statesync.NopMetrics(), } - return cs.NopMetrics(), p2p.NopMetrics(), mempool.NopMetrics(), sm.NopMetrics() } } diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 3933c33c9..4f2f59f4c 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -20,12 +20,19 @@ func TestStatus(t *testing.T) { Call: mock.Call{ Response: &ctypes.ResultStatus{ SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: bytes.HexBytes("block"), - LatestAppHash: bytes.HexBytes("app"), - LatestBlockHeight: 10, - MaxPeerBlockHeight: 20, - TotalSyncedTime: time.Second, - RemainingTime: time.Minute, + LatestBlockHash: bytes.HexBytes("block"), + LatestAppHash: bytes.HexBytes("app"), + LatestBlockHeight: 10, + MaxPeerBlockHeight: 20, + TotalSyncedTime: time.Second, + RemainingTime: time.Minute, + TotalSnapshots: 10, + ChunkProcessAvgTime: time.Duration(10), + SnapshotHeight: 10, + SnapshotChunksCount: 9, + SnapshotChunksTotal: 10, + BackFilledBlocks: 9, + BackFillBlocksTotal: 10, }, }}, } @@ -56,4 +63,12 @@ func TestStatus(t *testing.T) { assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight) assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + + assert.EqualValues(10, st.SyncInfo.TotalSnapshots) + assert.EqualValues(time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) + assert.EqualValues(10, st.SyncInfo.SnapshotHeight) + assert.EqualValues(9, 
status.SyncInfo.SnapshotChunksCount) + assert.EqualValues(10, status.SyncInfo.SnapshotChunksTotal) + assert.EqualValues(9, status.SyncInfo.BackFilledBlocks) + assert.EqualValues(10, status.SyncInfo.BackFillBlocksTotal) } diff --git a/rpc/core/env.go b/rpc/core/env.go index 091c8972b..9aaa499a2 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -11,6 +11,7 @@ import ( mempl "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/statesync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -91,12 +92,13 @@ type Environment struct { PeerManager peerManager // objects - PubKey crypto.PubKey - GenDoc *types.GenesisDoc // cache the genesis structure - EventSinks []indexer.EventSink - EventBus *types.EventBus // thread safe - Mempool mempl.Mempool - BlockSyncReactor consensus.BlockSyncReactor + PubKey crypto.PubKey + GenDoc *types.GenesisDoc // cache the genesis structure + EventSinks []indexer.EventSink + EventBus *types.EventBus // thread safe + Mempool mempl.Mempool + BlockSyncReactor consensus.BlockSyncReactor + StateSyncMetricer statesync.Metricer Logger log.Logger diff --git a/rpc/core/status.go b/rpc/core/status.go index 815ab37f5..c5a412369 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -77,6 +77,16 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err ValidatorInfo: validatorInfo, } + if env.StateSyncMetricer != nil { + result.SyncInfo.TotalSnapshots = env.StateSyncMetricer.TotalSnapshots() + result.SyncInfo.ChunkProcessAvgTime = env.StateSyncMetricer.ChunkProcessAvgTime() + result.SyncInfo.SnapshotHeight = env.StateSyncMetricer.SnapshotHeight() + result.SyncInfo.SnapshotChunksCount = env.StateSyncMetricer.SnapshotChunksCount() + result.SyncInfo.SnapshotChunksTotal = env.StateSyncMetricer.SnapshotChunksTotal() + result.SyncInfo.BackFilledBlocks = env.StateSyncMetricer.BackFilledBlocks() + result.SyncInfo.BackFillBlocksTotal = env.StateSyncMetricer.BackFillBlocksTotal() + } + return result, nil } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index caa9b8732..ecb058312 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -100,6 +100,14 @@ type SyncInfo struct { TotalSyncedTime time.Duration `json:"total_synced_time"` RemainingTime time.Duration `json:"remaining_time"` + + TotalSnapshots int64 `json:"total_snapshots"` + ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time"` + SnapshotHeight int64 `json:"snapshot_height"` + SnapshotChunksCount int64 `json:"snapshot_chunks_count"` + SnapshotChunksTotal int64 `json:"snapshot_chunks_total"` + BackFilledBlocks int64 `json:"backfilled_blocks"` + BackFillBlocksTotal int64 `json:"backfill_blocks_total"` } // Info about the node's validator diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index d5a9ffafa..a32d44986 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -1396,6 +1396,27 @@ components: remaining_time: type: string example: "0" + total_snapshots: + type: string + example: "10" + chunk_process_avg_time: + type: string + example: "1000000000" + snapshot_height: + type: string + example: "1262196" + snapshot_chunks_count: + type: string + example: "10" + snapshot_chunks_total: + type: string + example: "100" + backfilled_blocks: + type: string + example: "10" + 
backfill_blocks_total: + type: string + example: "100" ValidatorInfo: type: object properties: From df2d744ea999f8bbb4507d0cc63b06e227b8faa5 Mon Sep 17 00:00:00 2001 From: Marko Date: Tue, 21 Sep 2021 10:32:00 +0000 Subject: [PATCH 008/174] config/docs: update and deprecated (#6879) ## Description - Add deprecated to config values in toml - update config in configuration doc - explain how to set up a node with the new network - add sentence about not needing to fork tendermint for built-in tutorial - closes #6865 - add note to use a released version of tendermint with the tutorials. This is to avoid unknown issues prior to a release. --- config/toml.go | 18 +++- docs/networks/README.md | 2 +- docs/nodes/configuration.md | 174 +++++++++++++++++++++++++++++----- docs/tutorials/go-built-in.md | 5 + 4 files changed, 169 insertions(+), 30 deletions(-) diff --git a/config/toml.go b/config/toml.go index 1cb3c0615..986ff1c2a 100644 --- a/config/toml.go +++ b/config/toml.go @@ -159,14 +159,14 @@ state-file = "{{ js .PrivValidator.State }}" # when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client laddr = "{{ .PrivValidator.ListenAddr }}" -# Client certificate generated while creating needed files for secure connection. +# Path to the client certificate generated while creating needed files for secure connection. # If a remote validator address is provided but no certificate, the connection will be insecure client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}" # Client key generated while creating certificates for secure connection validator-client-key-file = "{{ js .PrivValidator.ClientKey }}" -# Path Root Certificate Authority used to sign both client and server certificates +# Path to the Root Certificate Authority used to sign both client and server certificates certificate-authority = "{{ js .PrivValidator.RootCA }}" @@ -331,18 +331,28 @@ max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" # Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" # Time to wait before flushing messages out on the connection +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" # Maximum size of a message packet payload, in bytes +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} # Rate at which packets can be sent, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 send-rate = {{ .P2P.SendRate }} # Rate at which packets can be received, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 recv-rate = {{ .P2P.RecvRate }} # Set true to enable the peer-exchange reactor @@ -519,7 +529,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" [tx-index] # The backend database list to back the indexer. -# If list contains null, meaning no indexer service will be used. +# If list contains "null" or "", meaning no indexer service will be used. # # The application will set which txs to index. 
In some cases a node operator will be able # to decide which txs to index based on configuration set in the application. @@ -527,8 +537,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" # Options: # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. # 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}] # The PostgreSQL connection configuration, the connection format: diff --git a/docs/networks/README.md b/docs/networks/README.md index 8528f44ed..0b14e391b 100644 --- a/docs/networks/README.md +++ b/docs/networks/README.md @@ -2,7 +2,7 @@ order: 1 parent: title: Networks - order: 6 + order: 1 --- # Overview diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index ffdbaffa2..5695c1a28 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -16,8 +16,7 @@ the parameters set with their default values. It will look something like the file below, however, double check by inspecting the `config.toml` created with your version of `tendermint` installed: -```toml -# This is a TOML config file. +```toml# This is a TOML config file. # For more information, see https://github.com/toml-lang/toml # NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or @@ -34,14 +33,14 @@ like the file below, however, double check by inspecting the proxy-app = "tcp://127.0.0.1:26658" # A custom human readable name for this node -moniker = "anonymous" +moniker = "ape" # Mode of Node: full | validator | seed (default: "validator") # * validator node (default) # - all reactors # - with priv_validator_key.json, priv_validator_state.json -# * full node +# * full node # - all reactors # - No priv_validator_key.json, priv_validator_state.json # * seed node @@ -49,6 +48,11 @@ moniker = "anonymous" # - No priv_validator_key.json, priv_validator_state.json mode = "validator" +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast-sync = true + # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -84,16 +88,6 @@ log-format = "plain" # Path to the JSON file containing the initial validator set and other meta data genesis-file = "config/genesis.json" -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv-validator-key-file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv-validator-state-file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv-validator-laddr = "" - # Path to the JSON file containing the private key to use for node authentication in the p2p protocol node-key-file = "config/node_key.json" @@ -105,6 +99,33 @@ abci = "socket" filter-peers = false +####################################################### +### Priv Validator Configuration ### +####################################################### +[priv-validator] + +# Path to the JSON file containing the 
private key to use as a validator in the consensus protocol +key-file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +state-file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client +laddr = "" + +# Path to the client certificate generated while creating needed files for secure connection. +# If a remote validator address is provided but no certificate, the connection will be insecure +client-certificate-file = "" + +# Client key generated while creating certificates for secure connection +validator-client-key-file = "" + +# Path to the Root Certificate Authority used to sign both client and server certificates +certificate-authority = "" + + ####################################################################### ### Advanced Configuration Options ### ####################################################################### @@ -130,6 +151,7 @@ cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", # TCP or UNIX socket address for the gRPC server to listen on # NOTE: This server only supports /broadcast_tx_commit +# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. grpc-laddr = "" # Maximum number of simultaneous connections. @@ -139,9 +161,10 @@ grpc-laddr = "" # 0 - unlimited. # Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} # 1024 - 40 - 10 - 50 = 924 = ~900 +# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. grpc-max-open-connections = 900 -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = false # Maximum number of simultaneous connections (including WebSocket). @@ -198,18 +221,34 @@ pprof-laddr = "" ####################################################### [p2p] +# Enable the legacy p2p layer. +use-legacy = false + +# Select the p2p internal queue +queue-type = "priority" + # Address to listen for incoming connections laddr = "tcp://0.0.0.0:26656" # Address to advertise to peers for them to dial # If empty, will use the same port as the laddr, # and will introspect on the listener or use UPnP -# to figure out the address. +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 external-address = "" # Comma separated list of seed nodes to connect to +# We only use these if we can’t connect to peers in the addrbook +# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 seeds = "" +# Comma separated list of peers to be added to the peer store +# on startup. 
Either BootstrapPeers or PersistentPeers are +# needed for peer discovery +bootstrap-peers = "" + # Comma separated list of nodes to keep persistent connections to persistent-peers = "" @@ -217,6 +256,8 @@ persistent-peers = "" upnp = false # Path to address book +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 addr-book-file = "config/addrbook.json" # Set true for strict address routability rules @@ -224,9 +265,15 @@ addr-book-file = "config/addrbook.json" addr-book-strict = true # Maximum number of inbound peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 max-num-inbound-peers = 40 # Maximum number of outbound peers to connect to, excluding persistent peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 max-num-outbound-peers = 10 # Maximum number of connections (inbound and outbound). @@ -236,27 +283,40 @@ max-connections = 64 max-incoming-connection-attempts = 100 # List of node IDs, to which a connection will be (re)established ignoring any existing limits +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 unconditional-peer-ids = "" # Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 persistent-peers-max-dial-period = "0s" # Time to wait before flushing messages out on the connection +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 flush-throttle-timeout = "100ms" # Maximum size of a message packet payload, in bytes -max-packet-msg-payload-size = 1024 +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +max-packet-msg-payload-size = 1400 # Rate at which packets can be sent, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 send-rate = 5120000 # Rate at which packets can be received, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 recv-rate = 5120000 # Set true to enable the peer-exchange reactor pex = true # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 private-peer-ids = "" # Toggle to disable guard against peers connecting from the same ip. @@ -349,8 +409,15 @@ discovery-time = "15s" # Will create a new, randomly named directory within, and remove it when done. temp-dir = "" +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 15 seconds). +chunk-request-timeout = "15s" + +# The number of concurrent chunk and block fetchers to run (default: 4). +fetchers = "4" + ####################################################### -### BlockSync Configuration Connections ### +### Block Sync Configuration Connections ### ####################################################### [blocksync] @@ -410,7 +477,8 @@ peer-query-maj23-sleep-duration = "2s" ####################################################### [tx-index] -# What indexer to use for transactions +# The backend database list to back the indexer. 
+# If list contains "null" or "", meaning no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
#   1) "null"
#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
-#   - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
-indexer = "kv"
+#   3) "psql" - the indexer services backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
+indexer = ["kv"]
+
+# The PostgreSQL connection configuration, the connection format:
+# postgresql://:@:/?
+psql-conn = ""

#######################################################
###   Instrumentation Configuration Options         ###
#######################################################
@@ -520,10 +593,61 @@ This section will cover settings within the p2p section of the `config.toml`.

- `external-address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port.
-  > We recommend setting an external address. When used in a private network, Tendermint Core currently doesn't advertise the node's public address. There is active and ongoing work to improve the P2P system, but this is a helpful workaround for now.
-- `seeds` = is a list of comma separated seed nodes that you will connect upon a start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the networks
- `persistent-peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added.
-- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection).
-- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection).
-- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
+
+Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
+
+We will cover the new and deprecated parameters below.
+
+### New Parameters
+
+There are three new parameters, which are enabled if `use-legacy` is set to `false`.
+
+- `queue-type` = sets the type of queue to use in the p2p layer. There are three options available: `fifo`, `priority`, and `wdrr`. The default is `priority`.
+- `bootstrap-peers` = is a list of comma separated peers which will be used to bootstrap the address book.
+- `max-connections` = is the maximum number of allowed inbound and outbound connections.
+
+### Deprecated Parameters
+
+> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by default with the deprecated fields. The new implementation uses different config parameters, explained above.
+
+- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`.*
+- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). *This was replaced by `max-connections`.*
+- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. *Deprecated.*
+- `seeds` = is a list of comma separated seed nodes that you will connect to upon start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network. *Deprecated; replaced by `bootstrap-peers`.*
+
+## Indexing Settings
+
+Operators can configure indexing via the `[tx-index]` section. The `indexer`
+field takes a series of supported indexers. If `null` is included, indexing will
+be turned off regardless of other values provided.
+
+### Supported Indexers
+
+#### KV
+
+The `kv` indexer type is an embedded key-value store supported by the main
+underlying Tendermint database. Using the `kv` indexer type allows you to query
+for block and transaction events directly against Tendermint's RPC. However, the
+query syntax is limited and so this indexer type might be deprecated or removed
+entirely in the future.
+
+#### PostgreSQL
+
+The `psql` indexer type allows an operator to enable block and transaction event
+indexing by proxying it to an external PostgreSQL instance, allowing the events
+to be stored in relational models. Since the events are stored in an RDBMS, operators
+can leverage SQL to perform a series of rich and complex queries that are not
+supported by the `kv` indexer type. Since operators can leverage SQL directly,
+searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
+such query will fail.
+
+Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
+must explicitly create the relations prior to starting Tendermint and enabling
+the `psql` indexer type.
+
+Example:
+
+```shell
+$ psql ... -f state/indexer/sink/psql/schema.sql
+```
diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md
index befcef778..d31b8d71e 100644
--- a/docs/tutorials/go-built-in.md
+++ b/docs/tutorials/go-built-in.md
@@ -23,6 +23,8 @@ yourself with the syntax.
By following along with this guide, you'll create a Tendermint Core project
called kvstore, a (very) simple distributed BFT key-value store.
+> Note: please use a released version of Tendermint with this guide. The guides will work with the latest released version. Please do not use master.
+
## Built-in app vs external app
Running your application inside the same process as Tendermint Core will give
@@ -50,10 +52,13 @@ We'll start by creating a new Go project.
```bash mkdir kvstore cd kvstore +go mod init github.com// ``` Inside the example directory create a `main.go` file with the following content: +> Note: there is no need to clone or fork Tendermint in this tutorial. + ```go package main From 0f53a590ff27fa1e395c0c1aad41b0a2974ff44f Mon Sep 17 00:00:00 2001 From: Daria K Date: Tue, 21 Sep 2021 15:35:40 +0500 Subject: [PATCH 009/174] readme: update discord links (#6965) This updates the Discord invite links. --- README.md | 4 ++-- docs/.vuepress/config.js | 2 +- docs/tools/debugging/pro.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5d62a2f23..3157a148e 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Or [Blockchain](), for shor [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint) [![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm) -[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T) +[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/cosmosnetwork) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) [![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) [![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge) @@ -33,7 +33,7 @@ Tendermint has been in the production of private and public environments, most n See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, we're happy to help. You can -contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T). +contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork). ## Security diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 59012fba1..f453dc1ef 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -78,7 +78,7 @@ module.exports = { }, footer: { question: { - text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' + text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' }, logo: '/logo-bw.svg', textLink: { diff --git a/docs/tools/debugging/pro.md b/docs/tools/debugging/pro.md index b43ed5cba..a248aa130 100644 --- a/docs/tools/debugging/pro.md +++ b/docs/tools/debugging/pro.md @@ -99,7 +99,7 @@ We’re hoping that these Tendermint tools will become de facto the first respon Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet? -Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements. +Join our [discord chat](https://discord.gg/cosmosnetwork), where we discuss the current issues and future improvements. 
—

From 5a13c7075b230a1398f849f1d209b70a44896b0d Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 21 Sep 2021 11:22:55 -0400
Subject: [PATCH 010/174] rfc: event system (#6957)

---
 docs/rfc/README.md                |   1 +
 docs/rfc/rfc-005-event-system.rst | 122 ++++++++++++++++++++++++++++++
 2 files changed, 123 insertions(+)
 create mode 100644 docs/rfc/rfc-005-event-system.rst

diff --git a/docs/rfc/README.md b/docs/rfc/README.md
index 3af5a33e8..c3adfa08a 100644
--- a/docs/rfc/README.md
+++ b/docs/rfc/README.md
@@ -42,5 +42,6 @@ sections.
 - [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md)
 - [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
 - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
+- [RFC-005: Event System](./rfc-005-event-system.rst)

diff --git a/docs/rfc/rfc-005-event-system.rst b/docs/rfc/rfc-005-event-system.rst
new file mode 100644
index 000000000..b4a00b43d
--- /dev/null
+++ b/docs/rfc/rfc-005-event-system.rst
@@ -0,0 +1,122 @@
+=====================
+RFC 005: Event System
+=====================
+
+Changelog
+---------
+
+- 2021-09-17: Initial Draft (@tychoish)
+
+Abstract
+--------
+
+The event system within Tendermint, which supports a lot of core
+functionality, also represents a major infrastructural liability. As part of
+our upcoming review of the RPC interfaces and our ongoing thoughts about
+stability and performance, as well as the preparation for Tendermint 1.0, we
+should revisit the design and implementation of the event system. This
+document discusses both the current state of the system and potential
+directions for future improvement.
+
+Background
+----------
+
+Current State of Events
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The event system makes it possible for clients, both internal and external,
+to receive notifications of state replication events, such as new blocks,
+new transactions, and validator set changes, as well as intermediate events
+during consensus. Because the event system is very cross-cutting, the
+behavior and performance of the event publication and subscription system
+have huge impacts on all of Tendermint.
+
+The subscription service is exposed over the RPC interface, but also powers
+the indexing (e.g., to an external database) and is the mechanism by which
+`BroadcastTxCommit` is able to wait for transactions to land in a block.
+
+The current pubsub mechanism relies on a couple of buffered channels,
+primarily between all event creators and subscribers, but also for each
+subscription. The result of this design is that, in some situations, with the
+right collection of slow subscription consumers, the event system can put
+backpressure on the consensus state machine and message gossiping in the
+network, thereby causing nodes to lag.
+
+Improvements
+~~~~~~~~~~~~
+
+The current system relies on implicit, bounded queues built out of the
+buffered channels, and, though threadsafe, it can force all activity within
+Tendermint to serialize, which does not need to happen. Additionally,
+timeouts for subscription consumers, related to the implementation of the
+RPC layer, may complicate the use of the system.
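To make the backpressure problem concrete, here is a toy, self-contained sketch (not Tendermint's actual pubsub code) of how a single slow consumer behind a small buffered channel stalls its publisher once the buffer fills:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Each subscription gets a small buffered channel, as in the current design.
	slow := make(chan int, 2)

	// A slow consumer, e.g. an RPC client that drains events lazily.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for ev := range slow {
			time.Sleep(100 * time.Millisecond)
			fmt.Println("consumed event", ev)
		}
	}()

	// The publisher stands in for consensus: once two events sit undelivered,
	// every further send blocks until the consumer catches up.
	for i := 0; i < 5; i++ {
		start := time.Now()
		slow <- i
		fmt.Printf("published event %d after %v\n", i, time.Since(start))
	}
	close(slow)
	<-done
}
```

In the real system the publisher is the consensus state machine, so a stalled send translates directly into a lagging node.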
+
+References
+~~~~~~~~~~
+
+- Legacy Implementation
+  - `publication of events `_
+  - `send operation `_
+  - `send loop `_
+- Related RFCs
+  - `RFC 002: IPC Ecosystem <./rfc-002-ipc-ecosystem.md>`_
+  - `RFC 003: Performance Questions <./rfc-003-performance-questions.md>`_
+
+Discussion
+----------
+
+Changes to Published Events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As part of this process, the Tendermint team should do a study of the existing
+event types and ensure that there are viable production use cases for
+subscriptions to all event types. Instinctively it seems plausible that some
+of the events may not be usable outside of Tendermint (e.g. ``TimeoutWait``
+or ``NewRoundStep``), and it might make sense to remove them. Certainly, it
+would be good to make sure that we don't maintain infrastructure for unused or
+un-useful messages indefinitely.
+
+Blocking Subscription
+~~~~~~~~~~~~~~~~~~~~~
+
+The blocking subscription mechanism makes it possible to have *send*
+operations into the subscription channel be un-buffered (the event processing
+channel is still buffered). In the blocking case, events from one subscription
+can block processing of that event for other non-blocking subscriptions. The
+main use case, it seems, for blocking subscriptions is ensuring that a
+transaction has been committed to a block for ``BroadcastTxCommit``. Removing
+blocking subscriptions entirely, and potentially finding another way to
+implement ``BroadcastTxCommit``, could lead to important simplifications and
+improvements to throughput without requiring large changes.
+
+Subscription Identification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before `#6386 `_, all
+subscriptions were identified by the combination of a client ID and a query,
+and with that change, it became possible to identify any subscription given
+only an ID. However, compatibility with the legacy identification means that
+there is a good deal of legacy code, as well as client-side efficiency, that
+could be improved.
+
+Pubsub Changes
+~~~~~~~~~~~~~~
+
+The pubsub core should be implemented in a way that removes the possibility
+of backpressure from the event system impacting the core system, *or* of one
+subscription impacting the behavior of another area of the
+system. Additionally, because the current system is implemented entirely in
+terms of a collection of buffered channels, the event system (and large
+numbers of subscriptions) can be a source of memory pressure.
+
+These changes could include:
+
+- explicit cancellation and timeouts promulgated from callers (e.g. RPC
+  endpoints); this should be done using contexts.
+
+- the subscription system should be able to spill to disk to avoid putting
+  memory pressure on the core behavior of the node (consensus, gossip).
+
+- subscriptions implemented as cursors rather than channels, with either
+  condition variables to simulate the existing "push" API or a client-side
+  iterator API with some kind of long-polling-type interface (a toy sketch
+  of this idea follows).
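To illustrate the last bullet: in a cursor-style design the publisher only appends to a log, and each consumer pulls at its own pace under its own context, so a slow consumer falls behind without blocking anyone. All names below (`eventLog`, `Cursor`, `Next`) are invented for this sketch and are not a proposed API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// ErrClosed is returned once the log is closed and fully drained.
var ErrClosed = errors.New("event log closed")

// eventLog is a hypothetical append-only buffer; a real implementation
// could spill to disk instead of holding everything in memory.
type eventLog struct {
	mu     sync.Mutex
	events []string
	closed bool
}

// Publish appends an event and never blocks on consumers.
func (l *eventLog) Publish(ev string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.events = append(l.events, ev)
}

// Close marks the log finished; cursors drain what remains, then stop.
func (l *eventLog) Close() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.closed = true
}

// Cursor is a client-side iterator: consumers pull at their own pace, so a
// slow consumer only falls behind in the log rather than stalling producers.
type Cursor struct {
	log  *eventLog
	next int
}

// Next returns the next event, honoring the caller's context for
// cancellation and timeouts.
func (c *Cursor) Next(ctx context.Context) (string, error) {
	for {
		c.log.mu.Lock()
		if c.next < len(c.log.events) {
			ev := c.log.events[c.next]
			c.next++
			c.log.mu.Unlock()
			return ev, nil
		}
		closed := c.log.closed
		c.log.mu.Unlock()
		if closed {
			return "", ErrClosed
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(10 * time.Millisecond): // polling; a condvar would avoid this
		}
	}
}

func main() {
	log := &eventLog{}
	cur := &Cursor{log: log}

	go func() {
		for i := 0; i < 3; i++ {
			log.Publish(fmt.Sprintf("block %d", i))
		}
		log.Close()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	for {
		ev, err := cur.Next(ctx)
		if err != nil {
			fmt.Println("done:", err)
			return
		}
		fmt.Println("got:", ev)
	}
}
```

A production version would replace the polling loop with a condition variable and could spill the log to disk, per the bullets above.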
From 07d10184a11fd2a155ad251df3544badf495ea8a Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 22 Sep 2021 12:32:49 -0400 Subject: [PATCH 011/174] inspect: remove duplicated construction path (#6966) --- cmd/tendermint/commands/inspect.go | 28 ++----------------- {inspect => internal/inspect}/doc.go | 0 {inspect => internal/inspect}/inspect.go | 5 ++-- {inspect => internal/inspect}/inspect_test.go | 10 +++++-- {inspect => internal/inspect}/rpc/rpc.go | 0 5 files changed, 11 insertions(+), 32 deletions(-) rename {inspect => internal/inspect}/doc.go (100%) rename {inspect => internal/inspect}/inspect.go (96%) rename {inspect => internal/inspect}/inspect_test.go (98%) rename {inspect => internal/inspect}/rpc/rpc.go (100%) diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go index de31d33d4..3cd6ef572 100644 --- a/cmd/tendermint/commands/inspect.go +++ b/cmd/tendermint/commands/inspect.go @@ -8,12 +8,7 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer/sink" - "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/internal/inspect" ) // InspectCmd is the command for starting an inspect server. @@ -55,29 +50,10 @@ func runInspect(cmd *cobra.Command, args []string) error { cancel() }() - blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config}) + ins, err := inspect.NewFromConfig(logger, config) if err != nil { return err } - blockStore := store.NewBlockStore(blockStoreDB) - stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config}) - if err != nil { - if err := blockStoreDB.Close(); err != nil { - logger.Error("error closing block store db", "error", err) - } - return err - } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) - if err != nil { - return err - } - sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID) - if err != nil { - return err - } - stateStore := state.NewStore(stateDB) - - ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger) logger.Info("starting inspect server") if err := ins.Run(ctx); err != nil { diff --git a/inspect/doc.go b/internal/inspect/doc.go similarity index 100% rename from inspect/doc.go rename to internal/inspect/doc.go diff --git a/inspect/inspect.go b/internal/inspect/inspect.go similarity index 96% rename from inspect/inspect.go rename to internal/inspect/inspect.go index 38bc9ed5d..f27baa6ef 100644 --- a/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -7,7 +7,7 @@ import ( "net" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect/rpc" + "github.com/tendermint/tendermint/internal/inspect/rpc" "github.com/tendermint/tendermint/libs/log" tmstrings "github.com/tendermint/tendermint/libs/strings" rpccore "github.com/tendermint/tendermint/rpc/core" @@ -58,7 +58,7 @@ func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexe } // NewFromConfig constructs an Inspector using the values defined in the passed in config. 
-func NewFromConfig(cfg *config.Config) (*Inspector, error) { +func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg}) if err != nil { return nil, err @@ -76,7 +76,6 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) ss := state.NewStore(sDB) return New(cfg.RPC, bs, ss, sinks, logger), nil } diff --git a/inspect/inspect_test.go b/internal/inspect/inspect_test.go similarity index 98% rename from inspect/inspect_test.go rename to internal/inspect/inspect_test.go index c2a1df571..6d814208f 100644 --- a/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/require" abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/inspect" + "github.com/tendermint/tendermint/internal/inspect" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" @@ -28,10 +28,12 @@ import ( func TestInspectConstructor(t *testing.T) { cfg := config.ResetTestRoot("test") + testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { - d, err := inspect.NewFromConfig(cfg) + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) require.NoError(t, err) require.NotNil(t, d) }) @@ -40,10 +42,12 @@ func TestInspectConstructor(t *testing.T) { func TestInspectRun(t *testing.T) { cfg := config.ResetTestRoot("test") + testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { - d, err := inspect.NewFromConfig(cfg) + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) stoppedWG := &sync.WaitGroup{} diff --git a/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go similarity index 100% rename from inspect/rpc/rpc.go rename to internal/inspect/rpc/rpc.go From 638346500dbd82f106e2782830a4ededa27f17e6 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 22 Sep 2021 12:48:38 -0400 Subject: [PATCH 012/174] ci: reduce number of groups for 0.34 e2e runs (#6968) --- .github/workflows/e2e-nightly-34x.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 9c3b74cda..1b076d2ea 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -37,7 +37,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly + run: ./build/generator -g 2 -d networks/nightly - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e From 1c4950dbd2f842affa5418086bd226086c73b60f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 22 Sep 2021 13:04:25 -0400 Subject: [PATCH 013/174] state: move package to internal (#6964) --- 
cmd/tendermint/commands/reindex_event.go | 10 +++++----- cmd/tendermint/commands/reindex_event_test.go | 4 ++-- internal/blocksync/v0/reactor.go | 2 +- internal/blocksync/v0/reactor_test.go | 4 ++-- internal/blocksync/v2/io.go | 2 +- internal/blocksync/v2/processor.go | 2 +- internal/blocksync/v2/processor_context.go | 2 +- internal/blocksync/v2/processor_test.go | 2 +- internal/blocksync/v2/reactor.go | 2 +- internal/blocksync/v2/reactor_test.go | 4 ++-- internal/blocksync/v2/scheduler_test.go | 2 +- internal/consensus/byzantine_test.go | 2 +- internal/consensus/common_test.go | 2 +- internal/consensus/mempool_test.go | 2 +- internal/consensus/mocks/cons_sync_reactor.go | 2 +- internal/consensus/mocks/fast_sync_reactor.go | 2 +- internal/consensus/reactor.go | 2 +- internal/consensus/reactor_test.go | 4 ++-- internal/consensus/replay.go | 2 +- internal/consensus/replay_file.go | 2 +- internal/consensus/replay_test.go | 4 ++-- internal/consensus/state.go | 2 +- internal/consensus/wal_generator.go | 2 +- internal/evidence/pool.go | 2 +- internal/evidence/pool_test.go | 6 +++--- internal/evidence/reactor_test.go | 2 +- internal/evidence/verify_test.go | 4 ++-- internal/inspect/inspect.go | 8 ++++---- internal/inspect/inspect_test.go | 6 +++--- internal/inspect/rpc/rpc.go | 6 +++--- {rpc => internal/rpc}/core/CONTRIBUTING.md | 0 {rpc => internal/rpc}/core/README.md | 0 {rpc => internal/rpc}/core/abci.go | 2 +- {rpc => internal/rpc}/core/blocks.go | 4 ++-- {rpc => internal/rpc}/core/blocks_test.go | 4 ++-- {rpc => internal/rpc}/core/consensus.go | 2 +- {rpc => internal/rpc}/core/dev.go | 2 +- {rpc => internal/rpc}/core/doc.go | 0 {rpc => internal/rpc}/core/doc_template.txt | 0 {rpc => internal/rpc}/core/env.go | 6 +++--- {rpc => internal/rpc}/core/env_test.go | 0 {rpc => internal/rpc}/core/events.go | 2 +- {rpc => internal/rpc}/core/evidence.go | 2 +- {rpc => internal/rpc}/core/health.go | 2 +- {rpc => internal/rpc}/core/mempool.go | 2 +- {rpc => internal/rpc}/core/net.go | 2 +- {rpc => internal/rpc}/core/net_test.go | 0 {rpc => internal/rpc}/core/routes.go | 0 {rpc => internal/rpc}/core/status.go | 2 +- {rpc => internal/rpc}/core/tx.go | 4 ++-- {state => internal/state}/errors.go | 0 {state => internal/state}/execution.go | 0 {state => internal/state}/execution_test.go | 6 +++--- {state => internal/state}/export_test.go | 0 {state => internal/state}/helpers_test.go | 4 ++-- {state => internal/state}/indexer/block/kv/kv.go | 2 +- {state => internal/state}/indexer/block/kv/kv_test.go | 2 +- {state => internal/state}/indexer/block/kv/util.go | 0 {state => internal/state}/indexer/block/null/null.go | 2 +- {state => internal/state}/indexer/doc.go | 0 {state => internal/state}/indexer/eventsink.go | 0 {state => internal/state}/indexer/indexer.go | 0 {state => internal/state}/indexer/indexer_service.go | 0 .../state}/indexer/indexer_service_test.go | 6 +++--- {state => internal/state}/indexer/mocks/event_sink.go | 2 +- {state => internal/state}/indexer/query_range.go | 0 {state => internal/state}/indexer/sink/kv/kv.go | 6 +++--- {state => internal/state}/indexer/sink/kv/kv_test.go | 4 ++-- {state => internal/state}/indexer/sink/null/null.go | 2 +- .../state}/indexer/sink/null/null_test.go | 2 +- {state => internal/state}/indexer/sink/psql/psql.go | 2 +- .../state}/indexer/sink/psql/psql_test.go | 2 +- {state => internal/state}/indexer/sink/psql/schema.sql | 0 {state => internal/state}/indexer/sink/sink.go | 8 ++++---- {state => internal/state}/indexer/tx/kv/kv.go | 2 +- 
.../state}/indexer/tx/kv/kv_bench_test.go | 0 {state => internal/state}/indexer/tx/kv/kv_test.go | 2 +- {state => internal/state}/indexer/tx/kv/utils.go | 0 {state => internal/state}/indexer/tx/kv/utils_test.go | 0 {state => internal/state}/indexer/tx/null/null.go | 2 +- {state => internal/state}/metrics.go | 0 {state => internal/state}/mocks/block_store.go | 0 {state => internal/state}/mocks/event_sink.go | 2 +- {state => internal/state}/mocks/evidence_pool.go | 2 +- {state => internal/state}/mocks/store.go | 2 +- {state => internal/state}/services.go | 0 {state => internal/state}/state.go | 0 {state => internal/state}/state_test.go | 4 ++-- {state => internal/state}/store.go | 0 {state => internal/state}/store_test.go | 2 +- {state => internal/state}/test/factory/block.go | 2 +- {state => internal/state}/time.go | 0 {state => internal/state}/time_test.go | 0 {state => internal/state}/tx_filter.go | 0 {state => internal/state}/tx_filter_test.go | 2 +- {state => internal/state}/validation.go | 0 {state => internal/state}/validation_test.go | 6 +++--- internal/statesync/mocks/state_provider.go | 2 +- internal/statesync/reactor.go | 2 +- internal/statesync/reactor_test.go | 2 +- internal/statesync/stateprovider.go | 2 +- internal/statesync/syncer.go | 2 +- internal/statesync/syncer_test.go | 2 +- light/provider/http/http.go | 2 +- light/proxy/routes.go | 2 +- light/rpc/client.go | 2 +- node/node.go | 4 ++-- node/node_test.go | 4 ++-- node/setup.go | 6 +++--- proto/tendermint/state/types.pb.go | 4 ++-- proto/tendermint/statesync/types.pb.go | 4 ++-- rpc/client/event_test.go | 2 +- rpc/client/examples_test.go | 2 +- rpc/client/helpers_test.go | 2 +- rpc/client/http/http.go | 2 +- rpc/client/http/ws.go | 2 +- rpc/client/interface.go | 2 +- rpc/client/local/local.go | 4 ++-- rpc/client/mock/abci.go | 2 +- rpc/client/mock/abci_test.go | 2 +- rpc/client/mock/client.go | 4 ++-- rpc/client/mock/status.go | 2 +- rpc/client/mock/status_test.go | 2 +- rpc/client/mocks/client.go | 2 +- rpc/client/rpc_test.go | 2 +- rpc/{core/types => coretypes}/responses.go | 0 rpc/{core/types => coretypes}/responses_test.go | 0 rpc/grpc/api.go | 2 +- rpc/grpc/client_server.go | 2 +- rpc/jsonrpc/server/http_json_handler.go | 2 +- rpc/jsonrpc/server/http_uri_handler.go | 2 +- rpc/jsonrpc/server/ws_handler.go | 2 +- rpc/test/helpers.go | 2 +- store/store_test.go | 4 ++-- test/e2e/runner/perturb.go | 2 +- test/e2e/runner/rpc.go | 2 +- test/e2e/tests/e2e_test.go | 2 +- tools/tm-signer-harness/internal/test_harness.go | 2 +- 138 files changed, 154 insertions(+), 154 deletions(-) rename {rpc => internal/rpc}/core/CONTRIBUTING.md (100%) rename {rpc => internal/rpc}/core/README.md (100%) rename {rpc => internal/rpc}/core/abci.go (95%) rename {rpc => internal/rpc}/core/blocks.go (98%) rename {rpc => internal/rpc}/core/blocks_test.go (97%) rename {rpc => internal/rpc}/core/consensus.go (98%) rename {rpc => internal/rpc}/core/dev.go (85%) rename {rpc => internal/rpc}/core/doc.go (100%) rename {rpc => internal/rpc}/core/doc_template.txt (100%) rename {rpc => internal/rpc}/core/env.go (96%) rename {rpc => internal/rpc}/core/env_test.go (100%) rename {rpc => internal/rpc}/core/events.go (98%) rename {rpc => internal/rpc}/core/evidence.go (93%) rename {rpc => internal/rpc}/core/health.go (86%) rename {rpc => internal/rpc}/core/mempool.go (99%) rename {rpc => internal/rpc}/core/net.go (98%) rename {rpc => internal/rpc}/core/net_test.go (100%) rename {rpc => internal/rpc}/core/routes.go (100%) rename {rpc => internal/rpc}/core/status.go 
(98%) rename {rpc => internal/rpc}/core/tx.go (97%) rename {state => internal/state}/errors.go (100%) rename {state => internal/state}/execution.go (100%) rename {state => internal/state}/execution_test.go (98%) rename {state => internal/state}/export_test.go (100%) rename {state => internal/state}/helpers_test.go (98%) rename {state => internal/state}/indexer/block/kv/kv.go (99%) rename {state => internal/state}/indexer/block/kv/kv_test.go (97%) rename {state => internal/state}/indexer/block/kv/util.go (100%) rename {state => internal/state}/indexer/block/null/null.go (91%) rename {state => internal/state}/indexer/doc.go (100%) rename {state => internal/state}/indexer/eventsink.go (100%) rename {state => internal/state}/indexer/indexer.go (100%) rename {state => internal/state}/indexer/indexer_service.go (100%) rename {state => internal/state}/indexer/indexer_service_test.go (95%) rename {state => internal/state}/indexer/mocks/event_sink.go (98%) rename {state => internal/state}/indexer/query_range.go (100%) rename {state => internal/state}/indexer/sink/kv/kv.go (87%) rename {state => internal/state}/indexer/sink/kv/kv_test.go (98%) rename {state => internal/state}/indexer/sink/null/null.go (94%) rename {state => internal/state}/indexer/sink/null/null_test.go (93%) rename {state => internal/state}/indexer/sink/psql/psql.go (99%) rename {state => internal/state}/indexer/sink/psql/psql_test.go (99%) rename {state => internal/state}/indexer/sink/psql/schema.sql (100%) rename {state => internal/state}/indexer/sink/sink.go (85%) rename {state => internal/state}/indexer/tx/kv/kv.go (99%) rename {state => internal/state}/indexer/tx/kv/kv_bench_test.go (100%) rename {state => internal/state}/indexer/tx/kv/kv_test.go (99%) rename {state => internal/state}/indexer/tx/kv/utils.go (100%) rename {state => internal/state}/indexer/tx/kv/utils_test.go (100%) rename {state => internal/state}/indexer/tx/null/null.go (93%) rename {state => internal/state}/metrics.go (100%) rename {state => internal/state}/mocks/block_store.go (100%) rename {state => internal/state}/mocks/event_sink.go (98%) rename {state => internal/state}/mocks/evidence_pool.go (96%) rename {state => internal/state}/mocks/store.go (98%) rename {state => internal/state}/services.go (100%) rename {state => internal/state}/state.go (100%) rename {state => internal/state}/state_test.go (99%) rename {state => internal/state}/store.go (100%) rename {state => internal/state}/store_test.go (99%) rename {state => internal/state}/test/factory/block.go (96%) rename {state => internal/state}/time.go (100%) rename {state => internal/state}/time_test.go (100%) rename {state => internal/state}/tx_filter.go (100%) rename {state => internal/state}/tx_filter_test.go (94%) rename {state => internal/state}/validation.go (100%) rename {state => internal/state}/validation_test.go (98%) rename rpc/{core/types => coretypes}/responses.go (100%) rename rpc/{core/types => coretypes}/responses_test.go (100%) diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 1dbce2f74..b5e98a29a 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -11,11 +11,11 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/progressbar" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/state" - 
"github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink/kv" - "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 5d9459f5a..452a6b2a8 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -11,9 +11,9 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/mocks" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index 4ddfa4edc..645b39b4c 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -9,11 +9,11 @@ import ( bc "github.com/tendermint/tendermint/internal/blocksync" cons "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmSync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go index 0d6bc9b18..9a79c8cf3 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -15,11 +15,11 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go index 743428516..b272db3ae 100644 --- a/internal/blocksync/v2/io.go +++ b/internal/blocksync/v2/io.go @@ -5,8 +5,8 @@ import ( "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/state" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go index 
b448e7d8a..12959d5b1 100644 --- a/internal/blocksync/v2/processor.go +++ b/internal/blocksync/v2/processor.go @@ -3,7 +3,7 @@ package v2 import ( "fmt" - tmState "github.com/tendermint/tendermint/state" + tmState "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go index bc6852565..caf25e09c 100644 --- a/internal/blocksync/v2/processor_context.go +++ b/internal/blocksync/v2/processor_context.go @@ -4,7 +4,7 @@ import ( "fmt" cons "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go index f7d51112b..c9b8c62e1 100644 --- a/internal/blocksync/v2/processor_test.go +++ b/internal/blocksync/v2/processor_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - tmState "github.com/tendermint/tendermint/state" + tmState "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go index caa5d73f0..bd2b85038 100644 --- a/internal/blocksync/v2/reactor.go +++ b/internal/blocksync/v2/reactor.go @@ -12,10 +12,10 @@ import ( cons "github.com/tendermint/tendermint/internal/consensus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go index 3e8b03ebb..ca8ba41a0 100644 --- a/internal/blocksync/v2/reactor_test.go +++ b/internal/blocksync/v2/reactor_test.go @@ -22,12 +22,12 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" tmstore "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go index 91fac3637..d2c4aab03 100644 --- a/internal/blocksync/v2/scheduler_test.go +++ b/internal/blocksync/v2/scheduler_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index f4a74ea6f..a202a4738 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -16,11 +16,11 @@ import ( tmsync 
"github.com/tendermint/tendermint/internal/libs/sync" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index f5c31d02a..b7fdd984e 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -25,6 +25,7 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" @@ -33,7 +34,6 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 5edec248a..8eb1b3f60 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -16,7 +16,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/internal/mempool" - sm "github.com/tendermint/tendermint/state" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index 263969798..5ac592f0d 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" ) // ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go index b7f521ff2..9da851065 100644 --- a/internal/consensus/mocks/fast_sync_reactor.go +++ b/internal/consensus/mocks/fast_sync_reactor.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" time "time" ) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 915756488..ca3f5d353 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -8,13 +8,13 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" 
"github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 571b6d832..bb4f72d8c 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -23,12 +23,12 @@ import ( mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + sm "github.com/tendermint/tendermint/internal/state" + statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" - sm "github.com/tendermint/tendermint/state" - statemocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 2660797c8..60ad0c041 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -12,8 +12,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index b59024ae1..a3a39fbbc 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -14,10 +14,10 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 591796245..c5623e36d 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -27,14 +27,14 @@ import ( cryptoenc "github.com/tendermint/tendermint/crypto/encoding" mempl "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" 
"github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 16f2bb0bd..165318c9c 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -17,6 +17,7 @@ import ( cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/libs/fail" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" @@ -27,7 +28,6 @@ import ( "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 4a47c7cc5..b92230d2c 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -17,9 +17,9 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 2a48fe032..f342dec4c 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -14,9 +14,9 @@ import ( dbm "github.com/tendermint/tm-db" clist "github.com/tendermint/tendermint/internal/libs/clist" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index ac5f27b8e..5be2ec318 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -13,11 +13,11 @@ import ( "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 1cf995731..b8055263d 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -20,9 +20,9 @@ import ( "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 85f997f2a..2bb3d8008 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -15,11 +15,11 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index f27baa6ef..c48776d3c 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -8,12 +8,12 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect/rpc" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/libs/log" tmstrings "github.com/tendermint/tendermint/libs/strings" - rpccore "github.com/tendermint/tendermint/rpc/core" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 6d814208f..972932440 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -16,13 +16,13 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/state/indexer" + indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" + statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" - "github.com/tendermint/tendermint/state/indexer" - indexermocks "github.com/tendermint/tendermint/state/indexer/mocks" - statemocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 76dcda4eb..3043ba6b3 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -9,12 +9,12 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/rpc/core" 
"github.com/tendermint/tendermint/rpc/jsonrpc/server" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/CONTRIBUTING.md b/internal/rpc/core/CONTRIBUTING.md similarity index 100% rename from rpc/core/CONTRIBUTING.md rename to internal/rpc/core/CONTRIBUTING.md diff --git a/rpc/core/README.md b/internal/rpc/core/README.md similarity index 100% rename from rpc/core/README.md rename to internal/rpc/core/README.md diff --git a/rpc/core/abci.go b/internal/rpc/core/abci.go similarity index 95% rename from rpc/core/abci.go rename to internal/rpc/core/abci.go index 885ae2c23..fe497b336 100644 --- a/rpc/core/abci.go +++ b/internal/rpc/core/abci.go @@ -4,7 +4,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/blocks.go b/internal/rpc/core/blocks.go similarity index 98% rename from rpc/core/blocks.go rename to internal/rpc/core/blocks.go index 78b567583..909d2228f 100644 --- a/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -4,12 +4,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go similarity index 97% rename from rpc/core/blocks_test.go rename to internal/rpc/core/blocks_test.go index 29db2f094..5b602e339 100644 --- a/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -10,10 +10,10 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + sm "github.com/tendermint/tendermint/internal/state" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/consensus.go b/internal/rpc/core/consensus.go similarity index 98% rename from rpc/core/consensus.go rename to internal/rpc/core/consensus.go index b067e1063..6fbba0c0f 100644 --- a/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -5,7 +5,7 @@ import ( cm "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/dev.go b/internal/rpc/core/dev.go similarity index 85% rename from rpc/core/dev.go rename to internal/rpc/core/dev.go index 0e365cdcc..fde7aef99 100644 --- a/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -1,7 +1,7 @@ package core import ( - ctypes 
"github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/doc.go b/internal/rpc/core/doc.go similarity index 100% rename from rpc/core/doc.go rename to internal/rpc/core/doc.go diff --git a/rpc/core/doc_template.txt b/internal/rpc/core/doc_template.txt similarity index 100% rename from rpc/core/doc_template.txt rename to internal/rpc/core/doc_template.txt diff --git a/rpc/core/env.go b/internal/rpc/core/env.go similarity index 96% rename from rpc/core/env.go rename to internal/rpc/core/env.go index 9aaa499a2..44264e949 100644 --- a/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -11,12 +11,12 @@ import ( mempl "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/env_test.go b/internal/rpc/core/env_test.go similarity index 100% rename from rpc/core/env_test.go rename to internal/rpc/core/env_test.go diff --git a/rpc/core/events.go b/internal/rpc/core/events.go similarity index 98% rename from rpc/core/events.go rename to internal/rpc/core/events.go index e56295c52..d26a4d90f 100644 --- a/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -7,7 +7,7 @@ import ( tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/evidence.go b/internal/rpc/core/evidence.go similarity index 93% rename from rpc/core/evidence.go rename to internal/rpc/core/evidence.go index e071c5a7e..311180bdd 100644 --- a/rpc/core/evidence.go +++ b/internal/rpc/core/evidence.go @@ -3,7 +3,7 @@ package core import ( "fmt" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/health.go b/internal/rpc/core/health.go similarity index 86% rename from rpc/core/health.go rename to internal/rpc/core/health.go index 855911d83..fa92d57ab 100644 --- a/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -1,7 +1,7 @@ package core import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/mempool.go b/internal/rpc/core/mempool.go similarity index 99% rename from rpc/core/mempool.go rename to internal/rpc/core/mempool.go index 690b0a295..b44b9fb33 100644 --- a/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -9,7 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/internal/mempool" tmpubsub 
"github.com/tendermint/tendermint/libs/pubsub" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/net.go b/internal/rpc/core/net.go similarity index 98% rename from rpc/core/net.go rename to internal/rpc/core/net.go index 5b1672e26..633359449 100644 --- a/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/tendermint/tendermint/internal/p2p" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/core/net_test.go b/internal/rpc/core/net_test.go similarity index 100% rename from rpc/core/net_test.go rename to internal/rpc/core/net_test.go diff --git a/rpc/core/routes.go b/internal/rpc/core/routes.go similarity index 100% rename from rpc/core/routes.go rename to internal/rpc/core/routes.go diff --git a/rpc/core/status.go b/internal/rpc/core/status.go similarity index 98% rename from rpc/core/status.go rename to internal/rpc/core/status.go index c5a412369..567d73be4 100644 --- a/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -5,7 +5,7 @@ import ( "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/tx.go b/internal/rpc/core/tx.go similarity index 97% rename from rpc/core/tx.go rename to internal/rpc/core/tx.go index eb6c73858..75a537942 100644 --- a/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -5,12 +5,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/errors.go b/internal/state/errors.go similarity index 100% rename from state/errors.go rename to internal/state/errors.go diff --git a/state/execution.go b/internal/state/execution.go similarity index 100% rename from state/execution.go rename to internal/state/execution.go diff --git a/state/execution_test.go b/internal/state/execution_test.go similarity index 98% rename from state/execution_test.go rename to internal/state/execution_test.go index e4d78e1b7..69085694b 100644 --- a/state/execution_test.go +++ b/internal/state/execution_test.go @@ -17,11 +17,11 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" mmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf 
"github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" diff --git a/state/export_test.go b/internal/state/export_test.go similarity index 100% rename from state/export_test.go rename to internal/state/export_test.go diff --git a/state/helpers_test.go b/internal/state/helpers_test.go similarity index 98% rename from state/helpers_test.go rename to internal/state/helpers_test.go index 3590e642d..f20475633 100644 --- a/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -13,13 +13,13 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go similarity index 99% rename from state/indexer/block/kv/kv.go rename to internal/state/indexer/block/kv/kv.go index bc90eadf5..d52f06c96 100644 --- a/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go similarity index 97% rename from state/indexer/block/kv/kv_test.go rename to internal/state/indexer/block/kv/kv_test.go index 2a342f870..2601c878b 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) diff --git a/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go similarity index 100% rename from state/indexer/block/kv/util.go rename to internal/state/indexer/block/kv/util.go diff --git a/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go similarity index 91% rename from state/indexer/block/null/null.go rename to internal/state/indexer/block/null/null.go index d36d8680e..9b28d93bb 100644 --- a/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/doc.go 
b/internal/state/indexer/doc.go similarity index 100% rename from state/indexer/doc.go rename to internal/state/indexer/doc.go diff --git a/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go similarity index 100% rename from state/indexer/eventsink.go rename to internal/state/indexer/eventsink.go diff --git a/state/indexer/indexer.go b/internal/state/indexer/indexer.go similarity index 100% rename from state/indexer/indexer.go rename to internal/state/indexer/indexer.go diff --git a/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go similarity index 100% rename from state/indexer/indexer_service.go rename to internal/state/indexer/indexer_service.go diff --git a/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go similarity index 95% rename from state/indexer/indexer_service_test.go rename to internal/state/indexer/indexer_service_test.go index 4d12cc86f..e630b07e9 100644 --- a/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -16,10 +16,10 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" + kv "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" tmlog "github.com/tendermint/tendermint/libs/log" - indexer "github.com/tendermint/tendermint/state/indexer" - kv "github.com/tendermint/tendermint/state/indexer/sink/kv" - psql "github.com/tendermint/tendermint/state/indexer/sink/psql" "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) diff --git a/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go similarity index 98% rename from state/indexer/mocks/event_sink.go rename to internal/state/indexer/mocks/event_sink.go index ce5b8ace5..98b32e935 100644 --- a/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/indexer/query_range.go b/internal/state/indexer/query_range.go similarity index 100% rename from state/indexer/query_range.go rename to internal/state/indexer/query_range.go diff --git a/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go similarity index 87% rename from state/indexer/sink/kv/kv.go rename to internal/state/indexer/sink/kv/kv.go index 7d51640d8..eda8da846 100644 --- a/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -4,10 +4,10 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" + kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvb "github.com/tendermint/tendermint/state/indexer/block/kv" - kvt "github.com/tendermint/tendermint/state/indexer/tx/kv" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" ) diff --git a/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go similarity index 98% rename from state/indexer/sink/kv/kv_test.go 
rename to internal/state/indexer/sink/kv/kv_test.go index a5d2dd81e..d59f4ea90 100644 --- a/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvtx "github.com/tendermint/tendermint/state/indexer/tx/kv" "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) diff --git a/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go similarity index 94% rename from state/indexer/sink/null/null.go rename to internal/state/indexer/sink/null/null.go index b5ad93ab4..f58142f21 100644 --- a/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/null/null_test.go b/internal/state/indexer/sink/null/null_test.go similarity index 93% rename from state/indexer/sink/null/null_test.go rename to internal/state/indexer/sink/null/null_test.go index eef63fd6e..15b77dc55 100644 --- a/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go similarity index 99% rename from state/indexer/sink/psql/psql.go rename to internal/state/indexer/sink/psql/psql.go index e452ed406..4db6f4435 100644 --- a/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -11,8 +11,8 @@ import ( "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go similarity index 99% rename from state/indexer/sink/psql/psql_test.go rename to internal/state/indexer/sink/psql/psql_test.go index e8a1ce833..f19bbfba7 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" // Register the Postgres database driver. 
diff --git a/state/indexer/sink/psql/schema.sql b/internal/state/indexer/sink/psql/schema.sql similarity index 100% rename from state/indexer/sink/psql/schema.sql rename to internal/state/indexer/sink/psql/schema.sql diff --git a/state/indexer/sink/sink.go b/internal/state/indexer/sink/sink.go similarity index 85% rename from state/indexer/sink/sink.go rename to internal/state/indexer/sink/sink.go index f9dfa54df..b4c41ec31 100644 --- a/state/indexer/sink/sink.go +++ b/internal/state/indexer/sink/sink.go @@ -5,10 +5,10 @@ import ( "strings" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink/kv" - "github.com/tendermint/tendermint/state/indexer/sink/null" - "github.com/tendermint/tendermint/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/null" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" ) // EventSinksFromConfig constructs a slice of indexer.EventSink using the provided diff --git a/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go similarity index 99% rename from state/indexer/tx/kv/kv.go rename to internal/state/indexer/tx/kv/kv.go index 080dbce2c..f0550f8f3 100644 --- a/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go similarity index 100% rename from state/indexer/tx/kv/kv_bench_test.go rename to internal/state/indexer/tx/kv/kv_bench_test.go diff --git a/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go similarity index 99% rename from state/indexer/tx/kv/kv_test.go rename to internal/state/indexer/tx/kv/kv_test.go index dd63dd9a4..064421f5f 100644 --- a/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -14,9 +14,9 @@ import ( db "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/tx/kv/utils.go b/internal/state/indexer/tx/kv/utils.go similarity index 100% rename from state/indexer/tx/kv/utils.go rename to internal/state/indexer/tx/kv/utils.go diff --git a/state/indexer/tx/kv/utils_test.go b/internal/state/indexer/tx/kv/utils_test.go similarity index 100% rename from state/indexer/tx/kv/utils_test.go rename to internal/state/indexer/tx/kv/utils_test.go diff --git a/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go similarity index 93% rename from state/indexer/tx/null/null.go rename to internal/state/indexer/tx/null/null.go index d92ed489e..0da7fc683 100644 --- a/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + 
"github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/state/metrics.go b/internal/state/metrics.go similarity index 100% rename from state/metrics.go rename to internal/state/metrics.go diff --git a/state/mocks/block_store.go b/internal/state/mocks/block_store.go similarity index 100% rename from state/mocks/block_store.go rename to internal/state/mocks/block_store.go diff --git a/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go similarity index 98% rename from state/mocks/event_sink.go rename to internal/state/mocks/event_sink.go index 749515ccf..b8a8fc464 100644 --- a/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go similarity index 96% rename from state/mocks/evidence_pool.go rename to internal/state/mocks/evidence_pool.go index bb33547d2..9f5e747e5 100644 --- a/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/state/mocks/store.go b/internal/state/mocks/store.go similarity index 98% rename from state/mocks/store.go rename to internal/state/mocks/store.go index 750bf7f29..af2408dd9 100644 --- a/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,7 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" diff --git a/state/services.go b/internal/state/services.go similarity index 100% rename from state/services.go rename to internal/state/services.go diff --git a/state/state.go b/internal/state/state.go similarity index 100% rename from state/state.go rename to internal/state/state.go diff --git a/state/state_test.go b/internal/state/state_test.go similarity index 99% rename from state/state_test.go rename to internal/state/state_test.go index 99d45bb62..102b206c2 100644 --- a/state/state_test.go +++ b/internal/state/state_test.go @@ -19,9 +19,9 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) diff --git a/state/store.go b/internal/state/store.go similarity index 100% rename from state/store.go rename to internal/state/store.go diff --git a/state/store_test.go b/internal/state/store_test.go similarity index 99% rename from state/store_test.go rename to internal/state/store_test.go index 5d32040b5..939878d40 100644 --- 
a/state/store_test.go +++ b/internal/state/store_test.go @@ -14,10 +14,10 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/state/test/factory/block.go b/internal/state/test/factory/block.go similarity index 96% rename from state/test/factory/block.go rename to internal/state/test/factory/block.go index b4eb83fa7..dfcf5ebd9 100644 --- a/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -3,8 +3,8 @@ package factory import ( "time" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/state/time.go b/internal/state/time.go similarity index 100% rename from state/time.go rename to internal/state/time.go diff --git a/state/time_test.go b/internal/state/time_test.go similarity index 100% rename from state/time_test.go rename to internal/state/time_test.go diff --git a/state/tx_filter.go b/internal/state/tx_filter.go similarity index 100% rename from state/tx_filter.go rename to internal/state/tx_filter.go diff --git a/state/tx_filter_test.go b/internal/state/tx_filter_test.go similarity index 94% rename from state/tx_filter_test.go rename to internal/state/tx_filter_test.go index d6236fcbf..27af28a40 100644 --- a/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + sm "github.com/tendermint/tendermint/internal/state" tmrand "github.com/tendermint/tendermint/libs/rand" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/state/validation.go b/internal/state/validation.go similarity index 100% rename from state/validation.go rename to internal/state/validation.go diff --git a/state/validation_test.go b/internal/state/validation_test.go similarity index 98% rename from state/validation_test.go rename to internal/state/validation_test.go index 151f2be61..328c67c08 100644 --- a/state/validation_test.go +++ b/internal/state/validation_test.go @@ -13,13 +13,13 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" memmock "github.com/tendermint/tendermint/internal/mempool/mock" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 538c619fc..b8d681631 100644 --- 
a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index c2f598a8c..196ac1a9d 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -15,12 +15,12 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 6ab41b36c..a0312428c 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -18,13 +18,13 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b58cb35de..9eb776e28 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -12,6 +12,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" lightprovider "github.com/tendermint/tendermint/light/provider" @@ -20,7 +21,6 @@ import ( lightdb "github.com/tendermint/tendermint/light/store/db" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 05cfffe64..43fbf99e4 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -12,10 +12,10 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" 
- sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 867c99b0f..14297c049 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -15,9 +15,9 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/proxy" proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/light/provider/http/http.go b/light/provider/http/http.go index ceea0f6d2..810e43ece 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/light/provider" rpcclient "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62d70f545..2fdc177dc 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -4,7 +4,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" lrpc "github.com/tendermint/tendermint/light/rpc" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" diff --git a/light/rpc/client.go b/light/rpc/client.go index 84761fb04..16d5a5427 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -16,7 +16,7 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/node/node.go b/node/node.go index fa75a432d..c2bf88fde 100644 --- a/node/node.go +++ b/node/node.go @@ -23,6 +23,8 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/internal/proxy" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -32,10 +34,8 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" - rpccore "github.com/tendermint/tendermint/rpc/core" grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" 
"github.com/tendermint/tendermint/types" ) diff --git a/node/node_test.go b/node/node_test.go index 7512c732f..aae7f8008 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -26,13 +26,13 @@ import ( "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/node/setup.go b/node/setup.go index a966da85c..6d403b0b0 100644 --- a/node/setup.go +++ b/node/setup.go @@ -26,14 +26,14 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada..ea827cc7a 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/state/types.proto +// source: tendermint/internal/state/types.proto package state @@ -419,7 +419,7 @@ func init() { proto.RegisterType((*State)(nil), "tendermint.state.State") } -func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } +func init() { proto.RegisterFile("tendermint/internal/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ // 763 bytes of a gzipped FileDescriptorProto diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 5541c2803..c0fa0f3a7 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/statesync/types.proto +// source: tendermint/internal/statesync/types.proto package statesync @@ -631,7 +631,7 @@ func init() { proto.RegisterType((*ParamsResponse)(nil), "tendermint.statesync.ParamsResponse") } -func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } +func init() { proto.RegisterFile("tendermint/internal/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } var fileDescriptor_a1c2869546ca7914 = []byte{ // 589 bytes of a gzipped FileDescriptorProto diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 4acd0fee9..e5d2c35f3 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -13,7 +13,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 8c4c4f277..e241404f3 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 3b78dfe5f..6d2c2883d 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" ) func TestWaitForHeight(t *testing.T) { diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 26a0ea5de..b66d9634f 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index afdaec861..2e08257d5 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -12,7 +12,7 @@ import ( tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 8244e9295..f63161118 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -25,7 +25,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/local/local.go 
b/rpc/client/local/local.go index 39c4295ac..2080e0d44 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -6,13 +6,13 @@ import ( "fmt" "time" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" - rpccore "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 960ee6501..0186ee9cc 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index d164b275a..206b8fac2 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 8ff474dd5..70003569d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -18,11 +18,11 @@ import ( "context" "reflect" + "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 6dd6a8d44..10da67008 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -4,7 +4,7 @@ import ( "context" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" ) // StatusMock returns the result specified by the Call diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 4f2f59f4c..37dce6095 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" ) func TestStatus(t *testing.T) { diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 8e4c7cbf5..3036c48f0 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -8,7 +8,7 @@ import ( context "context" - coretypes 
"github.com/tendermint/tendermint/rpc/core/types" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" log "github.com/tendermint/tendermint/libs/log" diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f8962fb35..5ad9e4244 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -24,7 +24,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/types/responses.go b/rpc/coretypes/responses.go similarity index 100% rename from rpc/core/types/responses.go rename to rpc/coretypes/responses.go diff --git a/rpc/core/types/responses_test.go b/rpc/coretypes/responses_test.go similarity index 100% rename from rpc/core/types/responses_test.go rename to rpc/coretypes/responses_test.go diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 27f8c97e4..4db46bbe3 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -4,7 +4,7 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" - core "github.com/tendermint/tendermint/rpc/core" + core "github.com/tendermint/tendermint/internal/rpc/core" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 2fb0abb67..e5164dc64 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -6,8 +6,8 @@ import ( "google.golang.org/grpc" + "github.com/tendermint/tendermint/internal/rpc/core" tmnet "github.com/tendermint/tendermint/libs/net" - "github.com/tendermint/tendermint/rpc/core" ) // Config is an gRPC server configuration. 
diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index bbb32b407..3e31d3ec1 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -12,7 +12,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index f5a2ecebc..a07401578 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -11,7 +11,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index a7b77dbd3..c6a8a99b8 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 3a9a11108..91cff8386 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -13,7 +13,7 @@ import ( tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" nm "github.com/tendermint/tendermint/node" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + ctypes "github.com/tendermint/tendermint/rpc/coretypes" core_grpc "github.com/tendermint/tendermint/rpc/grpc" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) diff --git a/store/store_test.go b/store/store_test.go index 2132d9aff..e00715eb7 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -14,11 +14,11 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index 900f75d73..ccb3f6c51 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index ca6b743eb..5cfa1d64d 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -7,7 +7,7 @@ import ( "time" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e 
"github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index acc3ac78a..a645fd7c1 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 4d333949a..96eaaaff0 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -12,8 +12,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" From d04b6c2a5e17c49094749eb0cc474fbe7163e6ec Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 22 Sep 2021 13:13:31 -0400 Subject: [PATCH 014/174] e2e: run multiple should use preserve (#6972) --- test/e2e/run-multiple.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh index 571a78a7f..6f4170bb0 100755 --- a/test/e2e/run-multiple.sh +++ b/test/e2e/run-multiple.sh @@ -21,7 +21,7 @@ for MANIFEST in "$@"; do START=$SECONDS echo "==> Running testnet: $MANIFEST" - if ! ./build/runner -f "$MANIFEST"; then + if ! ./build/runner -p -f "$MANIFEST"; then echo "==> Testnet $MANIFEST failed, dumping manifest..." cat "$MANIFEST" From 1995ef2572bbefaa3bede1cb49bdba4e53157c05 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 22 Sep 2021 14:26:35 -0700 Subject: [PATCH 015/174] rpc: Strip down the base RPC client interface. (#6971) * rpc: Strip down the base RPC client interface. Prior to this change, the RPC client interface requires implementing the entire Service interface, but most of the methods of Service are not needed by the concrete clients. Dissociate the Client interface from the Service interface. - Extract only those methods of Service that are necessary to make the existing clients work. - Update the clients to combine Start/Onstart and Stop/OnStop. This does not change what the clients do to start or stop. Only the websocket clients make use of this functionality anyway. The websocket implementation uses some plumbing from the BaseService helper. We should be able to excising that entirely, but the current interface dependencies among the clients would require a much larger change, and one that leaks into other (non-RPC) packages. As a less-invasive intermediate step, preserve the existing client behaviour (and tests) by extracting the necessary subset of the BaseService functionality to an analogous RunState helper for clients. I plan to obsolete that type in a future PR, but for now this makes a useful waypoint. Related: - Clean up client implementations. - Update mocks. 
--- rpc/client/helpers.go | 85 ++++++++++++++++++++++++++++++ rpc/client/http/http.go | 6 --- rpc/client/http/ws.go | 30 +++++------ rpc/client/interface.go | 17 ++++-- rpc/client/mock/client.go | 11 +--- rpc/client/mocks/client.go | 89 -------------------------------- rpc/jsonrpc/client/ws_client.go | 20 +++---- rpc/jsonrpc/server/ws_handler.go | 29 ++++++----- 8 files changed, 138 insertions(+), 149 deletions(-) diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 49598e814..289d947a9 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "sync" "time" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -82,3 +84,86 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( return nil, errors.New("timed out waiting for event") } } + +var ( + // ErrClientRunning is returned by Start when the client is already running. + ErrClientRunning = errors.New("client already running") + + // ErrClientNotRunning is returned by Stop when the client is not running. + ErrClientNotRunning = errors.New("client is not running") +) + +// RunState is a helper that a client implementation can embed to implement +// common plumbing for keeping track of run state and logging. +// +// TODO(creachadair): This type is a temporary measure, and will be removed. +// See the discussion on #6971. +type RunState struct { + Logger log.Logger + + mu sync.Mutex + name string + isRunning bool + quit chan struct{} +} + +// NewRunState returns a new unstarted run state tracker with the given logging +// label and log sink. If logger == nil, a no-op logger is provided by default. +func NewRunState(name string, logger log.Logger) *RunState { + if logger == nil { + logger = log.NewNopLogger() + } + return &RunState{ + name: name, + Logger: logger, + } +} + +// Start sets the state to running, or reports an error. +func (r *RunState) Start() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.isRunning { + r.Logger.Error("not starting client, it is already started", "client", r.name) + return ErrClientRunning + } + r.Logger.Info("starting client", "client", r.name) + r.isRunning = true + r.quit = make(chan struct{}) + return nil +} + +// Stop sets the state to not running, or reports an error. +func (r *RunState) Stop() error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.isRunning { + r.Logger.Error("not stopping client; it is already stopped", "client", r.name) + return ErrClientNotRunning + } + r.Logger.Info("stopping client", "client", r.name) + r.isRunning = false + close(r.quit) + return nil +} + +// SetLogger updates the log sink. +func (r *RunState) SetLogger(logger log.Logger) { + r.mu.Lock() + defer r.mu.Unlock() + r.Logger = logger +} + +// IsRunning reports whether the state is running. +func (r *RunState) IsRunning() bool { + r.mu.Lock() + defer r.mu.Unlock() + return r.isRunning +} + +// Quit returns a channel that is closed when a call to Stop succeeds. 
+func (r *RunState) Quit() <-chan struct{} { + r.mu.Lock() + defer r.mu.Unlock() + return r.quit +} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index b66d9634f..c3a0f379b 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -6,7 +6,6 @@ import ( "time" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" @@ -158,11 +157,6 @@ func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*H var _ rpcclient.Client = (*HTTP)(nil) -// SetLogger sets a logger. -func (c *HTTP) SetLogger(l log.Logger) { - c.wsEvents.SetLogger(l) -} - // Remote returns the remote network address in a string form. func (c *HTTP) Remote() string { return c.remote diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index 2e08257d5..1735ca9c3 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -10,14 +10,11 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmjson "github.com/tendermint/tendermint/libs/json" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) -var errNotRunning = errors.New("client is not running. Use .Start() method to start") - // WSOptions for the WS part of the HTTP client. type WSOptions struct { Path string // path (e.g. "/ws") @@ -48,7 +45,7 @@ func (wso WSOptions) Validate() error { // wsEvents is a wrapper around WSClient, which implements EventsClient. type wsEvents struct { - service.BaseService + *rpcclient.RunState ws *jsonrpcclient.WSClient mtx tmsync.RWMutex @@ -78,7 +75,7 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { w := &wsEvents{ subscriptions: make(map[string]*wsSubscription), } - w.BaseService = *service.NewBaseService(nil, "wsEvents", w) + w.RunState = rpcclient.NewRunState("wsEvents", nil) var err error w.ws, err = jsonrpcclient.NewWSWithOptions(remote, wso.Path, wso.WSOptions) @@ -94,23 +91,20 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { return w, nil } -// OnStart implements service.Service by starting WSClient and event loop. -func (w *wsEvents) OnStart() error { +// Start starts the websocket client and the event loop. +func (w *wsEvents) Start() error { if err := w.ws.Start(); err != nil { return err } - go w.eventListener() - return nil } -// OnStop implements service.Service by stopping WSClient. -func (w *wsEvents) OnStop() { - if err := w.ws.Stop(); err != nil { - w.Logger.Error("Can't stop ws client", "err", err) - } -} +// IsRunning reports whether the websocket client is running. +func (w *wsEvents) IsRunning() bool { return w.ws.IsRunning() } + +// Stop shuts down the websocket client. +func (w *wsEvents) Stop() error { return w.ws.Stop() } // Subscribe implements EventsClient by using WSClient to subscribe given // subscriber to query. By default, it returns a channel with cap=1. 
Error is @@ -128,7 +122,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { if !w.IsRunning() { - return nil, errNotRunning + return nil, rpcclient.ErrClientNotRunning } if err := w.ws.Subscribe(ctx, query); err != nil { @@ -156,7 +150,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, // It returns an error if wsEvents is not running. func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.Unsubscribe(ctx, query); err != nil { @@ -182,7 +176,7 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // It returns an error if wsEvents is not running. func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.UnsubscribeAll(ctx); err != nil { diff --git a/rpc/client/interface.go b/rpc/client/interface.go index f63161118..ae54928df 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -24,17 +24,26 @@ import ( "context" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" ctypes "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) //go:generate ../../scripts/mockery_generate.sh Client -// Client wraps most important rpc calls a client would make if you want to -// listen for events, test if it also implements events.EventSwitch. +// Client describes the interface of Tendermint RPC client implementations. type Client interface { - service.Service + // These methods define the operational structure of the client. + + // Start the client. Start must report an error if the client is running. + Start() error + + // Stop the client. Stop must report an error if the client is not running. + Stop() error + + // IsRunning reports whether the client is running. + IsRunning() bool + + // These embedded interfaces define the callable methods of the service. ABCIClient EventsClient HistoryClient diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 70003569d..b0f27ae88 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -20,7 +20,6 @@ import ( "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" @@ -29,15 +28,7 @@ import ( // Client wraps arbitrary implementations of the various interfaces. 
type Client struct { - client.ABCIClient - client.SignClient - client.HistoryClient - client.StatusClient - client.EventsClient - client.EvidenceClient - client.MempoolClient - service.Service - + client.Client env *core.Environment } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 3036c48f0..898f67aa6 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -10,8 +10,6 @@ import ( coretypes "github.com/tendermint/tendermint/rpc/coretypes" - log "github.com/tendermint/tendermint/libs/log" - mock "github.com/stretchr/testify/mock" types "github.com/tendermint/tendermint/types" @@ -542,74 +540,6 @@ func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUncon return r0, r1 } -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Client) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - // Start provides a mock function with given fields: func (_m *Client) Start() error { ret := _m.Called() @@ -661,20 +591,6 @@ func (_m *Client) Stop() error { return r0 } -// String provides a mock function with given fields: -func (_m *Client) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) @@ -824,8 +740,3 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } - -// Wait provides a mock function with given fields: -func (_m *Client) Wait() { - _m.Called() -} diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index f47186429..8530f32d9 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -14,7 +14,7 @@ import ( metrics "github.com/rcrowley/go-metrics" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" + tmclient "github.com/tendermint/tendermint/rpc/client" types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -41,6 +41,7 @@ func DefaultWSOptions() WSOptions { // // WSClient is safe for concurrent use by multiple goroutines. 
type WSClient struct { // nolint: maligned + *tmclient.RunState conn *websocket.Conn Address string // IP:PORT or /path/to/socket @@ -83,8 +84,6 @@ type WSClient struct { // nolint: maligned // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration - service.BaseService - // Time between sending a ping and receiving a pong. See // https://godoc.org/github.com/rcrowley/go-metrics#Timer. PingPongLatencyTimer metrics.Timer @@ -114,6 +113,7 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e } c := &WSClient{ + RunState: tmclient.NewRunState("WSClient", nil), Address: parsedURL.GetTrimmedHostWithPath(), Dialer: dialFn, Endpoint: endpoint, @@ -127,7 +127,6 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e // sentIDs: make(map[types.JSONRPCIntID]bool), } - c.BaseService = *service.NewBaseService(nil, "WSClient", c) return c, nil } @@ -143,9 +142,11 @@ func (c *WSClient) String() string { return fmt.Sprintf("WSClient{%s (%s)}", c.Address, c.Endpoint) } -// OnStart implements service.Service by dialing a server and creating read and -// write routines. -func (c *WSClient) OnStart() error { +// Start dials the specified service address and starts the I/O routines. +func (c *WSClient) Start() error { + if err := c.RunState.Start(); err != nil { + return err + } err := c.dial() if err != nil { return err @@ -167,10 +168,9 @@ func (c *WSClient) OnStart() error { return nil } -// Stop overrides service.Service#Stop. There is no other way to wait until Quit -// channel is closed. +// Stop shuts down the client. func (c *WSClient) Stop() error { - if err := c.BaseService.Stop(); err != nil { + if err := c.RunState.Stop(); err != nil { return err } // only close user-facing channels when we can't write to them diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index c6a8a99b8..63731799b 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -13,7 +13,7 @@ import ( "github.com/gorilla/websocket" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/coretypes" types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -86,8 +86,8 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ }() // register connection - con := newWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) - con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) + logger := wm.logger.With("remote", wsConn.RemoteAddr()) + con := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) wm.logger.Info("New websocket connection", "remote", con.remoteAddr) err = con.Start() // BLOCKING if err != nil { @@ -106,7 +106,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // // In case of an error, the connection is stopped. 
type wsConnection struct { - service.BaseService + *client.RunState remoteAddr string baseConn *websocket.Conn @@ -150,9 +150,11 @@ type wsConnection struct { func newWSConnection( baseConn *websocket.Conn, funcMap map[string]*RPCFunc, + logger log.Logger, options ...func(*wsConnection), ) *wsConnection { wsc := &wsConnection{ + RunState: client.NewRunState("wsConnection", logger), remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, funcMap: funcMap, @@ -166,7 +168,6 @@ func newWSConnection( option(wsc) } wsc.baseConn.SetReadLimit(wsc.readLimit) - wsc.BaseService = *service.NewBaseService(nil, "wsConnection", wsc) return wsc } @@ -218,9 +219,11 @@ func ReadLimit(readLimit int64) func(*wsConnection) { } } -// OnStart implements service.Service by starting the read and write routines. It -// blocks until there's some error. -func (wsc *wsConnection) OnStart() error { +// Start starts the client service routines and blocks until there is an error. +func (wsc *wsConnection) Start() error { + if err := wsc.RunState.Start(); err != nil { + return err + } wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) // Read subscriptions/unsubscriptions to events @@ -231,16 +234,18 @@ func (wsc *wsConnection) OnStart() error { return nil } -// OnStop implements service.Service by unsubscribing remoteAddr from all -// subscriptions. -func (wsc *wsConnection) OnStop() { +// Stop unsubscribes the remote from all subscriptions. +func (wsc *wsConnection) Stop() error { + if err := wsc.RunState.Stop(); err != nil { + return err + } if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } - if wsc.ctx != nil { wsc.cancel() } + return nil } // GetRemoteAddr returns the remote address of the underlying connection. From 7e4cc595d399fa2724cd75fcdb0c004570db27be Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 22 Sep 2021 17:45:11 -0700 Subject: [PATCH 016/174] Remove the unused rpc/client/mocks package. (#6974) This package is not used in the tendermint repository since 31e7cdee. Note that this is not the same package as rpc/client/mock (N.B. singular), which is still used in some tests. A search of GitHub turns up only 11 uses, all of which are in clones of the tendermint repo at old commits. --- rpc/client/mocks/client.go | 742 ------------------------------------ 1 file changed, 742 deletions(-) delete mode 100644 rpc/client/mocks/client.go diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go deleted file mode 100644 index 898f67aa6..000000000 --- a/rpc/client/mocks/client.go +++ /dev/null @@ -1,742 +0,0 @@ -// Code generated by mockery. DO NOT EDIT.
- -package mocks - -import ( - bytes "github.com/tendermint/tendermint/libs/bytes" - client "github.com/tendermint/tendermint/rpc/client" - - context "context" - - coretypes "github.com/tendermint/tendermint/rpc/coretypes" - - mock "github.com/stretchr/testify/mock" - - types "github.com/tendermint/tendermint/types" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -// ABCIInfo provides a mock function with given fields: _a0 -func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultABCIInfo - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultABCIInfo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ABCIQuery provides a mock function with given fields: ctx, path, data -func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { - ret := _m.Called(ctx, path, data) - - var r0 *coretypes.ResultABCIQuery - if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) *coretypes.ResultABCIQuery); ok { - r0 = rf(ctx, path, data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIQuery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes) error); ok { - r1 = rf(ctx, path, data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ABCIQueryWithOptions provides a mock function with given fields: ctx, path, data, opts -func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - ret := _m.Called(ctx, path, data, opts) - - var r0 *coretypes.ResultABCIQuery - if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) *coretypes.ResultABCIQuery); ok { - r0 = rf(ctx, path, data, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIQuery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) error); ok { - r1 = rf(ctx, path, data, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Block provides a mock function with given fields: ctx, height -func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultBlock - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlock); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - ret := _m.Called(ctx, hash) - - var r0 *coretypes.ResultBlock - if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultBlock); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlock) - } - } - - var r1 error - if rf, ok := 
ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockResults provides a mock function with given fields: ctx, height -func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultBlockResults - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlockResults); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockResults) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockSearch provides a mock function with given fields: ctx, query, page, perPage, orderBy -func (_m *Client) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { - ret := _m.Called(ctx, query, page, perPage, orderBy) - - var r0 *coretypes.ResultBlockSearch - if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) *coretypes.ResultBlockSearch); ok { - r0 = rf(ctx, query, page, perPage, orderBy) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockSearch) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, *int, *int, string) error); ok { - r1 = rf(ctx, query, page, perPage, orderBy) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockchainInfo provides a mock function with given fields: ctx, minHeight, maxHeight -func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - ret := _m.Called(ctx, minHeight, maxHeight) - - var r0 *coretypes.ResultBlockchainInfo - if rf, ok := ret.Get(0).(func(context.Context, int64, int64) *coretypes.ResultBlockchainInfo); ok { - r0 = rf(ctx, minHeight, maxHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockchainInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok { - r1 = rf(ctx, minHeight, maxHeight) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastEvidence provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastEvidence - if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastEvidence) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - 
} else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxCommit provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTxCommit - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTxCommit) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultCheckTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Commit provides a mock function with given fields: ctx, height -func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultCommit - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultCommit); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultCommit) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ConsensusParams provides a mock function with given fields: ctx, height -func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultConsensusParams - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultConsensusParams); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultConsensusParams) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ConsensusState provides a mock function with given fields: _a0 -func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultConsensusState - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultConsensusState); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { 
- r0 = ret.Get(0).(*coretypes.ResultConsensusState) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DumpConsensusState provides a mock function with given fields: _a0 -func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultDumpConsensusState - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultDumpConsensusState); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultDumpConsensusState) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Genesis provides a mock function with given fields: _a0 -func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultGenesis - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultGenesis); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultGenesis) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GenesisChunked provides a mock function with given fields: _a0, _a1 -func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultGenesisChunk - if rf, ok := ret.Get(0).(func(context.Context, uint) *coretypes.ResultGenesisChunk); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultGenesisChunk) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Health provides a mock function with given fields: _a0 -func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultHealth - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultHealth); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultHealth) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsRunning provides a mock function with given fields: -func (_m *Client) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// NetInfo provides a mock function with given fields: _a0 -func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultNetInfo - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultNetInfo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultNetInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NumUnconfirmedTxs provides a mock function with given fields: _a0 -func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok 
:= ret.Get(0).(func(context.Context) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Status provides a mock function with given fields: _a0 -func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultStatus - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultStatus); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultStatus) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity -func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { - _va := make([]interface{}, len(outCapacity)) - for _i := range outCapacity { - _va[_i] = outCapacity[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, subscriber, query) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 <-chan coretypes.ResultEvent - if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) <-chan coretypes.ResultEvent); ok { - r0 = rf(ctx, subscriber, query, outCapacity...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan coretypes.ResultEvent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, string, ...int) error); ok { - r1 = rf(ctx, subscriber, query, outCapacity...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Tx provides a mock function with given fields: ctx, hash, prove -func (_m *Client) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { - ret := _m.Called(ctx, hash, prove) - - var r0 *coretypes.ResultTx - if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) *coretypes.ResultTx); ok { - r0 = rf(ctx, hash, prove) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes, bool) error); ok { - r1 = rf(ctx, hash, prove) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TxSearch provides a mock function with given fields: ctx, query, prove, page, perPage, orderBy -func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { - ret := _m.Called(ctx, query, prove, page, perPage, orderBy) - - var r0 *coretypes.ResultTxSearch - if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) *coretypes.ResultTxSearch); ok { - r0 = rf(ctx, query, prove, page, perPage, orderBy) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultTxSearch) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bool, *int, *int, string) error); ok { - r1 = rf(ctx, query, prove, page, perPage, orderBy) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UnconfirmedTxs provides a mock function with given fields: ctx, limit -func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(ctx, limit) - - var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(ctx, limit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { - r1 = rf(ctx, limit) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Unsubscribe provides a mock function with given fields: ctx, subscriber, query -func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query string) error { - ret := _m.Called(ctx, subscriber, query) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { - r0 = rf(ctx, subscriber, query) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UnsubscribeAll provides a mock function with given fields: ctx, subscriber -func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { - ret := _m.Called(ctx, subscriber) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, subscriber) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Validators provides a mock function with given fields: ctx, height, page, perPage -func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perPage *int) (*coretypes.ResultValidators, error) { - ret := _m.Called(ctx, height, page, perPage) - - var r0 *coretypes.ResultValidators - if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int) *coretypes.ResultValidators); ok { - r0 = rf(ctx, height, page, perPage) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultValidators) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64, *int, 
*int) error); ok { - r1 = rf(ctx, height, page, perPage) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} From 41ac5b90c58bcac266a9fca2d8775de04004c4e2 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 22 Sep 2021 18:02:24 -0700 Subject: [PATCH 017/174] Fix script paths in go:generate directives. (#6973) We moved some files further down in the directory structure in #6964, which caused the relative paths to the mockery wrapper to stop working. There does not seem to be an obvious way to get the module root as a default environment variable, so for now I just added the extra up-slashes. --- internal/proxy/app_conn.go | 2 +- internal/state/indexer/eventsink.go | 2 +- internal/state/mocks/evidence_pool.go | 1 - internal/state/mocks/store.go | 1 - internal/state/services.go | 4 ++-- internal/state/store.go | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go index 39860e2a2..ca2c7c109 100644 --- a/internal/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/abci/types" ) -//go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot +//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level diff --git a/internal/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go index d7c9d10e0..dba50b6af 100644 --- a/internal/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -16,7 +16,7 @@ const ( PSQL EventSinkType = "psql" ) -//go:generate ../../scripts/mockery_generate.sh EventSink +//go:generate ../../../scripts/mockery_generate.sh EventSink // EventSink interface is defined the APIs for the IndexerService to interact with the data store, // including the block/transaction indexing and the search functions. diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 9f5e747e5..8bf4a9b64 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -5,7 +5,6 @@ package mocks import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - types "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index af2408dd9..4452f9bec 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -5,7 +5,6 @@ package mocks import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" - tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" types "github.com/tendermint/tendermint/types" diff --git a/internal/state/services.go b/internal/state/services.go index c692d0b94..49388cc12 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -9,7 +9,7 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! 
 //------------------------------------------------------

-//go:generate ../scripts/mockery_generate.sh BlockStore
+//go:generate ../../scripts/mockery_generate.sh BlockStore

 //------------------------------------------------------
 // blockstore

@@ -38,7 +38,7 @@ type BlockStore interface {
 //-----------------------------------------------------------------------------
 // evidence pool

-//go:generate ../scripts/mockery_generate.sh EvidencePool
+//go:generate ../../scripts/mockery_generate.sh EvidencePool

 // EvidencePool defines the EvidencePool interface used by State.
 type EvidencePool interface {
diff --git a/internal/state/store.go b/internal/state/store.go
index 5ce11e47d..787b5c8ad 100644
--- a/internal/state/store.go
+++ b/internal/state/store.go
@@ -68,7 +68,7 @@ func init() {

 //----------------------

-//go:generate ../scripts/mockery_generate.sh Store
+//go:generate ../../scripts/mockery_generate.sh Store

 // Store defines the state store interface
 //

From c9beef796de6cd7b78252a9098aacb29efbda748 Mon Sep 17 00:00:00 2001
From: Marko
Date: Thu, 23 Sep 2021 12:37:11 +0000
Subject: [PATCH 018/174] proto: regenerate code (#6977)

## Description

A replace-all seems to have been used, causing proto files to be changed without being regenerated.

---
 proto/tendermint/state/types.pb.go     | 4 ++--
 proto/tendermint/statesync/types.pb.go | 4 ++--
 rpc/grpc/types.pb.go                   | 7 +++----
 3 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go
index ea827cc7a..85f38cada 100644
--- a/proto/tendermint/state/types.pb.go
+++ b/proto/tendermint/state/types.pb.go
@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: tendermint/internal/state/types.proto
+// source: tendermint/state/types.proto

 package state

@@ -419,7 +419,7 @@ func init() {
 	proto.RegisterType((*State)(nil), "tendermint.state.State")
 }

-func init() { proto.RegisterFile("tendermint/internal/state/types.proto", fileDescriptor_ccfacf933f22bf93) }
+func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) }

 var fileDescriptor_ccfacf933f22bf93 = []byte{
 	// 763 bytes of a gzipped FileDescriptorProto
diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go
index c0fa0f3a7..5541c2803 100644
--- a/proto/tendermint/statesync/types.pb.go
+++ b/proto/tendermint/statesync/types.pb.go
@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: tendermint/internal/statesync/types.proto
+// source: tendermint/statesync/types.proto

 package statesync

@@ -631,7 +631,7 @@ func init() {
 	proto.RegisterType((*ParamsResponse)(nil), "tendermint.statesync.ParamsResponse")
 }

-func init() { proto.RegisterFile("tendermint/internal/statesync/types.proto", fileDescriptor_a1c2869546ca7914) }
+func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) }

 var fileDescriptor_a1c2869546ca7914 = []byte{
 	// 589 bytes of a gzipped FileDescriptorProto
diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go
index f2b92c878..b9cbee03f 100644
--- a/rpc/grpc/types.pb.go
+++ b/rpc/grpc/types.pb.go
@@ -6,15 +6,14 @@ package coregrpc
 import (
 	context "context"
 	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
 	proto "github.com/gogo/protobuf/proto"
 	types "github.com/tendermint/tendermint/abci/types"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
+	io "io"
+	math "math"
+	math_bits "math/bits"
 )

 // Reference imports to suppress errors if they are not otherwise used.

From cf7537ea5f8306c60df4e5dab39e0686d390117a Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Thu, 23 Sep 2021 07:52:07 -0700
Subject: [PATCH 019/174] cleanup: Reduce and normalize import path aliasing.
 (#6975)

The code in the Tendermint repository makes heavy use of import aliasing.
This is made necessary by our extensive reuse of common base package names,
and by repetition of similar names across different subdirectories.
Unfortunately we have not been very consistent about which packages we alias
in various circumstances, and the aliases we use vary.

In the spirit of the advice in the style guide and
https://github.com/golang/go/wiki/CodeReviewComments#imports, this change
makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended
to help make the code more readable to developers (including myself) trying
to understand what is being imported where. Only unexported names have been
modified, and the changes were generated and applied mechanically with
gofmt -r and comby, respecting the lexical and syntactic rules of Go. Even
so, I did not fix every inconsistency. Where the changes would be too
disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
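As a minimal, self-contained sketch of these conventions (the package choices here are illustrative standard-library imports, not drawn from the diff itself): two imports sharing the base name "rand" get lexical aliases, an unambiguous base name such as "fmt" stays unaliased, and a blank side-effecting import is grouped separately and commented.

```go
package main

import (
	crand "crypto/rand" // aliased: base name "rand" collides with math/rand
	"fmt"               // unambiguous base name: no alias needed
	mrand "math/rand"   // aliased: base name "rand" collides with crypto/rand

	_ "net/http/pprof" // blank import, grouped and commented: registers pprof handlers
)

func main() {
	var buf [8]byte
	if _, err := crand.Read(buf[:]); err != nil {
		panic(err)
	}
	fmt.Printf("crypto/rand bytes: %x, math/rand int: %d\n", buf, mrand.Intn(100))
}
```

The crand/mrand spelling (rather than cr/mr) reflects the lexical rule above; the same renaming appears in the xchacha20poly1305 test diff below.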
--- abci/example/kvstore/persistent_kvstore.go | 14 +- abci/types/pubkey.go | 8 +- cmd/tendermint/commands/debug/dump.go | 8 +- cmd/tendermint/commands/debug/kill.go | 6 +- cmd/tendermint/commands/debug/util.go | 4 +- cmd/tendermint/commands/light.go | 1 - cmd/tendermint/commands/reindex_event.go | 19 +- cmd/tendermint/main.go | 8 +- config/db.go | 11 +- crypto/encoding/codec.go | 32 +-- crypto/secp256k1/secp256k1_test.go | 3 +- crypto/xchacha20poly1305/xchachapoly_test.go | 22 +- internal/blocksync/v0/reactor.go | 16 +- internal/blocksync/v0/reactor_test.go | 34 +-- internal/blocksync/v2/processor.go | 10 +- internal/blocksync/v2/processor_context.go | 6 +- internal/blocksync/v2/processor_test.go | 10 +- internal/blocksync/v2/reactor.go | 12 +- internal/blocksync/v2/reactor_test.go | 18 +- internal/consensus/byzantine_test.go | 3 +- internal/consensus/common_test.go | 72 +++--- internal/consensus/mempool_test.go | 11 +- internal/consensus/reactor_test.go | 44 ++-- internal/consensus/replay_file.go | 18 +- internal/consensus/replay_stubs.go | 10 +- internal/consensus/replay_test.go | 116 ++++----- internal/consensus/state.go | 8 +- .../consensus/types/height_vote_set_test.go | 12 +- internal/consensus/wal_generator.go | 22 +- internal/evidence/verify_test.go | 1 - internal/mempool/mock/mempool.go | 10 +- internal/mempool/v0/clist_mempool.go | 12 +- internal/mempool/v0/clist_mempool_test.go | 24 +- internal/mempool/v0/reactor.go | 16 +- internal/mempool/v0/reactor_test.go | 34 +-- internal/mempool/v1/reactor.go | 16 +- .../p2p/conn/evil_secret_connection_test.go | 4 +- internal/p2p/conn/secret_connection.go | 6 +- internal/p2p/peermanager_scoring_test.go | 3 +- internal/p2p/pex/reactor_test.go | 70 +++--- internal/rpc/core/abci.go | 10 +- internal/rpc/core/blocks.go | 42 ++-- internal/rpc/core/blocks_test.go | 6 +- internal/rpc/core/consensus.go | 32 +-- internal/rpc/core/dev.go | 6 +- internal/rpc/core/env.go | 20 +- internal/rpc/core/events.go | 16 +- internal/rpc/core/evidence.go | 8 +- internal/rpc/core/health.go | 6 +- internal/rpc/core/mempool.go | 40 +-- internal/rpc/core/net.go | 46 ++-- internal/rpc/core/net_test.go | 6 +- internal/rpc/core/status.go | 12 +- internal/rpc/core/tx.go | 16 +- internal/state/execution.go | 12 +- internal/state/execution_test.go | 16 +- internal/state/helpers_test.go | 8 +- internal/state/indexer/block/kv/kv_test.go | 5 +- .../state/indexer/indexer_service_test.go | 8 +- internal/state/indexer/sink/kv/kv.go | 3 +- internal/state/indexer/sink/kv/kv_test.go | 17 +- internal/state/indexer/tx/kv/kv_test.go | 17 +- internal/state/state_test.go | 51 ++-- internal/state/store_test.go | 13 +- internal/state/tx_filter.go | 10 +- internal/state/validation_test.go | 20 +- internal/statesync/dispatcher.go | 4 +- internal/test/factory/genesis.go | 6 +- light/client_test.go | 1 - light/provider/http/http.go | 6 +- light/proxy/routes.go | 102 ++++---- light/rpc/client.go | 88 +++---- light/store/db/db_test.go | 1 - node/node.go | 238 +++++++++--------- node/node_test.go | 151 ++++++----- node/setup.go | 217 ++++++++-------- privval/grpc/client.go | 4 +- privval/grpc/server.go | 4 +- privval/grpc/util.go | 12 +- privval/msgs_test.go | 4 +- privval/secret_connection.go | 6 +- privval/signer_client.go | 4 +- privval/signer_requestHandler.go | 4 +- proto/tendermint/blocksync/message.go | 4 +- proto/tendermint/blocksync/message_test.go | 2 +- proto/tendermint/consensus/message.go | 4 +- proto/tendermint/mempool/message.go | 4 +- proto/tendermint/p2p/pex.go | 4 +- 
proto/tendermint/statesync/message.go | 4 +- proto/tendermint/statesync/message_test.go | 2 +- rpc/client/event_test.go | 4 +- rpc/client/evidence_test.go | 4 +- rpc/client/examples_test.go | 4 +- rpc/client/helpers_test.go | 10 +- rpc/client/http/http.go | 108 ++++---- rpc/client/http/ws.go | 14 +- rpc/client/interface.go | 58 ++--- rpc/client/local/local.go | 98 ++++---- rpc/client/mock/abci.go | 58 ++--- rpc/client/mock/abci_test.go | 6 +- rpc/client/mock/client.go | 46 ++-- rpc/client/mock/status.go | 8 +- rpc/client/mock/status_test.go | 8 +- rpc/client/rpc_test.go | 52 ++-- rpc/grpc/api.go | 2 +- rpc/grpc/grpc_test.go | 4 +- rpc/jsonrpc/client/decode.go | 24 +- rpc/jsonrpc/client/http_json_client.go | 18 +- rpc/jsonrpc/client/http_uri_client.go | 4 +- rpc/jsonrpc/client/ws_client.go | 30 +-- rpc/jsonrpc/client/ws_client_test.go | 6 +- rpc/jsonrpc/jsonrpc_test.go | 16 +- rpc/jsonrpc/server/http_json_handler.go | 40 +-- rpc/jsonrpc/server/http_json_handler_test.go | 50 ++-- rpc/jsonrpc/server/http_server.go | 10 +- rpc/jsonrpc/server/http_server_test.go | 12 +- rpc/jsonrpc/server/http_uri_handler.go | 32 +-- rpc/jsonrpc/server/parse_test.go | 6 +- rpc/jsonrpc/server/ws_handler.go | 40 +-- rpc/jsonrpc/server/ws_handler_test.go | 10 +- rpc/test/helpers.go | 36 +-- scripts/json2wal/main.go | 6 +- scripts/wal2json/main.go | 6 +- store/store_test.go | 20 +- types/protobuf.go | 6 +- types/protobuf_test.go | 6 +- types/validator.go | 8 +- 127 files changed, 1473 insertions(+), 1473 deletions(-) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0fcfcadf7..40451baa9 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) const ( @@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct { // validator set ValUpdates []types.ValidatorUpdate - valAddrToPubKeyMap map[string]pc.PublicKey + valAddrToPubKeyMap map[string]cryptoproto.PublicKey logger log.Logger } @@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication return &PersistentKVStoreApplication{ app: &Application{state: state}, - valAddrToPubKeyMap: make(map[string]pc.PublicKey), + valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey), logger: log.NewNopLogger(), } } @@ -194,8 +194,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida return } -func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte { - pk, err := cryptoenc.PubKeyFromProto(pubkey) +func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte { + pk, err := encoding.PubKeyFromProto(pubkey) if err != nil { panic(err) } @@ -243,7 +243,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // add, update, or remove a validator func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { - pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey) + pubkey, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) } diff --git 
a/abci/types/pubkey.go b/abci/types/pubkey.go index a0f746992..c188fc8f5 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -4,7 +4,7 @@ import ( fmt "fmt" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" ) @@ -12,7 +12,7 @@ import ( func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { pke := ed25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { return Ed25519ValidatorUpdate(pk, power) case secp256k1.KeyType: pke := secp256k1.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { } case sr25519.KeyType: pke := sr25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index f99975a75..cb1cc942a 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -65,9 +65,9 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) dumpDebugData(outDir, conf, rpc) @@ -79,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return nil } -func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { +func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index bef534152..3e749e513 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -50,9 +50,9 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. 
diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 226bfadc7..fa356c488 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -9,7 +9,7 @@ import ( "path" "path/filepath" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { // copyWAL copies the Tendermint node's WAL file. It returns an error if the // WAL file cannot be read or copied. -func copyWAL(conf *cfg.Config, dir string) error { +func copyWAL(conf *config.Config, dir string) error { walPath := conf.Consensus.WalFile() walFile := filepath.Base(walPath) diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index 5e7446e51..f4c9a21da 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -11,7 +11,6 @@ import ( "time" "github.com/spf13/cobra" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index b5e98a29a..ca176df11 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/spf13/cobra" - tmdb "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -129,17 +129,17 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { } func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { - dbType := tmdb.BackendType(cfg.DBBackend) + dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore - blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get StateStore - stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } @@ -221,14 +221,15 @@ func checkValidHeight(bs state.BlockStore) error { } if startHeight < base { - return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base) + return fmt.Errorf("%s (requested start height: %d, base height: %d)", + coretypes.ErrHeightNotAvailable, startHeight, base) } height := bs.Height() if startHeight > height { return fmt.Errorf( - "%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height) + "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height) } if endHeight == 0 || endHeight > height { @@ -238,13 +239,13 @@ func checkValidHeight(bs state.BlockStore) error { if endHeight < base { return fmt.Errorf( - "%s (requested end height: %d, base height: %d)", 
ctypes.ErrHeightNotAvailable, endHeight, base) + "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base) } if endHeight < startHeight { return fmt.Errorf( "%s (requested the end height: %d is less than the start height: %d)", - ctypes.ErrInvalidRequest, startHeight, endHeight) + coretypes.ErrInvalidRequest, startHeight, endHeight) } return nil diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index c006c297d..9f908a1ed 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -6,9 +6,9 @@ import ( cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" "github.com/tendermint/tendermint/cmd/tendermint/commands/debug" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/node" ) func main() { @@ -42,12 +42,12 @@ func main() { // * Provide their own DB implementation // can copy this file and use something other than the // node.NewDefault function - nodeFunc := nm.NewDefault + nodeFunc := node.NewDefault // Create & start node rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir))) + cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir))) if err := cmd.Execute(); err != nil { panic(err) } diff --git a/config/db.go b/config/db.go index 3ae274a50..8f489a87a 100644 --- a/config/db.go +++ b/config/db.go @@ -1,9 +1,10 @@ package config import ( + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - db "github.com/tendermint/tm-db" ) // ServiceProvider takes a config and a logger and returns a ready to go Node. @@ -16,11 +17,11 @@ type DBContext struct { } // DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (db.DB, error) +type DBProvider func(*DBContext) (dbm.DB, error) // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the Config. 
-func DefaultDBProvider(ctx *DBContext) (db.DB, error) { - dbType := db.BackendType(ctx.Config.DBBackend) - return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 64e4e7c6f..37249bcb3 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -8,34 +8,34 @@ import ( "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" "github.com/tendermint/tendermint/libs/json" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) func init() { - json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") - json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") - json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey") + json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") + json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") } // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey -func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { - var kp pc.PublicKey +func PubKeyToProto(k crypto.PubKey) (cryptoproto.PublicKey, error) { + var kp cryptoproto.PublicKey switch k := k.(type) { case ed25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Ed25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Ed25519{ Ed25519: k, }, } case secp256k1.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Secp256K1{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Secp256K1{ Secp256K1: k, }, } case sr25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Sr25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Sr25519{ Sr25519: k, }, } @@ -46,9 +46,9 @@ func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { } // PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey -func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { +func PubKeyFromProto(k cryptoproto.PublicKey) (crypto.PubKey, error) { switch k := k.Sum.(type) { - case *pc.PublicKey_Ed25519: + case *cryptoproto.PublicKey_Ed25519: if len(k.Ed25519) != ed25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d", len(k.Ed25519), ed25519.PubKeySize) @@ -56,7 +56,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(ed25519.PubKey, ed25519.PubKeySize) copy(pk, k.Ed25519) return pk, nil - case *pc.PublicKey_Secp256K1: + case *cryptoproto.PublicKey_Secp256K1: if len(k.Secp256K1) != secp256k1.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d", len(k.Secp256K1), secp256k1.PubKeySize) @@ -64,7 +64,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(secp256k1.PubKey, secp256k1.PubKeySize) copy(pk, k.Secp256K1) return pk, nil - case *pc.PublicKey_Sr25519: + case *cryptoproto.PublicKey_Sr25519: if len(k.Sr25519) != sr25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySr25519. 
Got %d, expected %d", len(k.Sr25519), sr25519.PubKeySize) diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index f8bf29971..7a1109293 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -5,14 +5,13 @@ import ( "math/big" "testing" + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcutil/base58" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/secp256k1" - - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" ) type keyData struct { diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go index b17b1c376..75953d72d 100644 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ b/crypto/xchacha20poly1305/xchachapoly_test.go @@ -2,8 +2,8 @@ package xchacha20poly1305 import ( "bytes" - cr "crypto/rand" - mr "math/rand" + crand "crypto/rand" + mrand "math/rand" "testing" ) @@ -19,23 +19,23 @@ func TestRandom(t *testing.T) { var nonce [24]byte var key [32]byte - al := mr.Intn(128) - pl := mr.Intn(16384) + al := mrand.Intn(128) + pl := mrand.Intn(16384) ad := make([]byte, al) plaintext := make([]byte, pl) - _, err := cr.Read(key[:]) + _, err := crand.Read(key[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(nonce[:]) + _, err = crand.Read(nonce[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(ad) + _, err = crand.Read(ad) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(plaintext) + _, err = crand.Read(plaintext) if err != nil { t.Errorf("error on read: %w", err) } @@ -59,7 +59,7 @@ func TestRandom(t *testing.T) { } if len(ad) > 0 { - alterAdIdx := mr.Intn(len(ad)) + alterAdIdx := mrand.Intn(len(ad)) ad[alterAdIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering additional data", i) @@ -67,14 +67,14 @@ func TestRandom(t *testing.T) { ad[alterAdIdx] ^= 0x80 } - alterNonceIdx := mr.Intn(aead.NonceSize()) + alterNonceIdx := mrand.Intn(aead.NonceSize()) nonce[alterNonceIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering nonce", i) } nonce[alterNonceIdx] ^= 0x80 - alterCtIdx := mr.Intn(len(ct)) + alterCtIdx := mrand.Intn(len(ct)) ct[alterCtIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering ciphertext", i) diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index 645b39b4c..4c378d040 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -6,13 +6,13 @@ import ( "sync" "time" - bc "github.com/tendermint/tendermint/internal/blocksync" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/blocksync" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmSync "github.com/tendermint/tendermint/libs/sync" + tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -36,7 +36,7 @@ var ( Priority: 
5, SendQueueCapacity: 1000, RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, + RecvMessageCapacity: blocksync.MaxMsgSize, MaxSendBytes: 100, }, }, @@ -85,7 +85,7 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmSync.AtomicBool + blockSync *tmsync.AtomicBool blockSyncCh *p2p.Channel // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope @@ -107,7 +107,7 @@ type Reactor struct { // stopping the p2p Channel(s). poolWG sync.WaitGroup - metrics *cons.Metrics + metrics *consensus.Metrics syncStartTime time.Time } @@ -122,7 +122,7 @@ func NewReactor( blockSyncCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, blockSync bool, - metrics *cons.Metrics, + metrics *consensus.Metrics, ) (*Reactor, error) { if state.LastBlockHeight != store.Height() { return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) @@ -142,7 +142,7 @@ func NewReactor( store: store, pool: NewBlockPool(startHeight, requestsCh, errorsCh), consReactor: consReactor, - blockSync: tmSync.NewBool(blockSync), + blockSync: tmsync.NewBool(blockSync), requestsCh: requestsCh, errorsCh: errorsCh, blockSyncCh: blockSyncCh, diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go index 9a79c8cf3..14e0fbf1d 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -6,11 +6,12 @@ import ( "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" @@ -22,7 +23,6 @@ import ( bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) type reactorTestSuite struct { @@ -165,7 +165,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, rts.blockSyncChannels[nodeID], rts.peerUpdates[nodeID], rts.blockSync, - cons.NopMetrics()) + consensus.NopMetrics()) require.NoError(t, err) require.NoError(t, rts.reactors[nodeID].Start()) @@ -182,10 +182,10 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(64) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -217,10 +217,10 @@ func TestReactor_AbruptDisconnect(t *testing.T) { } func TestReactor_SyncTime(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(101) rts 
:= setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -240,10 +240,10 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(65) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -287,11 +287,11 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - config := cfg.ResetTestRoot("block_sync_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) maxBlockHeight := int64(48) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) @@ -325,7 +325,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // // XXX: This causes a potential race condition. // See: https://github.com/tendermint/tendermint/issues/6005 - otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30) + otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30) newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go index 12959d5b1..d1850eb1c 100644 --- a/internal/blocksync/v2/processor.go +++ b/internal/blocksync/v2/processor.go @@ -3,7 +3,7 @@ package v2 import ( "fmt" - tmState "github.com/tendermint/tendermint/internal/state" + tmstate "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) @@ -36,7 +36,7 @@ func (e pcBlockProcessed) String() string { type pcFinished struct { priorityNormal blocksSynced int - tmState tmState.State + tmState tmstate.State } func (p pcFinished) Error() string { @@ -148,11 +148,11 @@ func (state *pcState) handle(event Event) (Event, error) { return noOp, nil case rProcessBlock: - tmState := state.context.tmState() + tmstate := state.context.tmState() firstItem, secondItem, err := state.nextTwo() if err != nil { if state.draining { - return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil + return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil } return noOp, nil } @@ -164,7 +164,7 @@ func (state *pcState) handle(event Event) (Event, error) { ) // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) + err = state.context.verifyCommit(tmstate.ChainID, firstID, first.Height, second.LastCommit) if err != nil { state.purgePeer(firstItem.peerID) if firstItem.peerID != secondItem.peerID { diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go index caf25e09c..86b380f98 100644 --- a/internal/blocksync/v2/processor_context.go +++ b/internal/blocksync/v2/processor_context.go @@ -3,7 +3,7 @@ package v2 import ( "fmt" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/state" 
"github.com/tendermint/tendermint/types" ) @@ -21,10 +21,10 @@ type pContext struct { store blockStore applier blockApplier state state.State - metrics *cons.Metrics + metrics *consensus.Metrics } -func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext { +func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *consensus.Metrics) *pContext { return &pContext{ store: st, applier: ex, diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go index c9b8c62e1..7c12b3610 100644 --- a/internal/blocksync/v2/processor_test.go +++ b/internal/blocksync/v2/processor_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - tmState "github.com/tendermint/tendermint/internal/state" + tmstate "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/types" ) @@ -33,7 +33,7 @@ func makePcBlock(height int64) *types.Block { // makeState takes test parameters and creates a specific processor state. func makeState(p *params) *pcState { var ( - tmState = tmState.State{LastBlockHeight: p.height} + tmState = tmstate.State{LastBlockHeight: p.height} context = newMockProcessorContext(tmState, p.verBL, p.appBL) ) state := newPcState(context) @@ -207,7 +207,7 @@ func TestRProcessBlockSuccess(t *testing.T) { { // finish when H+1 or/and H+2 are missing event: rProcessBlock{}, wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1}, + wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 1}, blocksSynced: 1}, }, }, }, @@ -271,7 +271,7 @@ func TestScFinishedEv(t *testing.T) { { currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, + wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, }, }, }, @@ -282,7 +282,7 @@ func TestScFinishedEv(t *testing.T) { currentState: ¶ms{height: 100, items: []pcBlock{ {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, + wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, }, }, }, diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go index bd2b85038..f8dcbcf64 100644 --- a/internal/blocksync/v2/reactor.go +++ b/internal/blocksync/v2/reactor.go @@ -5,11 +5,11 @@ import ( "fmt" "time" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" - bc "github.com/tendermint/tendermint/internal/blocksync" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/state" @@ -61,7 +61,7 @@ type blockApplier interface { // XXX: unify naming in this package around tmState func newReactor(state state.State, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, blockSync 
bool, metrics *cons.Metrics) *BlockchainReactor { + blockApplier blockApplier, blockSync bool, metrics *consensus.Metrics) *BlockchainReactor { initHeight := state.LastBlockHeight + 1 if initHeight == 1 { initHeight = state.InitialHeight @@ -91,7 +91,7 @@ func NewBlockchainReactor( blockApplier blockApplier, store blockStore, blockSync bool, - metrics *cons.Metrics) *BlockchainReactor { + metrics *consensus.Metrics) *BlockchainReactor { reporter := behavior.NewMockReporter() return newReactor(state, store, reporter, blockApplier, blockSync, metrics) } @@ -605,7 +605,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 5, SendQueueCapacity: 2000, RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, + RecvMessageCapacity: blocksync.MaxMsgSize, }, } } diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go index ca8ba41a0..8cb675a0a 100644 --- a/internal/blocksync/v2/reactor_test.go +++ b/internal/blocksync/v2/reactor_test.go @@ -15,9 +15,9 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" @@ -177,7 +177,7 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { require.NoError(t, err) } - r := newReactor(state, store, reporter, appl, true, cons.NopMetrics()) + r := newReactor(state, store, reporter, appl, true, consensus.NopMetrics()) logger := log.TestingLogger() r.SetLogger(logger.With("module", "blockchain")) @@ -365,9 +365,9 @@ func TestReactorHelperMode(t *testing.T) { channelID = byte(0x40) ) - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + cfg := config.ResetTestRoot("blockchain_reactor_v2_test") + defer os.RemoveAll(cfg.RootDir) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) params := testReactorParams{ logger: log.TestingLogger(), @@ -455,9 +455,9 @@ func TestReactorHelperMode(t *testing.T) { } func TestReactorSetSwitchNil(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + cfg := config.ResetTestRoot("blockchain_reactor_v2_test") + defer os.RemoveAll(cfg.RootDir) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) reactor := newTestReactor(t, testReactorParams{ logger: log.TestingLogger(), diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index a202a4738..bff9f4ace 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -10,6 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/evidence" @@ -23,7 +25,6 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" 
"github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // Byzantine node sends two different prevotes (nil and blockID) to the same diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index b7fdd984e..2bd926c45 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -7,21 +7,19 @@ import ( "io" "io/ioutil" "os" + "path" "path/filepath" "sync" "testing" "time" "github.com/stretchr/testify/require" - - "path" - dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" @@ -49,10 +47,10 @@ const ( // test. type cleanupFunc func() -func configSetup(t *testing.T) *cfg.Config { +func configSetup(t *testing.T) *config.Config { t.Helper() - config := ResetConfig("consensus_reactor_test") + cfg := ResetConfig("consensus_reactor_test") consensusReplayConfig := ResetConfig("consensus_replay_test") configStateTest := ResetConfig("consensus_state_test") @@ -60,13 +58,13 @@ func configSetup(t *testing.T) *cfg.Config { configByzantineTest := ResetConfig("consensus_byzantine_test") t.Cleanup(func() { - os.RemoveAll(config.RootDir) + os.RemoveAll(cfg.RootDir) os.RemoveAll(consensusReplayConfig.RootDir) os.RemoveAll(configStateTest.RootDir) os.RemoveAll(configMempoolTest.RootDir) os.RemoveAll(configByzantineTest.RootDir) }) - return config + return cfg } func ensureDir(dir string, mode os.FileMode) { @@ -75,8 +73,8 @@ func ensureDir(dir string, mode os.FileMode) { } } -func ResetConfig(name string) *cfg.Config { - return cfg.ResetTestRoot(name) +func ResetConfig(name string) *config.Config { + return config.ResetTestRoot(name) } //------------------------------------------------------------------------------- @@ -102,7 +100,7 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida } func (vs *validatorStub) signVote( - config *cfg.Config, + cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { @@ -122,7 +120,7 @@ func (vs *validatorStub) signVote( BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() - if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil { + if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), v); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } @@ -141,12 +139,12 @@ func (vs *validatorStub) signVote( // Sign vote for type/hash/header func signVote( vs *validatorStub, - config *cfg.Config, + cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { - v, err := vs.signVote(config, voteType, hash, header) + v, err := vs.signVote(cfg, voteType, hash, header) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } @@ -157,14 +155,14 @@ func signVote( } func signVotes( - config *cfg.Config, + cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = 
signVote(vs, config, voteType, hash, header) + votes[i] = signVote(vs, cfg, voteType, hash, header) } return votes } @@ -255,14 +253,14 @@ func addVotes(to *State, votes ...*types.Vote) { } func signAddVotes( - config *cfg.Config, + cfg *config.Config, to *State, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub, ) { - votes := signVotes(config, voteType, hash, header, vss...) + votes := signVotes(cfg, voteType, hash, header, vss...) addVotes(to, votes...) } @@ -387,12 +385,12 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { // consensus states func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { - config := cfg.ResetTestRoot("consensus_state_test") - return newStateWithConfig(config, state, pv, app) + cfg := config.ResetTestRoot("consensus_state_test") + return newStateWithConfig(cfg, state, pv, app) } func newStateWithConfig( - thisConfig *cfg.Config, + thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, @@ -402,7 +400,7 @@ func newStateWithConfig( } func newStateWithConfigAndBlockStore( - thisConfig *cfg.Config, + thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, @@ -444,10 +442,10 @@ func newStateWithConfigAndBlockStore( return cs } -func loadPrivValidator(config *cfg.Config) *privval.FilePV { - privValidatorKeyFile := config.PrivValidator.KeyFile() +func loadPrivValidator(cfg *config.Config) *privval.FilePV { + privValidatorKeyFile := cfg.PrivValidator.KeyFile() ensureDir(filepath.Dir(privValidatorKeyFile), 0700) - privValidatorStateFile := config.PrivValidator.StateFile() + privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { panic(err) @@ -456,9 +454,9 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV { return privValidator } -func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) { +func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) { // Get State - state, privVals := randGenesisState(config, nValidators, false, 10) + state, privVals := randGenesisState(cfg, nValidators, false, 10) vss := make([]*validatorStub, nValidators) @@ -704,15 +702,15 @@ func consensusLogger() log.Logger { func randConsensusState( t *testing.T, - config *cfg.Config, + cfg *config.Config, nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, - configOpts ...func(*cfg.Config), + configOpts ...func(*config.Config), ) ([]*State, cleanupFunc) { - genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, 30) css := make([]*State, nValidators) logger := consensusLogger() @@ -759,18 +757,18 @@ func randConsensusState( // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers( - config *cfg.Config, + cfg *config.Config, nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application, -) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) { - genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, testMinPower) +) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) { + genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, testMinPower) css := make([]*State, nPeers) logger := consensusLogger() - var peer0Config *cfg.Config + var peer0Config 
*config.Config configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { state, _ := sm.MakeGenesisState(genDoc) @@ -799,7 +797,7 @@ func randConsensusNetWithPeers( } } - app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) + app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i))) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok { // simulate handshake, receive app version. If don't do this, replay test will fail @@ -820,12 +818,12 @@ func randConsensusNetWithPeers( } func randGenesisState( - config *cfg.Config, + cfg *config.Config, numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { - genDoc, privValidators := factory.RandGenesisDoc(config, numValidators, randPower, minPower) + genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, randPower, minPower) s0, _ := sm.MakeGenesisState(genDoc) return s0, privValidators } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 8eb1b3f60..58784a2ee 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -10,20 +10,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) // for testing -func assertMempool(txn txNotifier) mempl.Mempool { - return txn.(mempl.Mempool) +func assertMempool(txn txNotifier) mempool.Mempool { + return txn.(mempool.Mempool) } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { @@ -113,7 +112,7 @@ func deliverTxsRange(cs *State, start, end int) { for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempl.TxInfo{}) + err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) if err != nil { panic(fmt.Sprintf("Error after CheckTx: %v", err)) } @@ -179,7 +178,7 @@ func TestMempoolRmBadTx(t *testing.T) { return } checkTxRespCh <- struct{}{} - }, mempl.TxInfo{}) + }, mempool.TxInfo{}) if err != nil { t.Errorf("error after CheckTx: %v", err) return diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index bb4f72d8c..a5f06c884 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -12,12 +12,13 @@ import ( "github.com/fortytw2/leaktest" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/encoding" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" @@ -31,7 +32,6 @@ import ( tmcons 
"github.com/tendermint/tendermint/proto/tendermint/consensus" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) var ( @@ -273,11 +273,11 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he } func TestReactorBasic(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState(t, - config, n, "consensus_reactor_test", + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) @@ -316,14 +316,14 @@ func TestReactorBasic(t *testing.T) { } func TestReactorWithEvidence(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) n := 4 testName := "consensus_reactor_test" tickerFunc := newMockTickerFunc(true) appFunc := newKVStore - genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30) states := make([]*State, n) logger := consensusLogger() @@ -360,7 +360,7 @@ func TestReactorWithEvidence(t *testing.T) { // everyone includes evidence of another double signing vIdx := (i + 1) % n - ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID()) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID()) evpool := &statemocks.EvidencePool{} evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ @@ -412,17 +412,17 @@ func TestReactorWithEvidence(t *testing.T) { } func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState( t, - config, + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore, - func(c *cfg.Config) { + func(c *config.Config) { c.Consensus.CreateEmptyBlocks = false }, ) @@ -462,11 +462,11 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } func TestReactorRecordsVotesAndBlockParts(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState(t, - config, n, "consensus_reactor_test", + cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) @@ -521,12 +521,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorVotingPowerChange(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState( t, - config, + cfg, n, "consensus_voting_power_changes_test", newMockTickerFunc(true), @@ -573,7 +573,7 @@ func TestReactorVotingPowerChange(t *testing.T) { val1PubKey, err := states[0].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey) + val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey) require.NoError(t, err) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) @@ -622,12 +622,12 @@ func TestReactorVotingPowerChange(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) nPeers := 7 nVals := 4 states, _, _, cleanup := randConsensusNetWithPeers( - config, + cfg, nVals, nPeers, "consensus_val_set_changes_test", @@ -668,7 +668,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, 
err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) @@ -701,7 +701,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) @@ -721,7 +721,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) @@ -729,7 +729,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index a3a39fbbc..47ed61874 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -12,7 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -31,8 +31,8 @@ const ( // replay messages interactively or all at once // replay the wal file -func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(config, csConfig) +func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) { + consensusState := newConsensusStateForReplay(cfg, csConfig) if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) @@ -286,22 +286,22 @@ func (pb *playback) replayConsoleLoop() int { //-------------------------------------------------------------------------------- // convenience for replay mode -func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { - dbType := dbm.BackendType(config.DBBackend) +func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State { + dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore - blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { tmos.Exit(err.Error()) } blockStore := store.NewBlockStore(blockStoreDB) // Get State - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { tmos.Exit(err.Error()) } stateStore := sm.NewStore(stateDB) - gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) + gdoc, err 
:= sm.MakeGenesisDocFromFile(cfg.GenesisFile()) if err != nil { tmos.Exit(err.Error()) } @@ -311,7 +311,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo } // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) + clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) proxyApp := proxy.NewAppConns(clientCreator) err = proxyApp.Start() if err != nil { diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 7a3d2c34a..361ac6ec8 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -6,7 +6,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" @@ -16,12 +16,12 @@ import ( type emptyMempool struct{} -var _ mempl.Mempool = emptyMempool{} +var _ mempool.Mempool = emptyMempool{} func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } -func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } @@ -30,8 +30,8 @@ func (emptyMempool) Update( _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, ) error { return nil } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index c5623e36d..248cf6da3 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -22,10 +22,10 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" @@ -54,7 +54,7 @@ import ( // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. 
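The crash/replay tests that follow depend on the behavior described in the comment above: the WAL is flushed with every message, so a test can kill the node at an arbitrary height and still find a usable log. A rough sketch of how these tests seed a config with a pre-built WAL, reusing the WALWithNBlocks and tempWALWithData helpers that appear later in this file (the walFixture wrapper is a name invented for this sketch, not part of the patch):

func walFixture(t *testing.T, cfg *config.Config, numBlocks int) {
	// Generate the body of a WAL that already contains numBlocks blocks.
	walBody, err := WALWithNBlocks(t, numBlocks)
	require.NoError(t, err)

	// Persist it to a temp file and point the consensus config at it.
	walFile := tempWALWithData(walBody)
	cfg.Consensus.SetWalFile(walFile)
}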
-func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, +func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config, lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) @@ -103,7 +103,7 @@ func sendTxs(ctx context.Context, cs *State) { return default: tx := []byte{byte(i)} - if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil { + if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { panic(err) } i++ @@ -137,7 +137,7 @@ func TestWALCrash(t *testing.T) { } } -func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, +func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -286,12 +286,12 @@ func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ type simulatorTestSuite struct { GenesisState sm.State - Config *cfg.Config + Config *config.Config Chain []*types.Block Commits []*types.Commit CleanupFunc cleanupFunc - Mempool mempl.Mempool + Mempool mempool.Mempool Evpool sm.EvidencePool } @@ -311,7 +311,7 @@ var modes = []uint{0, 1, 2, 3} // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay func setupSimulator(t *testing.T) *simulatorTestSuite { t.Helper() - config := configSetup(t) + cfg := configSetup(t) sim := &simulatorTestSuite{ Mempool: emptyMempool{}, @@ -321,14 +321,14 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { nPeers := 7 nVals := 4 - css, genDoc, config, cleanup := randConsensusNetWithPeers( - config, + css, genDoc, cfg, cleanup := randConsensusNetWithPeers( + cfg, nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath) - sim.Config = config + sim.Config = cfg sim.GenesisState, _ = sm.MakeGenesisState(genDoc) sim.CleanupFunc = cleanup @@ -361,10 +361,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { incrementHeight(vss...) 
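The setup steps below all repeat one pattern: fetch a validator's public key, convert it to the proto form ABCI expects, wrap it in a kvstore val-set change transaction, and push it through CheckTx. Condensed into one place (privVal stands in for the css[...] private validators used in the actual code):

pk, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)

// Convert the key to its ABCI proto representation.
abciPk, err := encoding.PubKeyToProto(pk)
require.NoError(t, err)

// A val-set change tx with positive power adds or updates the validator.
tx := kvstore.MakeValSetChangeTx(abciPk, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
require.NoError(t, err)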
newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) @@ -372,7 +372,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal := types.NewProposal(vss[1].Height, round, -1, blockID) p := proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -393,10 +393,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { incrementHeight(vss...) updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -404,7 +404,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[2].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -425,17 +425,17 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { incrementHeight(vss...) 
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background()) require.NoError(t, err) - newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -463,7 +463,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[3].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[3].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -475,7 +475,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { ensureNewProposal(proposalCh, height, round) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) rs = css[0].GetRoundState() @@ -514,7 +514,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) 
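Removal (removeValidatorTx2 above, removeValidatorTx3 below) is the same transaction with the power argument set to zero; under the usual ABCI convention, a validator update carrying power 0 drops that validator from the set. Schematically (valABCIPubKey is a stand-in for the newVal*ABCI keys built above):

// Power 0 in a val-set change tx removes the validator.
removeTx := kvstore.MakeValSetChangeTx(valABCIPubKey, 0)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeTx, nil, mempool.TxInfo{})
require.NoError(t, err)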
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -526,7 +526,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -611,8 +611,8 @@ func TestHandshakeReplayNone(t *testing.T) { // Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx func TestMockProxyApp(t *testing.T) { sim := setupSimulator(t) // setup config and simulator - config := sim.Config - assert.NotNil(t, config) + cfg := sim.Config + assert.NotNil(t, cfg) logger := log.TestingLogger() var validTxs, invalidTxs = 0, 0 @@ -687,7 +687,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod var stateDB dbm.DB var genesisState sm.State - config := sim.Config + cfg := sim.Config if testValidatorsChange { testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) @@ -695,19 +695,19 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod stateDB = dbm.NewMemDB() genesisState = sim.GenesisState - config = sim.Config + cfg = sim.Config chain = append([]*types.Block{}, sim.Chain...) 
// copy chain commits = sim.Commits - store = newMockBlockStore(config, genesisState.ConsensusParams) + store = newMockBlockStore(cfg, genesisState.ConsensusParams) } else { // test single node testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() walBody, err := WALWithNBlocks(t, numBlocks) require.NoError(t, err) walFile := tempWALWithData(walBody) - config.Consensus.SetWalFile(walFile) + cfg.Consensus.SetWalFile(walFile) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) wal, err := NewWAL(walFile) @@ -724,7 +724,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod require.NoError(t, err) pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) + stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion) } stateStore := sm.NewStore(stateDB) @@ -733,12 +733,12 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) + state = buildTMStateFromChain(cfg, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) latestAppHash := state.AppHash // make a new client creator kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) + filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) clientCreator2 := abciclient.NewLocalCreator(kvstoreApp) @@ -763,7 +763,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator2) if err := proxyApp.Start(); err != nil { @@ -811,7 +811,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } func applyBlock(stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, @@ -831,7 +831,7 @@ func applyBlock(stateStore sm.Store, func buildAppStateFromChain( proxyApp proxy.AppConns, stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, @@ -878,8 +878,8 @@ func buildAppStateFromChain( } func buildTMStateFromChain( - config *cfg.Config, - mempool mempl.Mempool, + cfg *config.Config, + mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, state sm.State, @@ -889,7 +889,7 @@ func buildTMStateFromChain( blockStore *mockBlockStore) sm.State { // run the whole chain against this client to build up the tendermint state kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) + filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", 
nBlocks, mode))) defer kvstoreApp.Close() clientCreator := abciclient.NewLocalCreator(kvstoreApp) @@ -938,16 +938,16 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x01 // - 0x02 // - 0x03 - config := ResetConfig("handshake_test_") - t.Cleanup(func() { os.RemoveAll(config.RootDir) }) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + cfg := ResetConfig("handshake_test_") + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) - stateDB, state, store := stateAndStore(config, pubKey, appVersion) + stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks blocks := sf.MakeBlocks(3, &state, privVal) @@ -1153,14 +1153,14 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { // fresh state and mock store func stateAndStore( - config *cfg.Config, + cfg *config.Config, pubKey crypto.PubKey, appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) state.Version.Consensus.App = appVersion - store := newMockBlockStore(config, state.ConsensusParams) + store := newMockBlockStore(cfg, state.ConsensusParams) if err := stateStore.Save(state); err != nil { panic(err) } @@ -1171,7 +1171,7 @@ func stateAndStore( // mock block store type mockBlockStore struct { - config *cfg.Config + cfg *config.Config params types.ConsensusParams chain []*types.Block commits []*types.Commit @@ -1179,8 +1179,8 @@ type mockBlockStore struct { } // TODO: NewBlockStore(db.NewMemDB) ... 
-func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil, 0} +func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{cfg, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } @@ -1228,20 +1228,20 @@ func TestHandshakeUpdatesValidators(t *testing.T) { app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := abciclient.NewLocalCreator(app) - config := ResetConfig("handshake_test_") - t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) + cfg := ResetConfig("handshake_test_") + t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) - stateDB, state, store := stateAndStore(config, pubKey, 0x0) + stateDB, state, store := stateAndStore(cfg, pubKey, 0x0) stateStore := sm.NewStore(stateDB) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 165318c9c..46bacf5cc 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -12,7 +12,7 @@ import ( "github.com/gogo/protobuf/proto" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/libs/fail" @@ -80,7 +80,7 @@ type State struct { service.BaseService // config details - config *cfg.ConsensusConfig + config *config.ConsensusConfig privValidator types.PrivValidator // for signing votes privValidatorType types.PrivValidatorType @@ -152,7 +152,7 @@ type StateOption func(*State) // NewState returns a new State. 
func NewState( - config *cfg.ConsensusConfig, + cfg *config.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, @@ -161,7 +161,7 @@ func NewState( options ...StateOption, ) *State { cs := &State{ - config: config, + config: cfg, blockExec: blockExec, blockStore: blockStore, txNotifier: txNotifier, diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index a9e309b4f..b3830a3f6 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -15,19 +15,19 @@ import ( "github.com/tendermint/tendermint/types" ) -var config *cfg.Config // NOTE: must be reset for each _test.go file +var cfg *config.Config // NOTE: must be reset for each _test.go file func TestMain(m *testing.M) { - config = cfg.ResetTestRoot("consensus_height_vote_set_test") + cfg = config.ResetTestRoot("consensus_height_vote_set_test") code := m.Run() - os.RemoveAll(config.RootDir) + os.RemoveAll(cfg.RootDir) os.Exit(code) } func TestPeerCatchupRounds(t *testing.T) { valSet, privVals := factory.RandValidatorSet(10, 1) - hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) + hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet) vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1") @@ -75,7 +75,7 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty Type: tmproto.PrecommitType, BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } - chainID := config.ChainID() + chainID := cfg.ChainID() v := vote.ToProto() err = privVal.SignVote(context.Background(), chainID, v) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index b92230d2c..ccc6685fc 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -11,11 +11,11 @@ import ( "time" "github.com/stretchr/testify/require" - db "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -30,9 +30,9 @@ import ( // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { - config := getConfig(t) + cfg := getConfig(t) - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) + app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) t.Cleanup(func() { require.NoError(t, app.Close()) }) logger := log.TestingLogger().With("wal_generator", "wal_generator") @@ -41,17 +41,17 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // NOTE: we can't import node package because of circular dependency. 
// NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. - privValidatorKeyFile := config.PrivValidator.KeyFile() - privValidatorStateFile := config.PrivValidator.StateFile() + privValidatorKeyFile := cfg.PrivValidator.KeyFile() + privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { return err } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) } - blockStoreDB := db.NewMemDB() + blockStoreDB := dbm.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) @@ -89,7 +89,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) + consensusState := NewState(cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) if privValidator != nil && privValidator != (*privval.FilePV)(nil) { @@ -153,8 +153,8 @@ func makeAddrs() (string, string, string) { } // getConfig returns a config for test cases -func getConfig(t *testing.T) *cfg.Config { - c := cfg.ResetTestRoot(t.Name()) +func getConfig(t *testing.T) *config.Config { + c := config.ResetTestRoot(t.Name()) // and we use random ports to run in parallel tm, rpc, grpc := makeAddrs() diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 2bb3d8008..df1642f82 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 5749d2d3c..bf9ce8bd0 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -5,19 +5,19 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // Mempool is an empty implementation of a Mempool, useful for testing. 
type Mempool struct{} -var _ mempl.Mempool = Mempool{} +var _ mempool.Mempool = Mempool{} func (Mempool) Lock() {} func (Mempool) Unlock() {} func (Mempool) Size() int { return 0 } -func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } @@ -26,8 +26,8 @@ func (Mempool) Update( _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, ) error { return nil } diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 9edf067de..f3571ede0 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -8,7 +8,7 @@ import ( "sync/atomic" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -32,7 +32,7 @@ type CListMempool struct { notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - config *cfg.MempoolConfig + config *config.MempoolConfig // Exclusive mutex for Update method to prevent concurrent execution of // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. @@ -69,14 +69,14 @@ type CListMempoolOption func(*CListMempool) // NewCListMempool returns a new mempool with the given configuration and // connection to an application. func NewCListMempool( - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64, options ...CListMempoolOption, ) *CListMempool { mp := &CListMempool{ - config: config, + config: cfg, proxyAppConn: proxyAppConn, txs: clist.New(), height: height, @@ -86,8 +86,8 @@ func NewCListMempool( metrics: mempool.NopMetrics(), } - if config.CacheSize > 0 { - mp.cache = mempool.NewLRUTxCache(config.CacheSize) + if cfg.CacheSize > 0 { + mp.cache = mempool.NewLRUTxCache(cfg.CacheSize) } else { mp.cache = mempool.NopTxCache{} } diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 6d023b5f6..e55942e48 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -19,7 +19,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -32,10 +32,10 @@ import ( type cleanupFunc func() func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) { - return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) + return newMempoolWithAppAndConfig(cc, config.ResetTestRoot("mempool_test")) } -func newMempoolWithAppAndConfig(cc abciclient.Creator, config *cfg.Config) (*CListMempool, cleanupFunc) { +func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) { appConnMem, _ := cc() 
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() @@ -43,10 +43,10 @@ func newMempoolWithAppAndConfig(cc abciclient.Creator, config *cfg.Config) (*CLi panic(err) } - mp := NewCListMempool(config.Mempool, appConnMem, 0) + mp := NewCListMempool(cfg.Mempool, appConnMem, 0) mp.SetLogger(log.TestingLogger()) - return mp, func() { os.RemoveAll(config.RootDir) } + return mp, func() { os.RemoveAll(cfg.RootDir) } } func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { @@ -217,7 +217,7 @@ func TestMempoolUpdate(t *testing.T) { func TestMempool_KeepInvalidTxsInCache(t *testing.T) { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) - wcfg := cfg.DefaultConfig() + wcfg := config.DefaultConfig() wcfg.Mempool.KeepInvalidTxsInCache = true mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) defer cleanup() @@ -465,9 +465,9 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { func TestMempoolTxsBytes(t *testing.T) { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) - config := cfg.ResetTestRoot("mempool_test") - config.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, config) + cfg := config.ResetTestRoot("mempool_test") + cfg.Mempool.MaxTxsBytes = 10 + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) defer cleanup() // 1. zero by default @@ -564,8 +564,8 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { t.Error(err) } }) - config := cfg.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(cc, config) + cfg := config.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) defer cleanup() // generate small number of txs @@ -577,7 +577,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { } // simulate a group of peers sending them over and over - N := config.Mempool.Size + N := cfg.Mempool.Size maxPeers := 5 for i := 0; i < N; i++ { peerID := mrand.Intn(maxPeers) diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 29dec5833..b8aac3b5c 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -37,7 +37,7 @@ type PeerManager interface { type Reactor struct { service.BaseService - config *cfg.MempoolConfig + cfg *config.MempoolConfig mempool *CListMempool ids *mempool.MempoolIDs @@ -61,7 +61,7 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. func NewReactor( logger log.Logger, - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, peerMgr PeerManager, mp *CListMempool, mempoolCh *p2p.Channel, @@ -69,7 +69,7 @@ func NewReactor( ) *Reactor { r := &Reactor{ - config: config, + cfg: cfg, peerMgr: peerMgr, mempool: mp, ids: mempool.NewMempoolIDs(), @@ -90,8 +90,8 @@ func NewReactor( // // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) +func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { + largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, @@ -117,7 +117,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. func (r *Reactor) OnStart() error { - if !r.config.Broadcast { + if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } @@ -254,7 +254,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { return } - if r.config.Broadcast { + if r.cfg.Broadcast { // Check if we've already started a goroutine for this peer, if not we create // a new done channel so we can explicitly close the goroutine if the peer // is later removed, we increment the waitgroup so the reactor can stop diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 8bab45dd0..b0462a249 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -11,7 +11,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" @@ -36,7 +36,7 @@ type reactorTestSuite struct { nodes []types.NodeID } -func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { +func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() rts := &reactorTestSuite{ @@ -68,7 +68,7 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac rts.reactors[nodeID] = NewReactor( rts.logger.With("nodeID", nodeID), - cfg, + config, rts.network.Nodes[nodeID].PeerManager, mempool, rts.mempoolChnnels[nodeID], @@ -158,9 +158,9 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...typ func TestReactorBroadcastTxs(t *testing.T) { numTxs := 1000 numNodes := 10 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondaries := rts.nodes[1:] @@ -185,9 +185,9 @@ func TestReactorBroadcastTxs(t *testing.T) { func TestReactorConcurrency(t *testing.T) { numTxs := 5 numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -244,9 +244,9 @@ func TestReactorConcurrency(t *testing.T) { func TestReactorNoBroadcastToSender(t *testing.T) { numTxs := 1000 numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, uint(numTxs)) + rts := setup(t, cfg.Mempool, numNodes, uint(numTxs)) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -267,16 +267,16 @@ func TestReactorNoBroadcastToSender(t *testing.T) { 
func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] // Broadcast a tx, which has the max size and ensure it's received by the // second reactor. - tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes) + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) err := rts.reactors[primary].mempool.CheckTx( context.Background(), tx1, @@ -297,7 +297,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { rts.reactors[secondary].mempool.Flush() // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1) + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) require.Error(t, err) @@ -305,11 +305,11 @@ func TestReactor_MaxTxBytes(t *testing.T) { } func TestDontExhaustMaxActiveIDs(t *testing.T) { - config := cfg.TestConfig() + cfg := config.TestConfig() // we're creating a single node network, but not starting the // network. - rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1) + rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1) nodeID := rts.nodes[0] @@ -374,9 +374,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { t.Skip("skipping test in short mode") } - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, 2, 0) + rts := setup(t, cfg.Mempool, 2, 0) primary := rts.nodes[0] secondary := rts.nodes[1] diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 3014e0519..75f89d07e 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -37,7 +37,7 @@ type PeerManager interface { type Reactor struct { service.BaseService - config *cfg.MempoolConfig + cfg *config.MempoolConfig mempool *TxMempool ids *mempool.MempoolIDs @@ -65,7 +65,7 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. func NewReactor( logger log.Logger, - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, peerMgr PeerManager, txmp *TxMempool, mempoolCh *p2p.Channel, @@ -73,7 +73,7 @@ func NewReactor( ) *Reactor { r := &Reactor{ - config: config, + cfg: cfg, peerMgr: peerMgr, mempool: txmp, ids: mempool.NewMempoolIDs(), @@ -97,8 +97,8 @@ func defaultObservePanic(r interface{}) {} // // TODO: Remove once p2p refactor is complete. // ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) +func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { + largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, @@ -124,7 +124,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
func (r *Reactor) OnStart() error { - if !r.config.Broadcast { + if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } @@ -262,7 +262,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { return } - if r.config.Broadcast { + if r.cfg.Broadcast { // Check if we've already started a goroutine for this peer, if not we create // a new done channel so we can explicitly close the goroutine if the peer // is later removed, we increment the waitgroup so the reactor can stop diff --git a/internal/p2p/conn/evil_secret_connection_test.go b/internal/p2p/conn/evil_secret_connection_test.go index 6d8b7cbf7..05e88cd85 100644 --- a/internal/p2p/conn/evil_secret_connection_test.go +++ b/internal/p2p/conn/evil_secret_connection_test.go @@ -13,7 +13,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -113,7 +113,7 @@ func (c *evilConn) Read(data []byte) (n int, err error) { case 1: signature := c.signChallenge() if !c.badAuthSignature { - pkpb, err := cryptoenc.PubKeyToProto(c.privKey.PubKey()) + pkpb, err := encoding.PubKeyToProto(c.privKey.PubKey()) if err != nil { panic(err) } diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index 2f0d269d6..35fac488a 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -23,7 +23,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" @@ -406,7 +406,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. 
var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - pbpk, err := cryptoenc.PubKeyToProto(pubKey) + pbpk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, true, err } @@ -423,7 +423,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + pk, err := encoding.PubKeyFromProto(pba.PubKey) if err != nil { return nil, true, err // abort } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index 0825af948..edb5fc6fc 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -6,9 +6,10 @@ import ( "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestPeerScoring(t *testing.T) { diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index a5acb0d5e..63e479e6a 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/libs/log" - proto "github.com/tendermint/tendermint/proto/tendermint/p2p" + p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -45,7 +45,7 @@ func TestReactorBasic(t *testing.T) { // assert that when a mock node sends a request it receives a response (and // the correct one) testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []proto.PexAddressV2(nil)) + testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { @@ -71,17 +71,17 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &proto.PexRequestV2{}, + Message: &p2pproto.PexRequestV2{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*proto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponseV2) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &proto.PexRequestV2{}, + Message: &p2pproto.PexRequestV2{}, } peerErr := <-r.pexErrCh @@ -136,10 +136,10 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { require.NoError(t, err) require.True(t, added) - addresses := make([]proto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddressV2, 101) for i := 0; i < len(addresses); i++ { nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = proto.PexAddressV2{ + addresses[i] = p2pproto.PexAddressV2{ URL: nodeAddress.String(), } } @@ -152,12 +152,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*proto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &proto.PexResponseV2{ + Message: &p2pproto.PexResponseV2{ Addresses: addresses, }, } @@ -290,7 +290,7 @@ func setupSingle(t *testing.T) *singleTestReactor { pexErrCh := make(chan p2p.PeerError, chBuf) 
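One rename in these pex tests runs in the opposite direction: the alias proto for proto/tendermint/p2p becomes the longer p2pproto. Here the short alias was the problem, since proto conventionally names the gogo/protobuf package; with the rename both can coexist in one file. A minimal sketch, assuming a file that also needs the gogo package:

import (
	"github.com/gogo/protobuf/proto" // `proto` conventionally means this package

	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

// pexRequestSize uses the two packages side by side without a name clash.
func pexRequestSize() int {
	return proto.Size(&p2pproto.PexRequestV2{})
}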
pexCh := p2p.NewChannel( p2p.ChannelID(pex.PexChannel), - new(proto.PexMessage), + new(p2pproto.PexMessage), pexInCh, pexOutCh, pexErrCh, @@ -381,7 +381,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { // NOTE: we don't assert that the channels get drained after stopping the // reactor rts.pexChannels = rts.network.MakeChannelsNoCleanup( - t, pex.ChannelDescriptor(), new(proto.PexMessage), chBuf, + t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), chBuf, ) idx := 0 @@ -447,7 +447,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { r.network.Nodes[node.NodeID] = node nodeID := node.NodeID r.pexChannels[nodeID] = node.MakeChannelNoCleanup( - t, pex.ChannelDescriptor(), new(proto.PexMessage), r.opts.BufferSize, + t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), r.opts.BufferSize, ) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) @@ -488,11 +488,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, r.logger.Info("Listening for request", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexRequestV2) + _, ok := msg.Message.(*p2pproto.PexRequestV2) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexRequestV2{}, msg.Message) + require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -507,11 +507,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponseV2) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*proto.PexResponseV2) + m, ok := msg.Message.(*p2pproto.PexResponseV2) if !ok { require.Fail(t, "expected pex response v2") return true @@ -534,17 +534,17 @@ func (r *reactorTestSuite) listenForResponse( t *testing.T, fromNode, toNode int, waitPeriod time.Duration, - addresses []proto.PexAddressV2, + addresses []p2pproto.PexAddressV2, ) { r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponseV2) r.logger.Info("message", msg, "ok", ok) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexResponseV2{Addresses: addresses}, msg.Message) + require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -554,16 +554,16 @@ func (r *reactorTestSuite) listenForLegacyResponse( t *testing.T, fromNode, toNode int, waitPeriod time.Duration, - addresses []proto.PexAddress, + addresses []p2pproto.PexAddress, ) { r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*proto.PexResponse) + _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } assertion := func(t 
*testing.T, msg p2p.Envelope) bool { - require.Equal(t, &proto.PexResponse{Addresses: addresses}, msg.Message) + require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -595,26 +595,26 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []proto.PexAddressV2 { - addresses := make([]proto.PexAddressV2, len(nodes)) +func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 { + addresses := make([]p2pproto.PexAddressV2, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - addresses[idx] = proto.PexAddressV2{ + addresses[idx] = p2pproto.PexAddressV2{ URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []proto.PexAddress { - addresses := make([]proto.PexAddress, len(nodes)) +func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress { + addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] nodeAddrs := r.network.Nodes[nodeID].NodeAddress endpoints, err := nodeAddrs.Resolve(context.Background()) require.NoError(t, err) require.Len(t, endpoints, 1) - addresses[idx] = proto.PexAddress{ + addresses[idx] = p2pproto.PexAddress{ ID: string(nodeAddrs.NodeID), IP: endpoints[0].IP.String(), Port: uint32(endpoints[0].Port), @@ -628,12 +628,12 @@ func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bo if v2 { r.pexChannels[from].Out <- p2p.Envelope{ To: to, - Message: &proto.PexRequestV2{}, + Message: &p2pproto.PexRequestV2{}, } } else { r.pexChannels[from].Out <- p2p.Envelope{ To: to, - Message: &proto.PexRequest{}, + Message: &p2pproto.PexRequest{}, } } } @@ -649,7 +649,7 @@ func (r *reactorTestSuite) sendResponse( addrs := r.getV2AddressesFor(withNodes) r.pexChannels[from].Out <- p2p.Envelope{ To: to, - Message: &proto.PexResponseV2{ + Message: &p2pproto.PexResponseV2{ Addresses: addrs, }, } @@ -657,7 +657,7 @@ func (r *reactorTestSuite) sendResponse( addrs := r.getAddressesFor(t, withNodes) r.pexChannels[from].Out <- p2p.Envelope{ To: to, - Message: &proto.PexResponse{ + Message: &p2pproto.PexResponse{ Addresses: addrs, }, } @@ -764,8 +764,8 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int } // nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto.PexAddress { - var addresses []proto.PexAddress +func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress { + var addresses []p2pproto.PexAddress for _, i := range nodeIndices { if i < len(r.nodes) { require.Fail(t, "index for pex address is greater than number of nodes") @@ -777,7 +777,7 @@ func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto require.NoError(t, err) for _, endpoint := range endpoints { if endpoint.IP != nil { - addresses = append(addresses, proto.PexAddress{ + addresses = append(addresses, p2pproto.PexAddress{ ID: string(nodeAddrs.NodeID), IP: endpoint.IP.String(), Port: uint32(endpoint.Port), diff --git a/internal/rpc/core/abci.go b/internal/rpc/core/abci.go index fe497b336..06c033050 100644 --- a/internal/rpc/core/abci.go +++ b/internal/rpc/core/abci.go @@ -4,7 +4,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/proxy" 
"github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -16,7 +16,7 @@ func (env *Environment) ABCIQuery( data bytes.HexBytes, height int64, prove bool, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{ Path: path, Data: data, @@ -27,16 +27,16 @@ func (env *Environment) ABCIQuery( return nil, err } - return &ctypes.ResultABCIQuery{Response: *resQuery}, nil + return &coretypes.ResultABCIQuery{Response: *resQuery}, nil } // ABCIInfo gets some info about the application. // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo) if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{Response: *resInfo}, nil + return &coretypes.ResultABCIInfo{Response: *resInfo}, nil } diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 909d2228f..26472fab4 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -25,7 +25,7 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( ctx *rpctypes.Context, - minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -49,7 +49,7 @@ func (env *Environment) BlockchainInfo( } } - return &ctypes.ResultBlockchainInfo{ + return &coretypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), BlockMetas: blockMetas}, nil } @@ -60,7 +60,7 @@ func (env *Environment) BlockchainInfo( func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // filter negatives if min < 0 || max < 0 { - return min, max, ctypes.ErrZeroOrNegativeHeight + return min, max, coretypes.ErrZeroOrNegativeHeight } // adjust for default values @@ -83,7 +83,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { if min > max { return min, max, fmt.Errorf("%w: min height %d can't be greater than max height %d", - ctypes.ErrInvalidRequest, min, max) + coretypes.ErrInvalidRequest, min, max) } return min, max, nil } @@ -91,7 +91,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. 
// More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -99,33 +99,33 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } block := env.BlockStore.LoadBlock(height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // BlockByHash gets block by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) { +func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. block := env.BlockStore.LoadBlockByHash(hash) if block == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } // If block is not nil, then blockMeta can't be nil. blockMeta := env.BlockStore.LoadBlockMeta(block.Height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { +func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -144,7 +144,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // NOTE: we can't yet ensure atomicity of operations in asserting // whether this is the latest height and retrieving the seen commit if commit != nil && commit.Height == height { - return ctypes.NewResultCommit(&header, commit, false), nil + return coretypes.NewResultCommit(&header, commit, false), nil } } @@ -153,7 +153,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes if commit == nil { return nil, nil } - return ctypes.NewResultCommit(&header, commit, true), nil + return coretypes.NewResultCommit(&header, commit, true), nil } // BlockResults gets ABCIResults at a given height. 
@@ -163,7 +163,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
 // Thus response.results.deliver_tx[5] is the results of executing
 // getBlock(h).Txs[5]
 // More: https://docs.tendermint.com/master/rpc/#/Info/block_results
-func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
+func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) {
 	height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
 	if err != nil {
 		return nil, err
@@ -179,7 +179,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*
 		totalGasUsed += tx.GetGasUsed()
 	}

-	return &ctypes.ResultBlockResults{
+	return &coretypes.ResultBlockResults{
 		Height:       height,
 		TxsResults:   results.DeliverTxs,
 		TotalGasUsed: totalGasUsed,
@@ -197,7 +197,7 @@ func (env *Environment) BlockSearch(
 	query string,
 	pagePtr, perPagePtr *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
+) (*coretypes.ResultBlockSearch, error) {

 	if !indexer.KVSinkEnabled(env.EventSinks) {
 		return nil, fmt.Errorf("block searching is disabled due to no kvEventSink")
@@ -229,7 +229,7 @@ func (env *Environment) BlockSearch(
 		sort.Slice(results, func(i, j int) bool { return results[i] < results[j] })

 	default:
-		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest)
+		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest)
 	}

 	// paginate results
@@ -244,13 +244,13 @@ func (env *Environment) BlockSearch(
 	skipCount := validateSkipCount(page, perPage)
 	pageSize := tmmath.MinInt(perPage, totalCount-skipCount)

-	apiResults := make([]*ctypes.ResultBlock, 0, pageSize)
+	apiResults := make([]*coretypes.ResultBlock, 0, pageSize)
 	for i := skipCount; i < skipCount+pageSize; i++ {
 		block := env.BlockStore.LoadBlock(results[i])
 		if block != nil {
 			blockMeta := env.BlockStore.LoadBlockMeta(block.Height)
 			if blockMeta != nil {
-				apiResults = append(apiResults, &ctypes.ResultBlock{
+				apiResults = append(apiResults, &coretypes.ResultBlock{
 					Block:   block,
 					BlockID: blockMeta.BlockID,
 				})
@@ -258,5 +258,5 @@ func (env *Environment) BlockSearch(
 		}
 	}

-	return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
+	return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil
 }
diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go
index 5b602e339..68237bc0b 100644
--- a/internal/rpc/core/blocks_test.go
+++ b/internal/rpc/core/blocks_test.go
@@ -12,7 +12,7 @@ import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	sm "github.com/tendermint/tendermint/internal/state"
 	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -89,12 +89,12 @@ func TestBlockResults(t *testing.T) {
 	testCases := []struct {
 		height  int64
 		wantErr bool
-		wantRes *ctypes.ResultBlockResults
+		wantRes *coretypes.ResultBlockResults
 	}{
 		{-1, true, nil},
 		{0, true, nil},
 		{101, true, nil},
-		{100, false, &ctypes.ResultBlockResults{
+		{100, false, &coretypes.ResultBlockResults{
 			Height:       100,
 			TxsResults:   results.DeliverTxs,
 			TotalGasUsed: 15,
diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go
index 6fbba0c0f..ac49bbd31 100644
--- a/internal/rpc/core/consensus.go
+++ b/internal/rpc/core/consensus.go
@@ -3,9 +3,9 @@ package core
 import (
 	"errors"

-	cm "github.com/tendermint/tendermint/internal/consensus"
+	"github.com/tendermint/tendermint/internal/consensus"
 	tmmath "github.com/tendermint/tendermint/libs/math"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -20,7 +20,7 @@ import (
 func (env *Environment) Validators(
 	ctx *rpctypes.Context,
 	heightPtr *int64,
-	pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) {
+	pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) {

 	// The latest validator that we know is the NextValidator of the last block.
 	height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr)
@@ -44,7 +44,7 @@ func (env *Environment) Validators(

 	v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)]

-	return &ctypes.ResultValidators{
+	return &coretypes.ResultValidators{
 		BlockHeight: height,
 		Validators:  v,
 		Count:       len(v),
@@ -54,16 +54,16 @@ func (env *Environment) Validators(
 // DumpConsensusState dumps consensus state.
 // UNSTABLE
 // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state
-func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
+func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) {
 	// Get Peer consensus states.
-	var peerStates []ctypes.PeerStateInfo
+	var peerStates []coretypes.PeerStateInfo
 	switch {
 	case env.P2PPeers != nil:
 		peers := env.P2PPeers.Peers().List()
-		peerStates = make([]ctypes.PeerStateInfo, 0, len(peers))
+		peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
 		for _, peer := range peers {
-			peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState)
+			peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState)
 			if !ok { // peer does not have a state yet
 				continue
 			}
@@ -71,7 +71,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
 			if err != nil {
 				return nil, err
 			}
-			peerStates = append(peerStates, ctypes.PeerStateInfo{
+			peerStates = append(peerStates, coretypes.PeerStateInfo{
 				// Peer basic info.
 				NodeAddress: peer.SocketAddr().String(),
 				// Peer consensus state.
@@ -80,7 +80,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
 		}
 	case env.PeerManager != nil:
 		peers := env.PeerManager.Peers()
-		peerStates = make([]ctypes.PeerStateInfo, 0, len(peers))
+		peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
 		for _, pid := range peers {
 			peerState, ok := env.ConsensusReactor.GetPeerState(pid)
 			if !ok {
@@ -94,7 +94,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul

 			addr := env.PeerManager.Addresses(pid)
 			if len(addr) >= 1 {
-				peerStates = append(peerStates, ctypes.PeerStateInfo{
+				peerStates = append(peerStates, coretypes.PeerStateInfo{
 					// Peer basic info.
 					NodeAddress: addr[0].String(),
 					// Peer consensus state.
@@ -111,7 +111,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultDumpConsensusState{
+	return &coretypes.ResultDumpConsensusState{
 		RoundState: roundState,
 		Peers:      peerStates}, nil
 }
@@ -119,10 +119,10 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul
 // ConsensusState returns a concise summary of the consensus state.
 // UNSTABLE
 // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state
-func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
+func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) {
 	// Get self round state.
 	bz, err := env.ConsensusState.GetRoundStateSimpleJSON()
-	return &ctypes.ResultConsensusState{RoundState: bz}, err
+	return &coretypes.ResultConsensusState{RoundState: bz}, err
 }

 // ConsensusParams gets the consensus parameters at the given block height.
@@ -130,7 +130,7 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result
 // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params
 func (env *Environment) ConsensusParams(
 	ctx *rpctypes.Context,
-	heightPtr *int64) (*ctypes.ResultConsensusParams, error) {
+	heightPtr *int64) (*coretypes.ResultConsensusParams, error) {

 	// The latest consensus params that we know is the consensus params after the
 	// last block.
@@ -144,7 +144,7 @@ func (env *Environment) ConsensusParams(
 		return nil, err
 	}

-	return &ctypes.ResultConsensusParams{
+	return &coretypes.ResultConsensusParams{
 		BlockHeight:     height,
 		ConsensusParams: consensusParams}, nil
 }
diff --git a/internal/rpc/core/dev.go b/internal/rpc/core/dev.go
index fde7aef99..21c5154ff 100644
--- a/internal/rpc/core/dev.go
+++ b/internal/rpc/core/dev.go
@@ -1,12 +1,12 @@
 package core

 import (
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

 // UnsafeFlushMempool removes all transactions from the mempool.
-func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) {
+func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) {
 	env.Mempool.Flush()
-	return &ctypes.ResultUnsafeFlushMempool{}, nil
+	return &coretypes.ResultUnsafeFlushMempool{}, nil
 }
diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go
index 44264e949..11b138eac 100644
--- a/internal/rpc/core/env.go
+++ b/internal/rpc/core/env.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/internal/consensus"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/proxy"
 	sm "github.com/tendermint/tendermint/internal/state"
@@ -16,7 +16,7 @@ import (
 	"github.com/tendermint/tendermint/internal/statesync"
 	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	"github.com/tendermint/tendermint/types"
 )
@@ -96,13 +96,13 @@ type Environment struct {
 	GenDoc     *types.GenesisDoc // cache the genesis structure
 	EventSinks []indexer.EventSink
 	EventBus   *types.EventBus // thread safe
-	Mempool    mempl.Mempool
+	Mempool    mempool.Mempool
 	BlockSyncReactor  consensus.BlockSyncReactor
 	StateSyncMetricer statesync.Metricer

 	Logger log.Logger

-	Config cfg.RPCConfig
+	Config config.RPCConfig

 	// cache of chunked genesis data.
 	genChunks []string
@@ -113,7 +113,7 @@ type Environment struct {
 func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
 	// this can only happen if we haven't first run validatePerPage
 	if perPage < 1 {
-		panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage))
+		panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage))
 	}

 	if pagePtr == nil { // no page parameter
@@ -126,7 +126,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
 	}
 	page := *pagePtr
 	if page <= 0 || page > pages {
-		return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page)
+		return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page)
 	}

 	return page, nil
@@ -191,15 +191,15 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64,
 	if heightPtr != nil {
 		height := *heightPtr
 		if height <= 0 {
-			return 0, fmt.Errorf("%w (requested height: %d)", ctypes.ErrZeroOrNegativeHeight, height)
+			return 0, fmt.Errorf("%w (requested height: %d)", coretypes.ErrZeroOrNegativeHeight, height)
 		}
 		if height > latestHeight {
 			return 0, fmt.Errorf("%w (requested height: %d, blockchain height: %d)",
-				ctypes.ErrHeightExceedsChainHead, height, latestHeight)
+				coretypes.ErrHeightExceedsChainHead, height, latestHeight)
 		}
 		base := env.BlockStore.Base()
 		if height < base {
-			return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, height, base)
+			return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, height, base)
 		}
 		return height, nil
 	}
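validatePage above derives the page count by rounding totalCount/perPage up and rejects anything outside [1, pages]. A minimal sketch of the same check; errPageOutOfRange stands in for coretypes.ErrPageOutOfRange, and the real helper additionally panics when perPage < 1:

package main

import (
	"errors"
	"fmt"
)

var errPageOutOfRange = errors.New("page should be within range")

// validatePageSketch mirrors the pagination check: compute the number of
// pages by rounding up, and reject out-of-range page requests.
func validatePageSketch(pagePtr *int, perPage, totalCount int) (int, error) {
	if pagePtr == nil {
		return 1, nil // no ?page= parameter: default to the first page
	}
	pages := ((totalCount - 1) / perPage) + 1
	if pages == 0 {
		pages = 1 // one empty page is still a valid page
	}
	page := *pagePtr
	if page <= 0 || page > pages {
		return 1, fmt.Errorf("%w: [1, %d], given %d", errPageOutOfRange, pages, page)
	}
	return page, nil
}

func main() {
	p := 3
	page, err := validatePageSketch(&p, 30, 100) // 100 results, 30 per page: 4 pages
	fmt.Println(page, err)                       // 3 <nil>
}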
diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go
index d26a4d90f..8632e00c1 100644
--- a/internal/rpc/core/events.go
+++ b/internal/rpc/core/events.go
@@ -7,7 +7,7 @@ import (

 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

@@ -18,7 +18,7 @@ const (

 // Subscribe for events via WebSocket.
 // More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe
-func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
+func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) {
 	addr := ctx.RemoteAddr()

 	if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@@ -49,7 +49,7 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.
 			select {
 			case msg := <-sub.Out():
 				var (
-					resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()}
+					resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()}
 					resp        = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent)
 				)
 				writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -80,12 +80,12 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.
 		}
 	}()

-	return &ctypes.ResultSubscribe{}, nil
+	return &coretypes.ResultSubscribe{}, nil
 }

 // Unsubscribe from events via WebSocket.
 // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe
-func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
+func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) {
 	args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()}
 	env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query)

@@ -100,17 +100,17 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultUnsubscribe{}, nil
+	return &coretypes.ResultUnsubscribe{}, nil
 }

 // UnsubscribeAll from all events via WebSocket.
 // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all
-func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
+func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) {
 	addr := ctx.RemoteAddr()
 	env.Logger.Info("Unsubscribe from all", "remote", addr)

 	err := env.EventBus.UnsubscribeAll(ctx.Context(), addr)
 	if err != nil {
 		return nil, err
 	}

-	return &ctypes.ResultUnsubscribe{}, nil
+	return &coretypes.ResultUnsubscribe{}, nil
 }
diff --git a/internal/rpc/core/evidence.go b/internal/rpc/core/evidence.go
index 311180bdd..a7641b99d 100644
--- a/internal/rpc/core/evidence.go
+++ b/internal/rpc/core/evidence.go
@@ -3,7 +3,7 @@ package core
 import (
 	"fmt"

-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -12,10 +12,10 @@ import (
 // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence
 func (env *Environment) BroadcastEvidence(
 	ctx *rpctypes.Context,
-	ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+	ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {

 	if ev == nil {
-		return nil, fmt.Errorf("%w: no evidence was provided", ctypes.ErrInvalidRequest)
+		return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest)
 	}

 	if err := ev.ValidateBasic(); err != nil {
@@ -25,5 +25,5 @@ func (env *Environment) BroadcastEvidence(
 	if err := env.EvidencePool.AddEvidence(ev); err != nil {
 		return nil, fmt.Errorf("failed to add evidence: %w", err)
 	}
-	return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
+	return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
 }
diff --git a/internal/rpc/core/health.go b/internal/rpc/core/health.go
index fa92d57ab..fc355c7e7 100644
--- a/internal/rpc/core/health.go
+++ b/internal/rpc/core/health.go
@@ -1,13 +1,13 @@
 package core

 import (
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

 // Health gets node health. Returns empty result (200 OK) on success, no
 // response - in case of an error.
 // More: https://docs.tendermint.com/master/rpc/#/Info/health
-func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
-	return &ctypes.ResultHealth{}, nil
+func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) {
+	return &coretypes.ResultHealth{}, nil
 }
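The Subscribe handler in events.go above forwards each event with its own bounded write window, so one slow WebSocket client cannot stall the event pump indefinitely. A rough sketch of that per-message-timeout pattern; the events channel and writeJSON function are stand-ins, not the real pubsub API:

package main

import (
	"context"
	"fmt"
	"time"
)

// pump forwards each event under a fresh 10-second deadline, mirroring the
// writeCtx/cancel pattern in Subscribe. A failed or timed-out write drops
// the subscriber instead of blocking all later events.
func pump(events <-chan string, writeJSON func(context.Context, string) error) {
	for msg := range events {
		writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		err := writeJSON(writeCtx, msg)
		cancel()
		if err != nil {
			fmt.Println("dropping slow subscriber:", err)
			return
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "tm.event = 'NewBlock'"
	close(events)
	pump(events, func(ctx context.Context, s string) error {
		fmt.Println("sent:", s)
		return nil
	})
}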
diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go
index b44b9fb33..d66cabadd 100644
--- a/internal/rpc/core/mempool.go
+++ b/internal/rpc/core/mempool.go
@@ -7,9 +7,9 @@ import (
 	"time"

 	abci "github.com/tendermint/tendermint/abci/types"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/internal/mempool"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -20,25 +20,25 @@ import (
 // BroadcastTxAsync returns right away, with no response. Does not wait for
 // CheckTx nor DeliverTx results.
 // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async
-func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempl.TxInfo{})
+func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
+	err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{})

 	if err != nil {
 		return nil, err
 	}

-	return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
+	return &coretypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
 }

 // BroadcastTxSync returns with the response from CheckTx. Does not wait for
 // DeliverTx result.
 // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync
-func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	resCh := make(chan *abci.Response, 1)
 	err := env.Mempool.CheckTx(
 		ctx.Context(),
 		tx,
 		func(res *abci.Response) { resCh <- res },
-		mempl.TxInfo{},
+		mempool.TxInfo{},
 	)
 	if err != nil {
 		return nil, err
@@ -47,7 +47,7 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct
 	res := <-resCh
 	r := res.GetCheckTx()

-	return &ctypes.ResultBroadcastTx{
+	return &coretypes.ResultBroadcastTx{
 		Code: r.Code,
 		Data: r.Data,
 		Log:  r.Log,
@@ -59,7 +59,7 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct

 // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx.
 // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit
-func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll
 	subscriber := ctx.RemoteAddr()

 	if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@@ -91,7 +91,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
 		ctx.Context(),
 		tx,
 		func(res *abci.Response) { checkTxResCh <- res },
-		mempl.TxInfo{},
+		mempool.TxInfo{},
 	)
 	if err != nil {
 		env.Logger.Error("Error on broadcastTxCommit", "err", err)
@@ -102,7 +102,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
 	checkTxRes := checkTxResMsg.GetCheckTx()
 	if checkTxRes.Code != abci.CodeTypeOK {
-		return &ctypes.ResultBroadcastTxCommit{
+		return &coretypes.ResultBroadcastTxCommit{
 			CheckTx:   *checkTxRes,
 			DeliverTx: abci.ResponseDeliverTx{},
 			Hash:      tx.Hash(),
@@ -113,7 +113,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
 	select {
 	case msg := <-deliverTxSub.Out(): // The tx was included in a block.
 		deliverTxRes := msg.Data().(types.EventDataTx)
-		return &ctypes.ResultBroadcastTxCommit{
+		return &coretypes.ResultBroadcastTxCommit{
 			CheckTx:   *checkTxRes,
 			DeliverTx: deliverTxRes.Result,
 			Hash:      tx.Hash(),
@@ -128,7 +128,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
 		}
 		err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason)
 		env.Logger.Error("Error on broadcastTxCommit", "err", err)
-		return &ctypes.ResultBroadcastTxCommit{
+		return &coretypes.ResultBroadcastTxCommit{
 			CheckTx:   *checkTxRes,
 			DeliverTx: abci.ResponseDeliverTx{},
 			Hash:      tx.Hash(),
@@ -136,7 +136,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*
 	case <-time.After(env.Config.TimeoutBroadcastTxCommit):
 		err = errors.New("timed out waiting for tx to be included in a block")
 		env.Logger.Error("Error on broadcastTxCommit", "err", err)
-		return &ctypes.ResultBroadcastTxCommit{
+		return &coretypes.ResultBroadcastTxCommit{
 			CheckTx:   *checkTxRes,
 			DeliverTx: abci.ResponseDeliverTx{},
 			Hash:      tx.Hash(),
@@ -147,12 +147,12 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*

 // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries)
 // including their number.
 // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs
-func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) {
+func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) {
 	// reuse per_page validator
 	limit := env.validatePerPage(limitPtr)

 	txs := env.Mempool.ReapMaxTxs(limit)
-	return &ctypes.ResultUnconfirmedTxs{
+	return &coretypes.ResultUnconfirmedTxs{
 		Count:      len(txs),
 		Total:      env.Mempool.Size(),
 		TotalBytes: env.Mempool.SizeBytes(),
@@ -161,8 +161,8 @@ func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*c

 // NumUnconfirmedTxs gets number of unconfirmed transactions.
 // More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs
-func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
-	return &ctypes.ResultUnconfirmedTxs{
+func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) {
+	return &coretypes.ResultUnconfirmedTxs{
 		Count:      env.Mempool.Size(),
 		Total:      env.Mempool.Size(),
 		TotalBytes: env.Mempool.SizeBytes()}, nil
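BroadcastTxSync above turns the mempool's callback-style CheckTx into a synchronous RPC by parking on a one-slot channel. A minimal sketch of that bridge, with checkTx standing in for env.Mempool.CheckTx and a simplified result type:

package main

import "fmt"

type checkTxResult struct {
	Code uint32
	Log  string
}

// checkTx pretends to validate the tx and reports through a callback,
// the way the mempool's CheckTx does.
func checkTx(tx []byte, cb func(checkTxResult)) {
	cb(checkTxResult{Code: 0, Log: "ok"})
}

// broadcastTxSync converts the callback into a blocking call: the buffered
// channel (size 1) guarantees the callback never blocks, and the receive
// parks only the RPC handler, not the mempool.
func broadcastTxSync(tx []byte) checkTxResult {
	resCh := make(chan checkTxResult, 1)
	checkTx(tx, func(res checkTxResult) { resCh <- res })
	return <-resCh
}

func main() {
	fmt.Printf("%+v\n", broadcastTxSync([]byte("demo tx")))
}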
@@ -171,10 +171,10 @@ func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.Result

 // CheckTx checks the transaction without executing it. The transaction won't
 // be added to the mempool either.
 // More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx
-func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
+func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
 	res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx})
 	if err != nil {
 		return nil, err
 	}

-	return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil
+	return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil
 }
diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go
index 633359449..8bcc04dd0 100644
--- a/internal/rpc/core/net.go
+++ b/internal/rpc/core/net.go
@@ -6,21 +6,21 @@ import (
 	"strings"

 	"github.com/tendermint/tendermint/internal/p2p"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

 // NetInfo returns network info.
 // More: https://docs.tendermint.com/master/rpc/#/Info/net_info
-func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
-	var peers []ctypes.Peer
+func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
+	var peers []coretypes.Peer

 	switch {
 	case env.P2PPeers != nil:
 		peersList := env.P2PPeers.Peers().List()
-		peers = make([]ctypes.Peer, 0, len(peersList))
+		peers = make([]coretypes.Peer, 0, len(peersList))
 		for _, peer := range peersList {
-			peers = append(peers, ctypes.Peer{
+			peers = append(peers, coretypes.Peer{
 				ID:  peer.ID(),
 				URL: peer.SocketAddr().String(),
 			})
@@ -33,7 +33,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
 				continue
 			}

-			peers = append(peers, ctypes.Peer{
+			peers = append(peers, coretypes.Peer{
 				ID:  peer,
 				URL: addrs[0].String(),
 			})
@@ -42,7 +42,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
 		return nil, errors.New("peer management system does not support NetInfo responses")
 	}

-	return &ctypes.ResultNetInfo{
+	return &coretypes.ResultNetInfo{
 		Listening: env.P2PTransport.IsListening(),
 		Listeners: env.P2PTransport.Listeners(),
 		NPeers:    len(peers),
@@ -51,19 +51,19 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e
 }

 // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT).
-func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
+func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
 	if env.P2PPeers == nil {
 		return nil, errors.New("peer management system does not support this operation")
 	}

 	if len(seeds) == 0 {
-		return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest)
+		return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest)
 	}
 	env.Logger.Info("DialSeeds", "seeds", seeds)
 	if err := env.P2PPeers.DialPeersAsync(seeds); err != nil {
-		return &ctypes.ResultDialSeeds{}, err
+		return &coretypes.ResultDialSeeds{}, err
 	}
-	return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
+	return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
 }

 // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT),
@@ -71,19 +71,19 @@ func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (
 func (env *Environment) UnsafeDialPeers(
 	ctx *rpctypes.Context,
 	peers []string,
-	persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) {
+	persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) {

 	if env.P2PPeers == nil {
 		return nil, errors.New("peer management system does not support this operation")
 	}

 	if len(peers) == 0 {
-		return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest)
+		return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest)
 	}

 	ids, err := getIDs(peers)
 	if err != nil {
-		return &ctypes.ResultDialPeers{}, err
+		return &coretypes.ResultDialPeers{}, err
 	}

 	env.Logger.Info("DialPeers", "peers", peers, "persistent",
@@ -91,40 +91,40 @@ func (env *Environment) UnsafeDialPeers(

 	if persistent {
 		if err := env.P2PPeers.AddPersistentPeers(peers); err != nil {
-			return &ctypes.ResultDialPeers{}, err
+			return &coretypes.ResultDialPeers{}, err
 		}
 	}

 	if private {
 		if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil {
-			return &ctypes.ResultDialPeers{}, err
+			return &coretypes.ResultDialPeers{}, err
 		}
 	}

 	if unconditional {
 		if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil {
-			return &ctypes.ResultDialPeers{}, err
+			return &coretypes.ResultDialPeers{}, err
 		}
 	}

 	if err := env.P2PPeers.DialPeersAsync(peers); err != nil {
-		return &ctypes.ResultDialPeers{}, err
+		return &coretypes.ResultDialPeers{}, err
 	}

-	return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
+	return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
 }

 // Genesis returns genesis file.
 // More: https://docs.tendermint.com/master/rpc/#/Info/genesis
-func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
+func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
 	if len(env.genChunks) > 1 {
 		return nil, errors.New("genesis response is large, please use the genesis_chunked API instead")
 	}

-	return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil
+	return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil
 }

-func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
+func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) {
 	if env.genChunks == nil {
 		return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized")
 	}
@@ -139,7 +139,7 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctyp
 		return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id)
 	}

-	return &ctypes.ResultGenesisChunk{
+	return &coretypes.ResultGenesisChunk{
 		TotalChunks: len(env.genChunks),
 		ChunkNumber: id,
 		Data:        env.genChunks[id],
diff --git a/internal/rpc/core/net_test.go b/internal/rpc/core/net_test.go
index 821cdb663..7c12a69d6 100644
--- a/internal/rpc/core/net_test.go
+++ b/internal/rpc/core/net_test.go
@@ -6,14 +6,14 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/libs/log"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

 func TestUnsafeDialSeeds(t *testing.T) {
-	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
+	sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
 		func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
 	err := sw.Start()
 	require.NoError(t, err)
@@ -48,7 +48,7 @@ func TestUnsafeDialSeeds(t *testing.T) {
 }

 func TestUnsafeDialPeers(t *testing.T) {
-	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
+	sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
 		func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
 	sw.SetAddrBook(&p2p.AddrBookMock{
 		Addrs: make(map[string]struct{}),
diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go
index 567d73be4..bdd7ee1fa 100644
--- a/internal/rpc/core/status.go
+++ b/internal/rpc/core/status.go
@@ -5,7 +5,7 @@ import (
 	"time"

 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
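GenesisChunked above serves the genesis document in numbered chunks when it is too large for a single response. A sketch of how a client might reassemble it; the field names follow ResultGenesisChunk, but the assumption that Data carries a base64-encoded slice of the genesis JSON is mine and worth verifying against coretypes:

package main

import (
	"encoding/base64"
	"fmt"
)

type genesisChunk struct {
	TotalChunks int
	ChunkNumber int
	Data        string // assumed: base64-encoded slice of the genesis doc
}

// reassemble fetches chunk 0 to learn the total, then walks the rest in
// order and concatenates the decoded payloads.
func reassemble(fetch func(id int) genesisChunk) ([]byte, error) {
	var doc []byte
	for id, total := 0, 1; id < total; id++ {
		c := fetch(id)
		total = c.TotalChunks
		raw, err := base64.StdEncoding.DecodeString(c.Data)
		if err != nil {
			return nil, fmt.Errorf("chunk %d: %w", id, err)
		}
		doc = append(doc, raw...)
	}
	return doc, nil
}

func main() {
	chunks := []string{
		base64.StdEncoding.EncodeToString([]byte(`{"chain_id":`)),
		base64.StdEncoding.EncodeToString([]byte(`"demo"}`)),
	}
	doc, _ := reassemble(func(id int) genesisChunk {
		return genesisChunk{TotalChunks: len(chunks), ChunkNumber: id, Data: chunks[id]}
	})
	fmt.Println(string(doc)) // {"chain_id":"demo"}
}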
@@ -13,7 +13,7 @@ import (
 // Status returns Tendermint status including node info, pubkey, latest block
 // hash, app hash, block height, current max peer block height, and time.
 // More: https://docs.tendermint.com/master/rpc/#/Info/status
-func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
+func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) {
 	var (
 		earliestBlockHeight int64
 		earliestBlockHash   tmbytes.HexBytes
@@ -50,17 +50,17 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err
 	if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil {
 		votingPower = val.VotingPower
 	}
-	validatorInfo := ctypes.ValidatorInfo{}
+	validatorInfo := coretypes.ValidatorInfo{}
 	if env.PubKey != nil {
-		validatorInfo = ctypes.ValidatorInfo{
+		validatorInfo = coretypes.ValidatorInfo{
 			Address:     env.PubKey.Address(),
 			PubKey:      env.PubKey,
 			VotingPower: votingPower,
 		}
 	}
-	result := &ctypes.ResultStatus{
+	result := &coretypes.ResultStatus{
 		NodeInfo: env.P2PTransport.NodeInfo(),
-		SyncInfo: ctypes.SyncInfo{
+		SyncInfo: coretypes.SyncInfo{
 			LatestBlockHash:   latestBlockHash,
 			LatestAppHash:     latestAppHash,
 			LatestBlockHeight: latestHeight,
diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go
index 75a537942..60c7519c0 100644
--- a/internal/rpc/core/tx.go
+++ b/internal/rpc/core/tx.go
@@ -9,7 +9,7 @@ import (
 	"github.com/tendermint/tendermint/libs/bytes"
 	tmmath "github.com/tendermint/tendermint/libs/math"
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -18,7 +18,7 @@ import (
 // transaction is in the mempool, invalidated, or was not sent in the first
 // place.
 // More: https://docs.tendermint.com/master/rpc/#/Info/tx
-func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
+func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
 	// if index is disabled, return error

 	// N.B. The hash parameter is HexBytes so that the reflective parameter
@@ -45,7 +45,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo
 		proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines
 	}

-	return &ctypes.ResultTx{
+	return &coretypes.ResultTx{
 		Hash:     hash,
 		Height:   height,
 		Index:    index,
@@ -68,7 +68,7 @@ func (env *Environment) TxSearch(
 	prove bool,
 	pagePtr, perPagePtr *int,
 	orderBy string,
-) (*ctypes.ResultTxSearch, error) {
+) (*coretypes.ResultTxSearch, error) {

 	if !indexer.KVSinkEnabled(env.EventSinks) {
 		return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink")
@@ -103,7 +103,7 @@ func (env *Environment) TxSearch(
 			return results[i].Height < results[j].Height
 		})
 	default:
-		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest)
+		return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest)
 	}

 	// paginate results
@@ -118,7 +118,7 @@ func (env *Environment) TxSearch(
 	skipCount := validateSkipCount(page, perPage)
 	pageSize := tmmath.MinInt(perPage, totalCount-skipCount)

-	apiResults := make([]*ctypes.ResultTx, 0, pageSize)
+	apiResults := make([]*coretypes.ResultTx, 0, pageSize)
 	for i := skipCount; i < skipCount+pageSize; i++ {
 		r := results[i]

@@ -128,7 +128,7 @@ func (env *Environment) TxSearch(
 			proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines
 		}

-		apiResults = append(apiResults, &ctypes.ResultTx{
+		apiResults = append(apiResults, &coretypes.ResultTx{
 			Hash:     types.Tx(r.Tx).Hash(),
 			Height:   r.Height,
 			Index:    r.Index,
@@ -138,7 +138,7 @@ func (env *Environment) TxSearch(
 		})
 	}

-	return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil
+	return &coretypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil
 }
 }
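The two `XXX: overflow on 32-bit machines` notes in tx.go above flag the int(index) conversions. A small demonstration of the hazard they warn about: on a platform where int is 32 bits, a large uint32 index wraps negative when converted:

package main

import (
	"fmt"
	"math"
)

func main() {
	var index uint32 = math.MaxUint32 // pathological tx index
	converted := int32(index)         // what int(index) means where int is 32-bit
	fmt.Println(index, converted)     // 4294967295 -1

	// A defensive conversion rejects indexes that do not fit instead of wrapping:
	if uint64(index) > uint64(math.MaxInt32) {
		fmt.Println("index does not fit in a 32-bit int; reject rather than wrap")
	}
}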
diff --git a/internal/state/execution.go b/internal/state/execution.go
index f5380de7c..e3dc80dca 100644
--- a/internal/state/execution.go
+++ b/internal/state/execution.go
@@ -7,9 +7,9 @@ import (
 	"time"

 	abci "github.com/tendermint/tendermint/abci/types"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/internal/libs/fail"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/proxy"
 	"github.com/tendermint/tendermint/libs/log"
 	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
@@ -37,7 +37,7 @@ type BlockExecutor struct {

 	// manage the mempool lock during commit
 	// and update both with block results after commit.
-	mempool mempl.Mempool
+	mempool mempool.Mempool
 	evpool  EvidencePool

 	logger log.Logger
@@ -61,7 +61,7 @@ func NewBlockExecutor(
 	stateStore Store,
 	logger log.Logger,
 	proxyApp proxy.AppConnConsensus,
-	mempool mempl.Mempool,
+	pool mempool.Mempool,
 	evpool EvidencePool,
 	blockStore BlockStore,
 	options ...BlockExecutorOption,
@@ -70,7 +70,7 @@ func NewBlockExecutor(
 		store:    stateStore,
 		proxyApp: proxyApp,
 		eventBus: types.NopEventBus{},
-		mempool:  mempool,
+		mempool:  pool,
 		evpool:   evpool,
 		logger:   logger,
 		metrics:  NopMetrics(),
@@ -424,7 +424,7 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
 	}

 	// Check if validator's pubkey matches an ABCI type in the consensus params
-	pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey)
+	pk, err := encoding.PubKeyFromProto(valUpdate.PubKey)
 	if err != nil {
 		return err
 	}
diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go
index 69085694b..d777d1699 100644
--- a/internal/state/execution_test.go
+++ b/internal/state/execution_test.go
@@ -8,12 +8,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"

 	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/crypto/tmhash"
 	mmock "github.com/tendermint/tendermint/internal/mempool/mock"
 	"github.com/tendermint/tendermint/internal/proxy"
@@ -25,7 +26,6 @@ import (
 	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	"github.com/tendermint/tendermint/version"
-	dbm "github.com/tendermint/tm-db"
 )

 var (
@@ -218,9 +218,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
 func TestValidateValidatorUpdates(t *testing.T) {
 	pubkey1 := ed25519.GenPrivKey().PubKey()
 	pubkey2 := ed25519.GenPrivKey().PubKey()
-	pk1, err := cryptoenc.PubKeyToProto(pubkey1)
+	pk1, err := encoding.PubKeyToProto(pubkey1)
 	assert.NoError(t, err)
-	pk2, err := cryptoenc.PubKeyToProto(pubkey2)
+	pk2, err := encoding.PubKeyToProto(pubkey2)
 	assert.NoError(t, err)

 	defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}}
@@ -278,9 +278,9 @@ func TestUpdateValidators(t *testing.T) {
 	pubkey2 := ed25519.GenPrivKey().PubKey()
 	val2 := types.NewValidator(pubkey2, 20)

-	pk, err := cryptoenc.PubKeyToProto(pubkey1)
+	pk, err := encoding.PubKeyToProto(pubkey1)
 	require.NoError(t, err)
-	pk2, err := cryptoenc.PubKeyToProto(pubkey2)
+	pk2, err := encoding.PubKeyToProto(pubkey2)
 	require.NoError(t, err)

 	testCases := []struct {
@@ -385,7 +385,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
 	blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

 	pubkey := ed25519.GenPrivKey().PubKey()
-	pk, err := cryptoenc.PubKeyToProto(pubkey)
+	pk, err := encoding.PubKeyToProto(pubkey)
 	require.NoError(t, err)
 	app.ValidatorUpdates = []abci.ValidatorUpdate{
 		{PubKey: pk, Power: 10},
@@ -442,7 +442,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
 	block := sf.MakeBlock(state, 1, new(types.Commit))
 	blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

-	vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey)
+	vp, err := encoding.PubKeyToProto(state.Validators.Validators[0].PubKey)
 	require.NoError(t, err)
 	// Remove the only validator
 	app.ValidatorUpdates = []abci.ValidatorUpdate{
diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go
index f20475633..f8ed77b32 100644
--- a/internal/state/helpers_test.go
+++ b/internal/state/helpers_test.go
@@ -11,7 +11,7 @@ import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/internal/proxy"
 	sm "github.com/tendermint/tendermint/internal/state"
 	sf "github.com/tendermint/tendermint/internal/state/test/factory"
@@ -148,11 +148,11 @@ func makeHeaderPartsResponsesValPubKeyChange(
 	// If the pubkey is new, remove the old and add the new.
 	_, val := state.NextValidators.GetByIndex(0)
 	if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
-		vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey)
+		vPbPk, err := encoding.PubKeyToProto(val.PubKey)
 		if err != nil {
 			panic(err)
 		}
-		pbPk, err := cryptoenc.PubKeyToProto(pubkey)
+		pbPk, err := encoding.PubKeyToProto(pubkey)
 		if err != nil {
 			panic(err)
 		}
@@ -181,7 +181,7 @@ func makeHeaderPartsResponsesValPowerChange(
 	// If the pubkey is new, remove the old and add the new.
 	_, val := state.NextValidators.GetByIndex(0)
 	if val.VotingPower != power {
-		vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey)
+		vPbPk, err := encoding.PubKeyToProto(val.PubKey)
 		if err != nil {
 			panic(err)
 		}
diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go
index 2601c878b..024df332c 100644
--- a/internal/state/indexer/block/kv/kv_test.go
+++ b/internal/state/indexer/block/kv/kv_test.go
@@ -6,15 +6,16 @@ import (
 	"testing"

 	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"
+
 	abci "github.com/tendermint/tendermint/abci/types"
 	blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv"
 	"github.com/tendermint/tendermint/libs/pubsub/query"
 	"github.com/tendermint/tendermint/types"
-	db "github.com/tendermint/tm-db"
 )

 func TestBlockIndexer(t *testing.T) {
-	store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events"))
+	store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events"))
 	indexer := blockidxkv.New(store)

 	require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{
diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go
index e630b07e9..d9f29b677 100644
--- a/internal/state/indexer/indexer_service_test.go
+++ b/internal/state/indexer/indexer_service_test.go
@@ -9,11 +9,11 @@ import (
 	"time"

 	"github.com/adlio/schema"
-	_ "github.com/lib/pq"
 	dockertest "github.com/ory/dockertest"
 	"github.com/ory/dockertest/docker"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"

 	abci "github.com/tendermint/tendermint/abci/types"
 	indexer "github.com/tendermint/tendermint/internal/state/indexer"
@@ -21,7 +21,9 @@ import (
 	psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
 	tmlog "github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/types"
-	db "github.com/tendermint/tm-db"
+
+	// Register the Postgres database driver.
+	_ "github.com/lib/pq"
 )
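The blank import moved above is load-bearing: database/sql drivers register themselves in an init() side effect, so the package must be linked in even though nothing references it by name. A minimal sketch of the mechanism (the DSN is a placeholder):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // side effect: registers the "postgres" driver
)

func main() {
	// Without the blank import, sql.Open would fail with
	// `sql: unknown driver "postgres"`.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer db.Close()
	fmt.Println("driver registered; connections are opened lazily on first use")
}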
+ _ "github.com/lib/pq" ) var psqldb *sql.DB @@ -55,7 +57,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { pool, err := setupDB(t) assert.Nil(t, err) - store := db.NewMemDB() + store := dbm.NewMemDB() eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink} assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) diff --git a/internal/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go index eda8da846..4c471b4d3 100644 --- a/internal/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -3,13 +3,14 @@ package kv import ( "context" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/state/indexer" kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) var _ indexer.EventSink = (*EventSink)(nil) diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index d59f4ea90..7d7552946 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -5,6 +5,8 @@ import ( "fmt" "testing" + dbm "github.com/tendermint/tm-db" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -13,21 +15,20 @@ import ( kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestType(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Equal(t, indexer.KV, kvSink.Type()) } func TestStop(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Nil(t, kvSink.Stop()) } func TestBlockFuncs(t *testing.T) { - store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events")) indexer := NewEventSink(store) require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ @@ -158,7 +159,7 @@ func TestBlockFuncs(t *testing.T) { } func TestTxSearchWithCancelation(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -180,7 +181,7 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - esdb := db.NewMemDB() + esdb := dbm.NewMemDB() indexer := NewEventSink(esdb) // index tx using events indexing (composite key) @@ -260,7 +261,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -282,7 +283,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) // indexed first, but bigger height (to test the order 
 	txResult := txResultWithEvents([]abci.Event{
diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go
index 064421f5f..c8ab2b0f2 100644
--- a/internal/state/indexer/tx/kv/kv_test.go
+++ b/internal/state/indexer/tx/kv/kv_test.go
@@ -10,8 +10,7 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	db "github.com/tendermint/tm-db"
+	dbm "github.com/tendermint/tm-db"

 	abci "github.com/tendermint/tendermint/abci/types"
 	indexer "github.com/tendermint/tendermint/internal/state/indexer"
@@ -21,7 +20,7 @@ import (
 )

 func TestTxIndex(t *testing.T) {
-	txIndexer := NewTxIndex(db.NewMemDB())
+	txIndexer := NewTxIndex(dbm.NewMemDB())
 	tx := types.Tx("HELLO WORLD")

 	txResult := &abci.TxResult{
@@ -67,7 +66,7 @@ func TestTxIndex(t *testing.T) {
 }

 func TestTxSearch(t *testing.T) {
-	indexer := NewTxIndex(db.NewMemDB())
+	indexer := NewTxIndex(dbm.NewMemDB())

 	txResult := txResultWithEvents([]abci.Event{
 		{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -147,7 +146,7 @@ func TestTxSearch(t *testing.T) {
 }

 func TestTxSearchWithCancelation(t *testing.T) {
-	indexer := NewTxIndex(db.NewMemDB())
+	indexer := NewTxIndex(dbm.NewMemDB())

 	txResult := txResultWithEvents([]abci.Event{
 		{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -165,7 +164,7 @@ func TestTxSearchWithCancelation(t *testing.T) {
 }

 func TestTxSearchDeprecatedIndexing(t *testing.T) {
-	indexer := NewTxIndex(db.NewMemDB())
+	indexer := NewTxIndex(dbm.NewMemDB())

 	// index tx using events indexing (composite key)
 	txResult1 := txResultWithEvents([]abci.Event{
@@ -244,7 +243,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
 }

 func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
-	indexer := NewTxIndex(db.NewMemDB())
+	indexer := NewTxIndex(dbm.NewMemDB())

 	txResult := txResultWithEvents([]abci.Event{
 		{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}},
@@ -266,7 +265,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
 }

 func TestTxSearchMultipleTxs(t *testing.T) {
-	indexer := NewTxIndex(db.NewMemDB())
+	indexer := NewTxIndex(dbm.NewMemDB())

 	// indexed first, but bigger height (to test the order of transactions)
 	txResult := txResultWithEvents([]abci.Event{
@@ -339,7 +338,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) {
 	require.NoError(b, err)
 	defer os.RemoveAll(dir)

-	store, err := db.NewDB("tx_index", "goleveldb", dir)
+	store, err := dbm.NewDB("tx_index", "goleveldb", dir)
 	require.NoError(b, err)
 	txIndexer := NewTxIndex(store)
diff --git a/internal/state/state_test.go b/internal/state/state_test.go
index 102b206c2..ab73bc90c 100644
--- a/internal/state/state_test.go
+++ b/internal/state/state_test.go
@@ -12,36 +12,35 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	dbm "github.com/tendermint/tm-db"

 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	sm "github.com/tendermint/tendermint/internal/state"
-	sf "github.com/tendermint/tendermint/internal/state/test/factory"
+	statefactory "github.com/tendermint/tendermint/internal/state/test/factory"
"github.com/tendermint/tendermint/internal/state/test/factory" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - config := cfg.ResetTestRoot("state_") - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg := config.ResetTestRoot("state_") + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(t, err) stateStore := sm.NewStore(stateDB) state, err := stateStore.Load() require.NoError(t, err) require.Empty(t, state) - state, err = sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err = sm.MakeGenesisStateFromFile(cfg.GenesisFile()) assert.NoError(t, err) assert.NotNil(t, state) err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } + tearDown := func(t *testing.T) { os.RemoveAll(cfg.RootDir) } return tearDown, stateDB, state } @@ -106,7 +105,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses. - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -114,7 +113,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} - pbpk, err := cryptoenc.PubKeyToProto(ed25519.GenPrivKey().PubKey()) + pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}} @@ -448,7 +447,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -465,7 +464,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() val2VotingPower := int64(100) - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + fvp, err := encoding.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} @@ -562,7 +561,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ @@ -583,7 +582,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // add a validator with the same voting power as the first val2PubKey := 
-	fvp, err := cryptoenc.PubKeyToProto(val2PubKey)
+	fvp, err := encoding.PubKeyToProto(val2PubKey)
 	require.NoError(t, err)
 	updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower}
 	validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
@@ -749,7 +748,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
 		require.NoError(t, err)

-		block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
+		block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
 		blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

 		updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
@@ -769,7 +768,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 	// see: https://github.com/tendermint/tendermint/issues/2960
 	firstAddedValPubKey := ed25519.GenPrivKey().PubKey()
 	firstAddedValVotingPower := int64(10)
-	fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey)
+	fvp, err := encoding.PubKeyToProto(firstAddedValPubKey)
 	require.NoError(t, err)
 	firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower}
 	validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal})
@@ -778,7 +777,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 		BeginBlock: &abci.ResponseBeginBlock{},
 		EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}},
 	}
-	block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
+	block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
 	blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
 	updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
 	require.NoError(t, err)
@@ -793,7 +792,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
 		require.NoError(t, err)

-		block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit))
+		block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit))
 		blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

 		updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates)
@@ -816,7 +815,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 	// add 10 validators with the same voting power as the one added directly after genesis:
 	for i := 0; i < 10; i++ {
 		addedPubKey := ed25519.GenPrivKey().PubKey()
-		ap, err := cryptoenc.PubKeyToProto(addedPubKey)
+		ap, err := encoding.PubKeyToProto(addedPubKey)
 		require.NoError(t, err)
 		addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower}
 		validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal})
@@ -826,7 +825,7 @@ func TestLargeGenesisValidator(t *testing.T) {
 			BeginBlock: &abci.ResponseBeginBlock{},
 			EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}},
 		}
-		block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
+		block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
 		blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -834,14 +833,14 @@ func TestLargeGenesisValidator(t *testing.T) { require.Equal(t, 10+2, len(state.NextValidators.Validators)) // remove genesis validator: - gp, err := cryptoenc.PubKeyToProto(genesisPubKey) + gp, err := encoding.PubKeyToProto(genesisPubKey) require.NoError(t, err) removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} abciResponses = &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -862,7 +861,7 @@ func TestLargeGenesisValidator(t *testing.T) { } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -887,7 +886,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -982,7 +981,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 939878d40..118350fff 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -7,11 +7,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" sm "github.com/tendermint/tendermint/internal/state" @@ -102,13 +101,13 @@ func TestStoreLoadValidators(t *testing.T) { func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - config := cfg.ResetTestRoot("state_") - defer os.RemoveAll(config.RootDir) - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg := config.ResetTestRoot("state_") + defer 
os.RemoveAll(cfg.RootDir) + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(b, err) stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { b.Fatal(err) } diff --git a/internal/state/tx_filter.go b/internal/state/tx_filter.go index 61340e135..871e08ae6 100644 --- a/internal/state/tx_filter.go +++ b/internal/state/tx_filter.go @@ -1,22 +1,22 @@ package state import ( - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state State) mempl.PreCheckFunc { +func TxPreCheck(state State) mempool.PreCheckFunc { maxDataBytes := types.MaxDataBytesNoEvidence( state.ConsensusParams.Block.MaxBytes, state.Validators.Size(), ) - return mempl.PreCheckMaxBytes(maxDataBytes) + return mempool.PreCheckMaxBytes(maxDataBytes) } // TxPostCheck returns a function to filter transactions after processing. // The function limits the gas wanted by a transaction to the block's maximum total gas. -func TxPostCheck(state State) mempl.PostCheckFunc { - return mempl.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) +func TxPostCheck(state State) mempool.PostCheckFunc { + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) } diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 328c67c08..d2427f35a 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" @@ -15,14 +16,13 @@ import ( memmock "github.com/tendermint/tendermint/internal/mempool/mock" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" - sf "github.com/tendermint/tendermint/internal/state/test/factory" - "github.com/tendermint/tendermint/internal/test/factory" + statefactory "github.com/tendermint/tendermint/internal/state/test/factory" + testfactory "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const validationTestsStopHeight int64 = 10 @@ -90,7 +90,7 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block := sf.MakeBlock(state, height, lastCommit) + block := statefactory.MakeBlock(state, height, lastCommit) tc.malleateBlock(block) err := blockExec.ValidateBlock(state, block) t.Logf("%s: %v", tc.name, err) @@ -107,7 +107,7 @@ func TestValidateBlockHeader(t *testing.T) { } nextHeight := validationTestsStopHeight - block := sf.MakeBlock(state, nextHeight, lastCommit) + block := statefactory.MakeBlock(state, nextHeight, lastCommit) state.InitialHeight = nextHeight + 1 err := blockExec.ValidateBlock(state, block) require.Error(t, err, "expected an error 
when state is ahead of block") @@ -141,7 +141,7 @@ func TestValidateBlockCommit(t *testing.T) { #2589: ensure state.LastValidators.VerifyCommit fails here */ // should be height-1 instead of height - wrongHeightVote, err := factory.MakeVote( + wrongHeightVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -158,7 +158,7 @@ func TestValidateBlockCommit(t *testing.T) { state.LastBlockID, []types.CommitSig{wrongHeightVote.CommitSig()}, ) - block := sf.MakeBlock(state, height, wrongHeightCommit) + block := statefactory.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) @@ -166,7 +166,7 @@ func TestValidateBlockCommit(t *testing.T) { /* #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() */ - block = sf.MakeBlock(state, height, wrongSigsCommit) + block = statefactory.MakeBlock(state, height, wrongSigsCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, @@ -195,7 +195,7 @@ func TestValidateBlockCommit(t *testing.T) { /* wrongSigsCommit is fine except for the extra bad precommit */ - goodVote, err := factory.MakeVote( + goodVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -278,7 +278,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 37010986f..2ff93a023 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - proto "github.com/tendermint/tendermint/proto/tendermint/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -109,7 +109,7 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh // Respond allows the underlying process which receives requests on the // requestCh to respond with the respective light block. A nil response is used to // represent that the receiver of the request does not have a light block at that height. 
-func (d *Dispatcher) Respond(lb *proto.LightBlock, peer types.NodeID) error {
+func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error {
 	d.mtx.Lock()
 	defer d.mtx.Unlock()
diff --git a/internal/test/factory/genesis.go b/internal/test/factory/genesis.go
index 31ec1674f..d3a0a8464 100644
--- a/internal/test/factory/genesis.go
+++ b/internal/test/factory/genesis.go
@@ -3,13 +3,13 @@ package factory
 import (
 	"sort"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	tmtime "github.com/tendermint/tendermint/libs/time"
 	"github.com/tendermint/tendermint/types"
 )

 func RandGenesisDoc(
-	config *cfg.Config,
+	cfg *config.Config,
 	numValidators int,
 	randPower bool,
 	minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
@@ -29,7 +29,7 @@ func RandGenesisDoc(
 	return &types.GenesisDoc{
 		GenesisTime:   tmtime.Now(),
 		InitialHeight: 1,
-		ChainID:       config.ChainID(),
+		ChainID:       cfg.ChainID(),
 		Validators:    validators,
 	}, privValidators
 }
diff --git a/light/client_test.go b/light/client_test.go
index 291a3e5b1..c7c974ee5 100644
--- a/light/client_test.go
+++ b/light/client_test.go
@@ -11,7 +11,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	dbm "github.com/tendermint/tm-db"

 	"github.com/tendermint/tendermint/internal/test/factory"
diff --git a/light/provider/http/http.go b/light/provider/http/http.go
index 810e43ece..f8bf7d29e 100644
--- a/light/provider/http/http.go
+++ b/light/provider/http/http.go
@@ -12,7 +12,7 @@ import (
 	"github.com/tendermint/tendermint/light/provider"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -300,11 +300,11 @@ func (p *http) noBlock(e error) error {
 func (p *http) parseRPCError(e *rpctypes.RPCError) error {
 	switch {
 	// 1) check if the error indicates that the peer doesn't have the block
-	case strings.Contains(e.Data, ctypes.ErrHeightNotAvailable.Error()):
+	case strings.Contains(e.Data, coretypes.ErrHeightNotAvailable.Error()):
 		return p.noBlock(provider.ErrLightBlockNotFound)

 	// 2) check if the height requested is too high
-	case strings.Contains(e.Data, ctypes.ErrHeightExceedsChainHead.Error()):
+	case strings.Contains(e.Data, coretypes.ErrHeightExceedsChainHead.Error()):
 		return p.noBlock(provider.ErrHeightTooHigh)

 	// 3) check if the provider closed the connection
diff --git a/light/proxy/routes.go b/light/proxy/routes.go
index 2fdc177dc..436ae1b76 100644
--- a/light/proxy/routes.go
+++ b/light/proxy/routes.go
@@ -4,7 +4,7 @@ import (
 	"github.com/tendermint/tendermint/libs/bytes"
 	lrpc "github.com/tendermint/tendermint/light/rpc"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
@@ -52,91 +52,91 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
 	}
 }

-type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error)
+type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error)

 func makeHealthFunc(c *lrpc.Client) rpcHealthFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) {
 		return c.Health(ctx.Context())
 	}
 }

-type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error)
+type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error)

 // nolint: interfacer
 func makeStatusFunc(c *lrpc.Client) rpcStatusFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) {
 		return c.Status(ctx.Context())
 	}
 }

-type rpcNetInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error)
+type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error)

 func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
 		return c.NetInfo(ctx.Context())
 	}
 }

-type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) //nolint:lll

 func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc {
-	return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+	return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) {
 		return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight)
 	}
 }

-type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error)
+type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error)

 func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
 		return c.Genesis(ctx.Context())
 	}
 }

-type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error)
+type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error)

 func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc {
-	return func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
+	return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) {
 		return c.GenesisChunked(ctx.Context(), chunk)
 	}
 }

-type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error)
+type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error)

 func makeBlockFunc(c *lrpc.Client) rpcBlockFunc {
-	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) {
+	return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) {
 		return c.Block(ctx.Context(), height)
 	}
 }

-type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error)
+type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error)

 func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc {
-	return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
+	return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) {
 		return c.BlockByHash(ctx.Context(), hash)
 	}
 }

-type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error)
+type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error)

 func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc {
-	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) {
+	return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) {
 		return c.BlockResults(ctx.Context(), height)
 	}
 }

-type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error)
+type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error)

 func makeCommitFunc(c *lrpc.Client) rpcCommitFunc {
-	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) {
+	return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) {
 		return c.Commit(ctx.Context(), height)
 	}
 }

-type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)
+type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error)

 func makeTxFunc(c *lrpc.Client) rpcTxFunc {
-	return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
+	return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) {
 		return c.Tx(ctx.Context(), hash, prove)
 	}
 }
@@ -147,7 +147,7 @@ type rpcTxSearchFunc func(
 	prove bool,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultTxSearch, error)
+) (*coretypes.ResultTxSearch, error)

 func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
 	return func(
@@ -156,7 +156,7 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc {
 		prove bool,
 		page, perPage *int,
 		orderBy string,
-	) (*ctypes.ResultTxSearch, error) {
+	) (*coretypes.ResultTxSearch, error) {
 		return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy)
 	}
 }
@@ -167,7 +167,7 @@ type rpcBlockSearchFunc func(
 	prove bool,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error)
+) (*coretypes.ResultBlockSearch, error)

 func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc {
 	return func(
@@ -176,90 +176,90 @@ func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc {
 		prove bool,
 		page, perPage *int,
 		orderBy string,
-	) (*ctypes.ResultBlockSearch, error) {
+	) (*coretypes.ResultBlockSearch, error) {
 		return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy)
 	}
 }

 type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64,
-	page, perPage *int) (*ctypes.ResultValidators, error)
+	page, perPage *int) (*coretypes.ResultValidators, error)

 func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc {
-	return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
+	return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) {
 		return c.Validators(ctx.Context(), height, page, perPage)
 	}
 }

-type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error)
+type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error)

 func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) {
 		return c.DumpConsensusState(ctx.Context())
 	}
 }

-type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error)
+type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error)

 func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) {
 		return c.ConsensusState(ctx.Context())
 	}
 }

-type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error)
+type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error)

 func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc {
-	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+	return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
 		return c.ConsensusParams(ctx.Context(), height)
 	}
 }

-type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error)
+type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error)

 func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc {
-	return func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
+	return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
 		return c.UnconfirmedTxs(ctx.Context(), limit)
 	}
 }

-type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error)
+type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error)

 func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) {
 		return c.NumUnconfirmedTxs(ctx.Context())
 	}
 }

-type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
+type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error)

 func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc {
-	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+	return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 		return c.BroadcastTxCommit(ctx.Context(), tx)
 	}
 }

-type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error)

 func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc {
-	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+	return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 		return c.BroadcastTxSync(ctx.Context(), tx)
 	}
 }

-type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error)
+type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error)

 func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc {
-	return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+	return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 		return c.BroadcastTxAsync(ctx.Context(), tx)
 	}
 }

 type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string,
-	data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error)
+	data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error)

 func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
 	return func(ctx *rpctypes.Context, path string, data bytes.HexBytes,
-		height int64, prove bool) (*ctypes.ResultABCIQuery, error) {
+		height int64, prove bool) (*coretypes.ResultABCIQuery, error) {
 		return c.ABCIQueryWithOptions(ctx.Context(), path, data,
 			rpcclient.ABCIQueryOptions{
 				Height: height,
@@ -268,19 +268,19 @@ func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc {
 	}
 }

-type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error)
+type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error)

 func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc {
-	return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
+	return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) {
 		return c.ABCIInfo(ctx.Context())
 	}
 }

-type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error)

 // nolint: interfacer
 func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc {
-	return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+	return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
 		return c.BroadcastEvidence(ctx.Context(), ev)
 	}
 }
diff --git a/light/rpc/client.go b/light/rpc/client.go
index 16d5a5427..4461028cd 100644
--- a/light/rpc/client.go
+++ b/light/rpc/client.go
@@ -16,7 +16,7 @@ import (
 	tmmath "github.com/tendermint/tendermint/libs/math"
 	service "github.com/tendermint/tendermint/libs/service"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -113,22 +113,22 @@ func (c *Client) OnStop() {
 	}
 }

-func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 	return c.next.Status(ctx)
 }

-func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 	return c.next.ABCIInfo(ctx)
 }

 // ABCIQuery requests proof by default.
-func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { //nolint:lll
 	return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions)
 }

 // ABCIQueryWithOptions returns an error if opts.Prove is false.
 func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes,
-	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	// always request the proof
 	opts.Prove = true
@@ -150,7 +150,7 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb
 		return nil, errors.New("no proof ops")
 	}
 	if resp.Height <= 0 {
-		return nil, ctypes.ErrZeroOrNegativeHeight
+		return nil, coretypes.ErrZeroOrNegativeHeight
 	}

 	// Update the light client if we're behind.
@@ -185,46 +185,46 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb
 		}
 	}

-	return &ctypes.ResultABCIQuery{Response: resp}, nil
+	return &coretypes.ResultABCIQuery{Response: resp}, nil
 }

-func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 	return c.next.BroadcastTxCommit(ctx, tx)
 }

-func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.next.BroadcastTxAsync(ctx, tx)
 }

-func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.next.BroadcastTxSync(ctx, tx)
 }

-func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
 	return c.next.UnconfirmedTxs(ctx, limit)
 }

-func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
 	return c.next.NumUnconfirmedTxs(ctx)
 }

-func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
+func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
 	return c.next.CheckTx(ctx, tx)
 }

-func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
+func (c *Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	return c.next.NetInfo(ctx)
 }

-func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
+func (c *Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
 	return c.next.DumpConsensusState(ctx)
 }

-func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
+func (c *Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
 	return c.next.ConsensusState(ctx)
 }

-func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
 	res, err := c.next.ConsensusParams(ctx, height)
 	if err != nil {
 		return nil, err
@@ -235,7 +235,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re
 		return nil, err
 	}
 	if res.BlockHeight <= 0 {
-		return nil, ctypes.ErrZeroOrNegativeHeight
+		return nil, coretypes.ErrZeroOrNegativeHeight
 	}

 	// Update the light client if we're behind.
@@ -253,13 +253,13 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re
 	return res, nil
 }

-func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
+func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
 	return c.next.Health(ctx)
 }

 // BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header
 // returned.
-func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
 	res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight)
 	if err != nil {
 		return nil, err
@@ -298,16 +298,16 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64)
 	return res, nil
 }

-func (c *Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
+func (c *Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
 	return c.next.Genesis(ctx)
 }

-func (c *Client) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
+func (c *Client) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
 	return c.next.GenesisChunked(ctx, id)
 }

 // Block calls rpcclient#Block and then verifies the result.
-func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
+func (c *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
 	res, err := c.next.Block(ctx, height)
 	if err != nil {
 		return nil, err
@@ -341,7 +341,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock,
 }

 // BlockByHash calls rpcclient#BlockByHash and then verifies the result.
-func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctypes.ResultBlock, error) {
+func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*coretypes.ResultBlock, error) {
 	res, err := c.next.BlockByHash(ctx, hash)
 	if err != nil {
 		return nil, err
@@ -376,7 +376,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctype
 // BlockResults returns the block results for the given height. If no height is
 // provided, the results of the block preceding the latest are returned.
-func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
+func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) {
 	var h int64
 	if height == nil {
 		res, err := c.next.Status(ctx)
@@ -397,7 +397,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
 	// Validate res.
 	if res.Height <= 0 {
-		return nil, ctypes.ErrZeroOrNegativeHeight
+		return nil, coretypes.ErrZeroOrNegativeHeight
 	}

 	// Update the light client if we're behind.
@@ -438,7 +438,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
 	return res, nil
 }

-func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
+func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
 	// Update the light client if we're behind and retrieve the light block at the requested height
 	// or at the latest height if no height is provided.
 	l, err := c.updateLightClientIfNeededTo(ctx, height)
@@ -446,7 +446,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi
 		return nil, err
 	}

-	return &ctypes.ResultCommit{
+	return &coretypes.ResultCommit{
 		SignedHeader:    *l.SignedHeader,
 		CanonicalCommit: true,
 	}, nil
@@ -454,7 +454,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi
 // Tx calls rpcclient#Tx method and then verifies the proof if such was
 // requested.
-func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
+func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
 	res, err := c.next.Tx(ctx, hash, prove)
 	if err != nil || !prove {
 		return res, err
@@ -462,7 +462,7 @@ func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*ct
 	// Validate res.
 	if res.Height <= 0 {
-		return nil, ctypes.ErrZeroOrNegativeHeight
+		return nil, coretypes.ErrZeroOrNegativeHeight
 	}

 	// Update the light client if we're behind.
@@ -481,7 +481,7 @@ func (c *Client) TxSearch(
 	prove bool,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultTxSearch, error) {
+) (*coretypes.ResultTxSearch, error) {
 	return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy)
 }
@@ -490,7 +490,7 @@ func (c *Client) BlockSearch(
 	query string,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
+) (*coretypes.ResultBlockSearch, error) {
 	return c.next.BlockSearch(ctx, query, page, perPage, orderBy)
 }
@@ -499,7 +499,7 @@ func (c *Client) Validators(
 	ctx context.Context,
 	height *int64,
 	pagePtr, perPagePtr *int,
-) (*ctypes.ResultValidators, error) {
+) (*coretypes.ResultValidators, error) {
 	// Update the light client if we're behind and retrieve the light block at the
 	// requested height or at the latest height if no height is provided.
@@ -518,19 +518,19 @@ func (c *Client) Validators(
 	skipCount := validateSkipCount(page, perPage)
 	v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)]

-	return &ctypes.ResultValidators{
+	return &coretypes.ResultValidators{
 		BlockHeight: l.Height,
 		Validators:  v,
 		Count:       len(v),
 		Total:       totalCount}, nil
 }

-func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
 	return c.next.BroadcastEvidence(ctx, ev)
 }

 func (c *Client) Subscribe(ctx context.Context, subscriber, query string,
-	outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
+	outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) {
 	return c.next.Subscribe(ctx, subscriber, query, outCapacity...)
 }
@@ -565,7 +565,7 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) {
 // SubscribeWS subscribes for events using the given query and remote address as
 // a subscriber, but does not verify responses (UNSAFE)!
 // TODO: verify data
-func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
+func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) {
 	out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query)
 	if err != nil {
 		return nil, err
@@ -588,27 +588,27 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.Resul
 		}
 	}()

-	return &ctypes.ResultSubscribe{}, nil
+	return &coretypes.ResultSubscribe{}, nil
 }

 // UnsubscribeWS calls original client's Unsubscribe using remote address as a
 // subscriber.
-func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
+func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) {
 	err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query)
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultUnsubscribe{}, nil
+	return &coretypes.ResultUnsubscribe{}, nil
 }

 // UnsubscribeAllWS calls original client's UnsubscribeAll using remote address
 // as a subscriber.
-func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
+func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) {
 	err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr())
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultUnsubscribe{}, nil
+	return &coretypes.ResultUnsubscribe{}, nil
 }

 // XXX: Copied from rpc/core/env.go
@@ -620,7 +620,7 @@ const (
 func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
 	if perPage < 1 {
-		panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage))
+		panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage))
 	}

 	if pagePtr == nil { // no page parameter
@@ -633,7 +633,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
 	}
 	page := *pagePtr
 	if page <= 0 || page > pages {
-		return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page)
+		return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page)
 	}

 	return page, nil
diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go
index b373d5126..7f963eb92 100644
--- a/light/store/db/db_test.go
+++ b/light/store/db/db_test.go
@@ -7,7 +7,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	dbm "github.com/tendermint/tm-db"

 	"github.com/tendermint/tendermint/crypto"
diff --git a/node/node.go b/node/node.go
index c2bf88fde..4e44a5678 100644
--- a/node/node.go
+++ b/node/node.go
@@ -6,19 +6,17 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
 	"strconv"
 	"time"

-	_ "github.com/lib/pq" // provide the psql db driver
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"

 	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
-	cs "github.com/tendermint/tendermint/internal/consensus"
+	"github.com/tendermint/tendermint/internal/consensus"
 	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/pex"
@@ -38,6 +36,10 @@ import (
 	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
 	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
+
+	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
+
+	_ "github.com/lib/pq" // provide the psql db driver
 )

 // nodeImpl is the highest level interface to a full Tendermint node.
@@ -46,7 +48,7 @@ type nodeImpl struct {
 	service.BaseService

 	// config
-	config        *cfg.Config
+	config        *config.Config
 	genesisDoc    *types.GenesisDoc   // initial validator set
 	privValidator types.PrivValidator // local node's validator key
@@ -69,7 +71,7 @@ type nodeImpl struct {
 	mempool          mempool.Mempool
 	stateSync        bool               // whether the node should state sync on startup
 	stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
-	consensusReactor *cs.Reactor        // for participating in the consensus
+	consensusReactor *consensus.Reactor // for participating in the consensus
 	pexReactor       service.Service    // for exchanging peer addresses
 	evidenceReactor  service.Service
 	rpcListeners     []net.Listener // rpc servers
@@ -81,23 +83,23 @@ type nodeImpl struct {
 // newDefaultNode returns a Tendermint node with default settings for the
 // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
 // It implements NodeProvider.
-func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, error) {
-	nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile())
+func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) {
+	nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
 	if err != nil {
-		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
+		return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err)
 	}
-	if config.Mode == cfg.ModeSeed {
-		return makeSeedNode(config,
-			cfg.DefaultDBProvider,
+	if cfg.Mode == config.ModeSeed {
+		return makeSeedNode(cfg,
+			config.DefaultDBProvider,
 			nodeKey,
-			defaultGenesisDocProviderFunc(config),
+			defaultGenesisDocProviderFunc(cfg),
 			logger,
 		)
 	}

 	var pval *privval.FilePV
-	if config.Mode == cfg.ModeValidator {
-		pval, err = privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
+	if cfg.Mode == config.ModeValidator {
+		pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
 		if err != nil {
 			return nil, err
 		}
@@ -105,27 +107,27 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err
 		pval = nil
 	}

-	appClient, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
-	return makeNode(config,
+	appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
+	return makeNode(cfg,
 		pval,
 		nodeKey,
 		appClient,
-		defaultGenesisDocProviderFunc(config),
-		cfg.DefaultDBProvider,
+		defaultGenesisDocProviderFunc(cfg),
+		config.DefaultDBProvider,
 		logger,
 	)
 }

 // makeNode returns a new, ready to go, Tendermint Node.
-func makeNode(config *cfg.Config,
+func makeNode(cfg *config.Config,
 	privValidator types.PrivValidator,
 	nodeKey types.NodeKey,
 	clientCreator abciclient.Creator,
 	genesisDocProvider genesisDocProvider,
-	dbProvider cfg.DBProvider,
+	dbProvider config.DBProvider,
 	logger log.Logger) (service.Service, error) {

-	blockStore, stateDB, err := initDBs(config, dbProvider)
+	blockStore, stateDB, err := initDBs(cfg, dbProvider)
 	if err != nil {
 		return nil, err
 	}
@@ -161,31 +163,31 @@ func makeNode(config *cfg.Config,
 		return nil, err
 	}

-	indexerService, eventSinks, err := createAndStartIndexerService(config, dbProvider, eventBus, logger, genDoc.ChainID)
+	indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID)
 	if err != nil {
 		return nil, err
 	}

 	// If an address is provided, listen on the socket for a connection from an
 	// external signing process.
-	if config.PrivValidator.ListenAddr != "" {
-		protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
+	if cfg.PrivValidator.ListenAddr != "" {
+		protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr)
 		// FIXME: we should start services inside OnStart
 		switch protocol {
 		case "grpc":
-			privValidator, err = createAndStartPrivValidatorGRPCClient(config, genDoc.ChainID, logger)
+			privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger)
 			if err != nil {
 				return nil, fmt.Errorf("error with private validator grpc client: %w", err)
 			}
 		default:
-			privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidator.ListenAddr, genDoc.ChainID, logger)
+			privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger)
 			if err != nil {
 				return nil, fmt.Errorf("error with private validator socket client: %w", err)
 			}
 		}
 	}

 	var pubKey crypto.PubKey
-	if config.Mode == cfg.ModeValidator {
+	if cfg.Mode == config.ModeValidator {
 		pubKey, err = privValidator.GetPubKey(context.TODO())
 		if err != nil {
 			return nil, fmt.Errorf("can't get pubkey: %w", err)
@@ -196,7 +198,7 @@ func makeNode(config *cfg.Config,
 	}

 	// Determine whether we should attempt state sync.
-	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
+	stateSync := cfg.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
 	if stateSync && state.LastBlockHeight > 0 {
 		logger.Info("Found local state with non-zero height, skipping state sync")
 		stateSync = false
@@ -221,43 +223,43 @@ func makeNode(config *cfg.Config,
 	// Determine whether we should do block sync. This must happen after the handshake, since the
 	// app may modify the validator set, specifying ourself as the only validator.
-	blockSync := config.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey)
+	blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey)

-	logNodeStartupInfo(state, pubKey, logger, consensusLogger, config.Mode)
+	logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode)

 	// TODO: Fetch and provide real options and do proper p2p bootstrapping.
 	// TODO: Use a persistent peer database.
-	nodeInfo, err := makeNodeInfo(config, nodeKey, eventSinks, genDoc, state)
+	nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state)
 	if err != nil {
 		return nil, err
 	}

 	p2pLogger := logger.With("module", "p2p")
-	transport := createTransport(p2pLogger, config)
+	transport := createTransport(p2pLogger, cfg)

-	peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID)
+	peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create peer manager: %w", err)
 	}

 	nodeMetrics :=
-		defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID)
+		defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID)

 	router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey,
-		peerManager, transport, getRouterConfig(config, proxyApp))
+		peerManager, transport, getRouterConfig(cfg, proxyApp))
 	if err != nil {
 		return nil, fmt.Errorf("failed to create router: %w", err)
 	}

 	mpReactorShim, mpReactor, mp, err := createMempoolReactor(
-		config, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
+		cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
 	)
 	if err != nil {
 		return nil, err
 	}

 	evReactorShim, evReactor, evPool, err := createEvidenceReactor(
-		config, dbProvider, stateDB, blockStore, peerManager, router, logger,
+		cfg, dbProvider, stateDB, blockStore, peerManager, router, logger,
 	)
 	if err != nil {
 		return nil, err
@@ -275,16 +277,16 @@ func makeNode(config *cfg.Config,
 	)

 	csReactorShim, csReactor, csState := createConsensusReactor(
-		config, state, blockExec, blockStore, mp, evPool,
-		privValidator, nodeMetrics.cs, stateSync || blockSync, eventBus,
+		cfg, state, blockExec, blockStore, mp, evPool,
+		privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus,
 		peerManager, router, consensusLogger,
 	)

 	// Create the blockchain reactor. Note, we do not start block sync if we're
 	// doing a state sync first.
 	bcReactorShim, bcReactor, err := createBlockchainReactor(
-		logger, config, state, blockExec, blockStore, csReactor,
-		peerManager, router, blockSync && !stateSync, nodeMetrics.cs,
+		logger, cfg, state, blockExec, blockStore, csReactor,
+		peerManager, router, blockSync && !stateSync, nodeMetrics.consensus,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
@@ -301,9 +303,9 @@ func makeNode(config *cfg.Config,
 	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
 	// FIXME We need to update metrics here, since other reactors don't have access to them.
 	if stateSync {
-		nodeMetrics.cs.StateSyncing.Set(1)
+		nodeMetrics.consensus.StateSyncing.Set(1)
 	} else if blockSync {
-		nodeMetrics.cs.BlockSyncing.Set(1)
+		nodeMetrics.consensus.BlockSyncing.Set(1)
 	}

 	// Set up state sync reactor, and schedule a sync if requested.
@@ -320,7 +322,7 @@ func makeNode(config *cfg.Config,
 	stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims)

-	if config.P2P.UseLegacy {
+	if cfg.P2P.UseLegacy {
 		channels = getChannelsFromShim(stateSyncReactorShim)
 		peerUpdates = stateSyncReactorShim.PeerUpdates
 	} else {
@@ -331,7 +333,7 @@ func makeNode(config *cfg.Config,
 	stateSyncReactor = statesync.NewReactor(
 		genDoc.ChainID,
 		genDoc.InitialHeight,
-		*config.StateSync,
+		*cfg.StateSync,
 		stateSyncReactorShim.Logger,
 		proxyApp.Snapshot(),
 		proxyApp.Query(),
@@ -342,7 +344,7 @@ func makeNode(config *cfg.Config,
 		peerUpdates,
 		stateStore,
 		blockStore,
-		config.StateSync.TempDir,
+		cfg.StateSync.TempDir,
 		nodeMetrics.statesync,
 	)
@@ -378,46 +380,46 @@ func makeNode(config *cfg.Config,
 	pexCh := pex.ChannelDescriptor()
 	transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})

-	if config.P2P.UseLegacy {
+	if cfg.P2P.UseLegacy {
 		// setup Transport and Switch
 		sw = createSwitch(
-			config, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch,
+			cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch,
 			stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger,
 		)

-		err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
+		err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
 		if err != nil {
 			return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err)
 		}

-		err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
+		err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
 		if err != nil {
 			return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
 		}

-		addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
+		addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
 		if err != nil {
 			return nil, fmt.Errorf("could not create addrbook: %w", err)
 		}

-		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
+		pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
 	} else {
 		addrBook = nil
-		pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
+		pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
 		if err != nil {
 			return nil, err
 		}
 	}

-	if config.RPC.PprofListenAddress != "" {
+	if cfg.RPC.PprofListenAddress != "" {
 		go func() {
-			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
-			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
+			logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
+			logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
 		}()
 	}

 	node := &nodeImpl{
-		config:        config,
+		config:        cfg,
 		genesisDoc:    genDoc,
 		privValidator: privValidator,
@@ -452,7 +454,7 @@ func makeNode(config *cfg.Config,
 		ConsensusState:   csState,
 		ConsensusReactor: csReactor,
-		BlockSyncReactor: bcReactor.(cs.BlockSyncReactor),
+		BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor),

 		P2PPeers:    sw,
 		PeerManager: peerManager,
@@ -462,7 +464,7 @@ func makeNode(config *cfg.Config,
 		EventBus: eventBus,
 		Mempool:  mp,
 		Logger:   logger.With("module", "rpc"),
-		Config:   *config.RPC,
+		Config:   *cfg.RPC,
 	},
 	}
@@ -485,8 +487,8 @@ func makeNode(config *cfg.Config,
 }

 // makeSeedNode returns a new seed node, containing only p2p, pex reactor
-func makeSeedNode(config *cfg.Config,
-	dbProvider cfg.DBProvider,
+func makeSeedNode(cfg *config.Config,
+	dbProvider config.DBProvider,
 	nodeKey types.NodeKey,
 	genesisDocProvider genesisDocProvider,
 	logger log.Logger,
@@ -502,23 +504,23 @@ func makeSeedNode(config *cfg.Config,
 		return nil, err
 	}

-	nodeInfo, err := makeSeedNodeInfo(config, nodeKey, genDoc, state)
+	nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state)
 	if err != nil {
 		return nil, err
 	}

 	// Setup Transport and Switch.
-	p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
+	p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
 	p2pLogger := logger.With("module", "p2p")
-	transport := createTransport(p2pLogger, config)
+	transport := createTransport(p2pLogger, cfg)

-	peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID)
+	peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create peer manager: %w", err)
 	}

 	router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey,
-		peerManager, transport, getRouterConfig(config, nil))
+		peerManager, transport, getRouterConfig(cfg, nil))
 	if err != nil {
 		return nil, fmt.Errorf("failed to create router: %w", err)
 	}
@@ -536,44 +538,44 @@ func makeSeedNode(config *cfg.Config,
 	pexCh := pex.ChannelDescriptor()
 	transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})

-	if config.P2P.UseLegacy {
+	if cfg.P2P.UseLegacy {
 		sw = createSwitch(
-			config, transport, p2pMetrics, nil, nil,
+			cfg, transport, p2pMetrics, nil, nil,
 			nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger,
 		)

-		err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
+		err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
 		if err != nil {
 			return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
 		}

-		err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
+		err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
 		if err != nil {
 			return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
 		}

-		addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
+		addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
 		if err != nil {
 			return nil, fmt.Errorf("could not create addrbook: %w", err)
 		}

-		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
+		pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
 	} else {
-		pexReactor, err = createPEXReactorV2(config, logger, peerManager, router)
+		pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
 		if err != nil {
 			return nil, err
 		}
 	}

-	if config.RPC.PprofListenAddress != "" {
+	if cfg.RPC.PprofListenAddress != "" {
 		go func() {
-			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
-			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
+			logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
+			logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
 		}()
 	}

 	node := &nodeImpl{
-		config:     config,
+		config:     cfg,
 		genesisDoc: genDoc,
 		transport:  transport,
@@ -602,7 +604,7 @@ func (n *nodeImpl) OnStart() error {
 	// Start the RPC server before the P2P server
 	// so we can eg. receive txs for the first block
-	if n.config.RPC.ListenAddress != "" && n.config.Mode != cfg.ModeSeed {
+	if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed {
 		listeners, err := n.startRPC()
 		if err != nil {
 			return err
@@ -637,8 +639,8 @@ func (n *nodeImpl) OnStart() error {
 		return err
 	}

-	if n.config.Mode != cfg.ModeSeed {
-		if n.config.BlockSync.Version == cfg.BlockSyncV0 {
+	if n.config.Mode != config.ModeSeed {
+		if n.config.BlockSync.Version == config.BlockSyncV0 {
 			if err := n.bcReactor.Start(); err != nil {
 				return err
 			}
@@ -679,7 +681,7 @@ func (n *nodeImpl) OnStart() error {
 	// TODO: We shouldn't run state sync if we already have state that has a
 	// LastBlockHeight that is not InitialHeight
 	if n.stateSync {
-		bcR, ok := n.bcReactor.(cs.BlockSyncReactor)
+		bcR, ok := n.bcReactor.(consensus.BlockSyncReactor)
 		if !ok {
 			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
 		}
@@ -758,9 +760,9 @@ func (n *nodeImpl) OnStop() {
 		n.Logger.Error("Error closing indexerService", "err", err)
 	}

-	if n.config.Mode != cfg.ModeSeed {
+	if n.config.Mode != config.ModeSeed {
 		// now stop the reactors
-		if n.config.BlockSync.Version == cfg.BlockSyncV0 {
+		if n.config.BlockSync.Version == config.BlockSyncV0 {
 			// Stop the real blockchain reactor separately since the switch uses the shim.
 			if err := n.bcReactor.Stop(); err != nil {
 				n.Logger.Error("failed to stop the blockchain reactor", "err", err)
@@ -831,7 +833,7 @@ func (n *nodeImpl) OnStop() {
 }

 func (n *nodeImpl) startRPC() ([]net.Listener, error) {
-	if n.config.Mode == cfg.ModeValidator {
+	if n.config.Mode == config.ModeValidator {
 		pubKey, err := n.privValidator.GetPubKey(context.TODO())
 		if pubKey == nil || err != nil {
 			return nil, fmt.Errorf("can't get pubkey: %w", err)
@@ -849,15 +851,15 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) {
 		n.rpcEnv.AddUnsafe(routes)
 	}

-	config := rpcserver.DefaultConfig()
-	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
-	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
-	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
+	cfg := rpcserver.DefaultConfig()
+	cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes
+	cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
+	cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections
 	// If necessary adjust global WriteTimeout to ensure it's greater than
 	// TimeoutBroadcastTxCommit.
// See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } // we may expose the rpc over both a unix and tcp socket @@ -873,14 +875,14 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } }), - rpcserver.ReadLimit(config.MaxBodyBytes), + rpcserver.ReadLimit(cfg.MaxBodyBytes), ) wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, - config.MaxOpenConnections, + cfg.MaxOpenConnections, ) if err != nil { return nil, err @@ -903,7 +905,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { n.config.RPC.CertFile(), n.config.RPC.KeyFile(), rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server with TLS", "err", err) } @@ -914,7 +916,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listener, rootHandler, rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server", "err", err) } @@ -927,18 +929,18 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { // we expose a simplified api over grpc for convenience to app devs grpcListenAddr := n.config.RPC.GRPCListenAddress if grpcListenAddr != "" { - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections - config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections + cfg.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. // See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } - listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections) + listener, err := rpcserver.Listen(grpcListenAddr, cfg.MaxOpenConnections) if err != nil { return nil, err } @@ -977,7 +979,7 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { } // ConsensusReactor returns the Node's ConsensusReactor. -func (n *nodeImpl) ConsensusReactor() *cs.Reactor { +func (n *nodeImpl) ConsensusReactor() *consensus.Reactor { return n.consensusReactor } @@ -1031,14 +1033,14 @@ type genesisDocProvider func() (*types.GenesisDoc, error) // defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads // the GenesisDoc from the config.GenesisFile() on the filesystem. 
-func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider {
+func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider {
 return func() (*types.GenesisDoc, error) {
- return types.GenesisDocFromFile(config.GenesisFile())
+ return types.GenesisDocFromFile(cfg.GenesisFile())
 }
 }
 
 type nodeMetrics struct {
- cs *cs.Metrics
+ consensus *consensus.Metrics
 p2p *p2p.Metrics
 mempool *mempool.Metrics
 state *sm.Metrics
@@ -1050,19 +1052,19 @@ type metricsProvider func(chainID string) *nodeMetrics
 
 // defaultMetricsProvider returns Metrics built using the Prometheus client library
 // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
-func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider {
+func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider {
 return func(chainID string) *nodeMetrics {
- if config.Prometheus {
+ if cfg.Prometheus {
 return &nodeMetrics{
- cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
- p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
- mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID),
- sm.PrometheusMetrics(config.Namespace, "chain_id", chainID),
- statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID),
+ consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
+ p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
+ mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
+ sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
+ statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID),
 }
 }
 return &nodeMetrics{
- cs.NopMetrics(),
+ consensus.NopMetrics(),
 p2p.NopMetrics(),
 mempool.NopMetrics(),
 sm.NopMetrics(),
@@ -1130,15 +1132,15 @@ func createAndStartPrivValidatorSocketClient(
 }
 
 func createAndStartPrivValidatorGRPCClient(
- config *cfg.Config,
+ cfg *config.Config,
 chainID string,
 logger log.Logger,
 ) (types.PrivValidator, error) {
 pvsc, err := tmgrpc.DialRemoteSigner(
- config.PrivValidator,
+ cfg.PrivValidator,
 chainID,
 logger,
- config.Instrumentation.Prometheus,
+ cfg.Instrumentation.Prometheus,
 )
 if err != nil {
 return nil, fmt.Errorf("failed to start private validator: %w", err)
@@ -1153,7 +1155,7 @@ func createAndStartPrivValidatorGRPCClient(
 return pvsc, nil
 }
 
-func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOptions {
+func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions {
 opts := p2p.RouterOptions{
 QueueType: conf.P2P.QueueType,
 }
diff --git a/node/node_test.go b/node/node_test.go
index aae7f8008..a940fef54 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -13,12 +13,11 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 
- dbm "github.com/tendermint/tm-db"
 
 abciclient "github.com/tendermint/tendermint/abci/client"
 "github.com/tendermint/tendermint/abci/example/kvstore"
- cfg "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/config"
 "github.com/tendermint/tendermint/crypto"
 "github.com/tendermint/tendermint/crypto/ed25519"
 "github.com/tendermint/tendermint/crypto/tmhash"
@@ -38,12 +37,12 @@ import (
 )
 
 func TestNodeStartStop(t *testing.T) {
- config := cfg.ResetTestRoot("node_node_test")
+ cfg := config.ResetTestRoot("node_node_test")
 
- defer os.RemoveAll(config.RootDir)
+ defer os.RemoveAll(cfg.RootDir)
 
 // create & start node
- ns, err := newDefaultNode(config, log.TestingLogger())
+ ns, err := newDefaultNode(cfg, log.TestingLogger())
 require.NoError(t, err)
require.NoError(t, ns.Start()) @@ -81,7 +80,7 @@ func TestNodeStartStop(t *testing.T) { } } -func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl { +func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { t.Helper() ns, err := newDefaultNode(conf, logger) require.NoError(t, err) @@ -92,12 +91,12 @@ func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl { } func TestNodeDelayedStart(t *testing.T) { - config := cfg.ResetTestRoot("node_delayed_start_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_delayed_start_test") + defer os.RemoveAll(cfg.RootDir) now := tmtime.Now() // create & start node - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(t, cfg, log.TestingLogger()) n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) require.NoError(t, n.Start()) @@ -108,11 +107,11 @@ func TestNodeDelayedStart(t *testing.T) { } func TestNodeSetAppVersion(t *testing.T) { - config := cfg.ResetTestRoot("node_app_version_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_app_version_test") + defer os.RemoveAll(cfg.RootDir) // create node - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(t, cfg, log.TestingLogger()) // default config uses the kvstore app var appVersion uint64 = kvstore.ProtocolVersion @@ -129,9 +128,9 @@ func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = addr + cfg := config.ResetTestRoot("node_priv_val_tcp_test") + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) dialerEndpoint := privval.NewSignerDialerEndpoint( @@ -142,7 +141,7 @@ func TestNodeSetPrivValTCP(t *testing.T) { signerServer := privval.NewSignerServer( dialerEndpoint, - config.ChainID(), + cfg.ChainID(), types.NewMockPV(), ) @@ -154,7 +153,7 @@ func TestNodeSetPrivValTCP(t *testing.T) { }() defer signerServer.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } @@ -162,11 +161,11 @@ func TestNodeSetPrivValTCP(t *testing.T) { func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { addrNoPrefix := testFreeAddr(t) - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = addrNoPrefix + cfg := config.ResetTestRoot("node_priv_val_tcp_test") + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = addrNoPrefix - _, err := newDefaultNode(config, log.TestingLogger()) + _, err := newDefaultNode(cfg, log.TestingLogger()) assert.Error(t, err) } @@ -174,9 +173,9 @@ func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." 
+ tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up - config := cfg.ResetTestRoot("node_priv_val_tcp_test") - defer os.RemoveAll(config.RootDir) - config.PrivValidator.ListenAddr = "unix://" + tmpfile + cfg := config.ResetTestRoot("node_priv_val_tcp_test") + defer os.RemoveAll(cfg.RootDir) + cfg.PrivValidator.ListenAddr = "unix://" + tmpfile dialer := privval.DialUnixFn(tmpfile) dialerEndpoint := privval.NewSignerDialerEndpoint( @@ -187,7 +186,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { pvsc := privval.NewSignerServer( dialerEndpoint, - config.ChainID(), + cfg.ChainID(), types.NewMockPV(), ) @@ -196,7 +195,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { require.NoError(t, err) }() defer pvsc.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, config, log.TestingLogger()) + n := getTestNode(t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } @@ -212,8 +211,8 @@ func testFreeAddr(t *testing.T) string { // create a proposal block using real and full // mempool and evidence pool and validate it. func TestCreateProposalBlock(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_create_proposal") + defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() @@ -233,7 +232,7 @@ func TestCreateProposalBlock(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) mp := mempoolv0.NewCListMempool( - config.Mempool, + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempoolv0.WithMetrics(mempool.NopMetrics()), @@ -304,8 +303,8 @@ func TestCreateProposalBlock(t *testing.T) { } func TestMaxTxsProposalBlockSize(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_create_proposal") + defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() @@ -325,7 +324,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { // Make Mempool mp := mempoolv0.NewCListMempool( - config.Mempool, + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempoolv0.WithMetrics(mempool.NopMetrics()), @@ -366,8 +365,8 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { } func TestMaxProposalBlockSize(t *testing.T) { - config := cfg.ResetTestRoot("node_create_proposal") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_create_proposal") + defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() @@ -385,7 +384,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // Make Mempool mp := mempoolv0.NewCListMempool( - config.Mempool, + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempoolv0.WithMetrics(mempool.NopMetrics()), @@ -481,17 +480,17 @@ func TestMaxProposalBlockSize(t *testing.T) { } func TestNodeNewSeedNode(t *testing.T) { - config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") - config.Mode = cfg.ModeSeed - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_new_node_custom_reactors_test") + cfg.Mode = config.ModeSeed + defer os.RemoveAll(cfg.RootDir) - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) require.NoError(t, err) - ns, err := makeSeedNode(config, - 
cfg.DefaultDBProvider,
+ ns, err := makeSeedNode(cfg,
+ config.DefaultDBProvider,
 nodeKey,
- defaultGenesisDocProviderFunc(config),
+ defaultGenesisDocProviderFunc(cfg),
 log.TestingLogger(),
 )
 require.NoError(t, err)
@@ -505,68 +504,68 @@ func TestNodeNewSeedNode(t *testing.T) {
 }
 
 func TestNodeSetEventSink(t *testing.T) {
- config := cfg.ResetTestRoot("node_app_version_test")
- defer os.RemoveAll(config.RootDir)
+ cfg := config.ResetTestRoot("node_app_version_test")
+ defer os.RemoveAll(cfg.RootDir)
 
 logger := log.TestingLogger()
 
- setupTest := func(t *testing.T, conf *cfg.Config) []indexer.EventSink {
+ setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
 eventBus, err := createAndStartEventBus(logger)
 require.NoError(t, err)
 
- genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
+ genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
 require.NoError(t, err)
 
- indexService, eventSinks, err := createAndStartIndexerService(config,
- cfg.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
+ indexService, eventSinks, err := createAndStartIndexerService(cfg,
+ config.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
 require.NoError(t, err)
 t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
 
 return eventSinks
 }
 
- eventSinks := setupTest(t, config)
+ eventSinks := setupTest(t, cfg)
 assert.Equal(t, 1, len(eventSinks))
 assert.Equal(t, indexer.KV, eventSinks[0].Type())
 
- config.TxIndex.Indexer = []string{"null"}
- eventSinks = setupTest(t, config)
+ cfg.TxIndex.Indexer = []string{"null"}
+ eventSinks = setupTest(t, cfg)
 assert.Equal(t, 1, len(eventSinks))
 assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
- config.TxIndex.Indexer = []string{"null", "kv"}
- eventSinks = setupTest(t, config)
+ cfg.TxIndex.Indexer = []string{"null", "kv"}
+ eventSinks = setupTest(t, cfg)
 assert.Equal(t, 1, len(eventSinks))
 assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
- config.TxIndex.Indexer = []string{"kvv"}
- ns, err := newDefaultNode(config, logger)
+ cfg.TxIndex.Indexer = []string{"kvv"}
+ ns, err := newDefaultNode(cfg, logger)
 assert.Nil(t, ns)
 assert.Equal(t, errors.New("unsupported event sink type"), err)
 
- config.TxIndex.Indexer = []string{}
- eventSinks = setupTest(t, config)
+ cfg.TxIndex.Indexer = []string{}
+ eventSinks = setupTest(t, cfg)
 assert.Equal(t, 1, len(eventSinks))
 assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
- config.TxIndex.Indexer = []string{"psql"}
- ns, err = newDefaultNode(config, logger)
+ cfg.TxIndex.Indexer = []string{"psql"}
+ ns, err = newDefaultNode(cfg, logger)
 assert.Nil(t, ns)
 assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
 
 var psqlConn = "test"
 
- config.TxIndex.Indexer = []string{"psql"}
- config.TxIndex.PsqlConn = psqlConn
- eventSinks = setupTest(t, config)
+ cfg.TxIndex.Indexer = []string{"psql"}
+ cfg.TxIndex.PsqlConn = psqlConn
+ eventSinks = setupTest(t, cfg)
 assert.Equal(t, 1, len(eventSinks))
 assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
 
- config.TxIndex.Indexer = []string{"psql", "kv"}
- config.TxIndex.PsqlConn = psqlConn
- eventSinks = setupTest(t, config)
+ cfg.TxIndex.Indexer = []string{"psql", "kv"}
+ cfg.TxIndex.PsqlConn = psqlConn
+ eventSinks = setupTest(t, cfg)
 assert.Equal(t, 2, len(eventSinks))
 // we use a map to filter out duplicated sinks, so the order in which sinks are appended is not guaranteed.
@@ -577,9 +576,9 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.KV, eventSinks[1].Type()) } - config.TxIndex.Indexer = []string{"kv", "psql"} - config.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, config) + cfg.TxIndex.Indexer = []string{"kv", "psql"} + cfg.TxIndex.PsqlConn = psqlConn + eventSinks = setupTest(t, cfg) assert.Equal(t, 2, len(eventSinks)) if eventSinks[0].Type() == indexer.KV { @@ -590,15 +589,15 @@ func TestNodeSetEventSink(t *testing.T) { } var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") - config.TxIndex.Indexer = []string{"psql", "kv", "Kv"} - config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"} + cfg.TxIndex.PsqlConn = psqlConn + _, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) - config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} - config.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(config, logger) + cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} + cfg.TxIndex.PsqlConn = psqlConn + _, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) } @@ -648,13 +647,13 @@ func loadStatefromGenesis(t *testing.T) sm.State { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - config := cfg.ResetTestRoot("load_state_from_genesis") + cfg := config.ResetTestRoot("load_state_from_genesis") loadedState, err := stateStore.Load() require.NoError(t, err) require.True(t, loadedState.IsEmpty()) - genDoc, _ := factory.RandGenesisDoc(config, 0, false, 10) + genDoc, _ := factory.RandGenesisDoc(cfg, 0, false, 10) state, err := loadStateFromDBOrGenesisDocProvider( stateStore, diff --git a/node/setup.go b/node/setup.go index 6d403b0b0..63fb4c739 100644 --- a/node/setup.go +++ b/node/setup.go @@ -7,18 +7,17 @@ import ( "fmt" "math" "net" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port "time" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" @@ -37,17 +36,19 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" + + _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port ) -func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { +func initDBs(cfg *config.Config, dbProvider config.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { //nolint:lll var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config}) + blockStoreDB, err = dbProvider(&config.DBContext{ID: "blockstore", Config: cfg}) if err != nil { return } blockStore = store.NewBlockStore(blockStoreDB) - stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config}) + stateDB, err = 
dbProvider(&config.DBContext{ID: "state", Config: cfg}) return } @@ -70,13 +71,13 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { } func createAndStartIndexerService( - config *cfg.Config, - dbProvider cfg.DBProvider, + cfg *config.Config, + dbProvider config.DBProvider, eventBus *types.EventBus, logger log.Logger, chainID string, ) (*indexer.Service, []indexer.EventSink, error) { - eventSinks, err := sink.EventSinksFromConfig(config, dbProvider, chainID) + eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID) if err != nil { return nil, nil, err } @@ -100,7 +101,7 @@ func doHandshake( proxyApp proxy.AppConns, consensusLogger log.Logger) error { - handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) + handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) if err := handshaker.Handshake(proxyApp); err != nil { @@ -126,9 +127,9 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL ) } switch { - case mode == cfg.ModeFull: + case mode == config.ModeFull: consensusLogger.Info("This node is a fullnode") - case mode == cfg.ModeValidator: + case mode == config.ModeValidator: addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { @@ -149,7 +150,7 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { } func createMempoolReactor( - config *cfg.Config, + cfg *config.Config, proxyApp proxy.AppConns, state sm.State, memplMetrics *mempool.Metrics, @@ -158,8 +159,8 @@ func createMempoolReactor( logger log.Logger, ) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { - logger = logger.With("module", "mempool", "version", config.Mempool.Version) - channelShims := mempoolv0.GetChannelShims(config.Mempool) + logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) + channelShims := mempoolv0.GetChannelShims(cfg.Mempool) reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) var ( @@ -167,7 +168,7 @@ func createMempoolReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.UseLegacy { + if cfg.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates } else { @@ -175,10 +176,10 @@ func createMempoolReactor( peerUpdates = peerManager.Subscribe() } - switch config.Mempool.Version { - case cfg.MempoolV0: + switch cfg.Mempool.Version { + case config.MempoolV0: mp := mempoolv0.NewCListMempool( - config.Mempool, + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempoolv0.WithMetrics(memplMetrics), @@ -190,23 +191,23 @@ func createMempoolReactor( reactor := mempoolv0.NewReactor( logger, - config.Mempool, + cfg.Mempool, peerManager, mp, channels[mempool.MempoolChannel], peerUpdates, ) - if config.Consensus.WaitForTxs() { + if cfg.Consensus.WaitForTxs() { mp.EnableTxsAvailable() } return reactorShim, reactor, mp, nil - case cfg.MempoolV1: + case config.MempoolV1: mp := mempoolv1.NewTxMempool( logger, - config.Mempool, + cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempoolv1.WithMetrics(memplMetrics), @@ -216,34 +217,34 @@ func createMempoolReactor( reactor := mempoolv1.NewReactor( logger, - config.Mempool, + cfg.Mempool, peerManager, mp, channels[mempool.MempoolChannel], peerUpdates, ) - if config.Consensus.WaitForTxs() { + if cfg.Consensus.WaitForTxs() { mp.EnableTxsAvailable() } return reactorShim, reactor, mp, nil default: - return nil, nil, nil, 
fmt.Errorf("unknown mempool version: %s", config.Mempool.Version) + return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) } } func createEvidenceReactor( - config *cfg.Config, - dbProvider cfg.DBProvider, + cfg *config.Config, + dbProvider config.DBProvider, stateDB dbm.DB, blockStore *store.BlockStore, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, ) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) { - evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config}) + evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { return nil, nil, nil, err } @@ -261,7 +262,7 @@ func createEvidenceReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.UseLegacy { + if cfg.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates } else { @@ -281,21 +282,21 @@ func createEvidenceReactor( func createBlockchainReactor( logger log.Logger, - config *cfg.Config, + cfg *config.Config, state sm.State, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, - csReactor *cs.Reactor, + csReactor *consensus.Reactor, peerManager *p2p.PeerManager, router *p2p.Router, blockSync bool, - metrics *cs.Metrics, + metrics *consensus.Metrics, ) (*p2p.ReactorShim, service.Service, error) { logger = logger.With("module", "blockchain") - switch config.BlockSync.Version { - case cfg.BlockSyncV0: + switch cfg.BlockSync.Version { + case config.BlockSyncV0: reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) var ( @@ -303,7 +304,7 @@ func createBlockchainReactor( peerUpdates *p2p.PeerUpdates ) - if config.P2P.UseLegacy { + if cfg.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates } else { @@ -322,69 +323,69 @@ func createBlockchainReactor( return reactorShim, reactor, nil - case cfg.BlockSyncV2: + case config.BlockSyncV2: return nil, nil, errors.New("block sync version v2 is no longer supported. 
Please use v0") default: - return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version) + return nil, nil, fmt.Errorf("unknown block sync version %s", cfg.BlockSync.Version) } } func createConsensusReactor( - config *cfg.Config, + cfg *config.Config, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mp mempool.Mempool, evidencePool *evidence.Pool, privValidator types.PrivValidator, - csMetrics *cs.Metrics, + csMetrics *consensus.Metrics, waitSync bool, eventBus *types.EventBus, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *cs.Reactor, *cs.State) { +) (*p2p.ReactorShim, *consensus.Reactor, *consensus.State) { - consensusState := cs.NewState( - config.Consensus, + consensusState := consensus.NewState( + cfg.Consensus, state.Copy(), blockExec, blockStore, mp, evidencePool, - cs.StateMetrics(csMetrics), + consensus.StateMetrics(csMetrics), ) consensusState.SetLogger(logger) - if privValidator != nil && config.Mode == cfg.ModeValidator { + if privValidator != nil && cfg.Mode == config.ModeValidator { consensusState.SetPrivValidator(privValidator) } - reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims) + reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", consensus.ChannelShims) var ( channels map[p2p.ChannelID]*p2p.Channel peerUpdates *p2p.PeerUpdates ) - if config.P2P.UseLegacy { + if cfg.P2P.UseLegacy { channels = getChannelsFromShim(reactorShim) peerUpdates = reactorShim.PeerUpdates } else { - channels = makeChannelsFromShims(router, cs.ChannelShims) + channels = makeChannelsFromShims(router, consensus.ChannelShims) peerUpdates = peerManager.Subscribe() } - reactor := cs.NewReactor( + reactor := consensus.NewReactor( logger, consensusState, - channels[cs.StateChannel], - channels[cs.DataChannel], - channels[cs.VoteChannel], - channels[cs.VoteSetBitsChannel], + channels[consensus.StateChannel], + channels[consensus.DataChannel], + channels[consensus.VoteChannel], + channels[consensus.VoteSetBitsChannel], peerUpdates, waitSync, - cs.ReactorMetrics(csMetrics), + consensus.ReactorMetrics(csMetrics), ) // Services which will be publishing and/or subscribing for messages (events) @@ -394,20 +395,20 @@ func createConsensusReactor( return reactorShim, reactor, consensusState } -func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport { +func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { return p2p.NewMConnTransport( - logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{}, + logger, p2p.MConnConfig(cfg.P2P), []*p2p.ChannelDescriptor{}, p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers + - len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")), + MaxAcceptedConnections: uint32(cfg.P2P.MaxNumInboundPeers + + len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")), ), }, ) } func createPeerManager( - config *cfg.Config, - dbProvider cfg.DBProvider, + cfg *config.Config, + dbProvider config.DBProvider, p2pLogger log.Logger, nodeID types.NodeID, ) (*p2p.PeerManager, error) { @@ -415,16 +416,16 @@ func createPeerManager( var maxConns uint16 switch { - case config.P2P.MaxConnections > 0: - maxConns = config.P2P.MaxConnections + case cfg.P2P.MaxConnections > 0: + maxConns = cfg.P2P.MaxConnections - case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0: - x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers + 
case cfg.P2P.MaxNumInboundPeers > 0 && cfg.P2P.MaxNumOutboundPeers > 0: + x := cfg.P2P.MaxNumInboundPeers + cfg.P2P.MaxNumOutboundPeers if x > math.MaxUint16 { return nil, fmt.Errorf( "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)", - config.P2P.MaxNumInboundPeers, - config.P2P.MaxNumOutboundPeers, + cfg.P2P.MaxNumInboundPeers, + cfg.P2P.MaxNumOutboundPeers, math.MaxUint16, ) } @@ -436,7 +437,7 @@ func createPeerManager( } privatePeerIDs := make(map[types.NodeID]struct{}) - for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") { + for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { privatePeerIDs[types.NodeID(id)] = struct{}{} } @@ -452,7 +453,7 @@ func createPeerManager( } peers := []p2p.NodeAddress{} - for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { return nil, fmt.Errorf("invalid peer address %q: %w", p, err) @@ -462,7 +463,7 @@ func createPeerManager( options.PersistentPeers = append(options.PersistentPeers, address.NodeID) } - for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") { + for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { return nil, fmt.Errorf("invalid peer address %q: %w", p, err) @@ -470,7 +471,7 @@ func createPeerManager( peers = append(peers, address) } - peerDB, err := dbProvider(&cfg.DBContext{ID: "peerstore", Config: config}) + peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg}) if err != nil { return nil, err } @@ -511,7 +512,7 @@ func createRouter( } func createSwitch( - config *cfg.Config, + cfg *config.Config, transport p2p.Transport, p2pMetrics *p2p.Metrics, mempoolReactor *p2p.ReactorShim, @@ -530,13 +531,13 @@ func createSwitch( peerFilters = []p2p.PeerFilterFunc{} ) - if !config.P2P.AllowDuplicateIP { + if !cfg.P2P.AllowDuplicateIP { connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) } // Filter peers by addr or pubkey with an ABCI query. // If the query return code is OK, add peer. - if config.FilterPeers { + if cfg.FilterPeers { connFilters = append( connFilters, // ABCI query for address filtering. 
@@ -575,7 +576,7 @@ func createSwitch( } sw := p2p.NewSwitch( - config.P2P, + cfg.P2P, transport, p2p.WithMetrics(p2pMetrics), p2p.SwitchPeerFilters(peerFilters...), @@ -583,7 +584,7 @@ func createSwitch( ) sw.SetLogger(p2pLogger) - if config.Mode != cfg.ModeSeed { + if cfg.Mode != config.ModeSeed { sw.AddReactor("MEMPOOL", mempoolReactor) sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) @@ -594,26 +595,26 @@ func createSwitch( sw.SetNodeInfo(nodeInfo) sw.SetNodeKey(nodeKey) - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile()) + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile()) return sw } -func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, +func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch, p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { - addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict) + addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile())) // Add ourselves to addrbook to prevent dialing ourselves - if config.P2P.ExternalAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress)) + if cfg.P2P.ExternalAddress != "" { + addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress)) if err != nil { return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) } addrBook.AddOurAddress(addr) } - if config.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress)) + if cfg.P2P.ListenAddress != "" { + addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) if err != nil { return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) } @@ -625,19 +626,19 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, return addrBook, nil } -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, +func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config, sw *p2p.Switch, logger log.Logger) *pex.Reactor { reactorConfig := &pex.ReactorConfig{ - Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "), - SeedMode: config.Mode == cfg.ModeSeed, + Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "), + SeedMode: cfg.Mode == config.ModeSeed, // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 // blocks assuming 10s blocks ~ 28 hours. // TODO (melekes): make it dynamic based on the actual block latencies // from the live network. // https://github.com/tendermint/tendermint/issues/3523 SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, + PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod, } // TODO persistent peers ? 
so we can have their DNS addrs saved pexReactor := pex.NewReactor(addrBook, reactorConfig) @@ -647,7 +648,7 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, } func createPEXReactorV2( - config *cfg.Config, + cfg *config.Config, logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, @@ -663,7 +664,7 @@ func createPEXReactorV2( } func makeNodeInfo( - config *cfg.Config, + cfg *config.Config, nodeKey types.NodeKey, eventSinks []indexer.EventSink, genDoc *types.GenesisDoc, @@ -676,15 +677,15 @@ func makeNodeInfo( } var bcChannel byte - switch config.BlockSync.Version { - case cfg.BlockSyncV0: + switch cfg.BlockSync.Version { + case config.BlockSyncV0: bcChannel = byte(bcv0.BlockSyncChannel) - case cfg.BlockSyncV2: + case config.BlockSyncV2: bcChannel = bcv2.BlockchainChannel default: - return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version) + return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", cfg.BlockSync.Version) } nodeInfo := types.NodeInfo{ @@ -698,10 +699,10 @@ func makeNodeInfo( Version: version.TMVersion, Channels: []byte{ bcChannel, - byte(cs.StateChannel), - byte(cs.DataChannel), - byte(cs.VoteChannel), - byte(cs.VoteSetBitsChannel), + byte(consensus.StateChannel), + byte(consensus.DataChannel), + byte(consensus.VoteChannel), + byte(consensus.VoteSetBitsChannel), byte(mempool.MempoolChannel), byte(evidence.EvidenceChannel), byte(statesync.SnapshotChannel), @@ -709,21 +710,21 @@ func makeNodeInfo( byte(statesync.LightBlockChannel), byte(statesync.ParamsChannel), }, - Moniker: config.Moniker, + Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: txIndexerStatus, - RPCAddress: config.RPC.ListenAddress, + RPCAddress: cfg.RPC.ListenAddress, }, } - if config.P2P.PexReactor { + if cfg.P2P.PexReactor { nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := config.P2P.ExternalAddress + lAddr := cfg.P2P.ExternalAddress if lAddr == "" { - lAddr = config.P2P.ListenAddress + lAddr = cfg.P2P.ListenAddress } nodeInfo.ListenAddr = lAddr @@ -733,7 +734,7 @@ func makeNodeInfo( } func makeSeedNodeInfo( - config *cfg.Config, + cfg *config.Config, nodeKey types.NodeKey, genDoc *types.GenesisDoc, state sm.State, @@ -748,21 +749,21 @@ func makeSeedNodeInfo( Network: genDoc.ChainID, Version: version.TMVersion, Channels: []byte{}, - Moniker: config.Moniker, + Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: "off", - RPCAddress: config.RPC.ListenAddress, + RPCAddress: cfg.RPC.ListenAddress, }, } - if config.P2P.PexReactor { + if cfg.P2P.PexReactor { nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := config.P2P.ExternalAddress + lAddr := cfg.P2P.ExternalAddress if lAddr == "" { - lAddr = config.P2P.ListenAddress + lAddr = cfg.P2P.ListenAddress } nodeInfo.ListenAddr = lAddr diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 77f3930aa..f4c0b7d99 100644 --- a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -7,7 +7,7 @@ import ( "google.golang.org/grpc/status" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -62,7 +62,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return nil, errStatus.Err() } - pk, err := 
cryptoenc.PubKeyFromProto(resp.PubKey) + pk, err := encoding.PubKeyFromProto(resp.PubKey) if err != nil { return nil, err } diff --git a/privval/grpc/server.go b/privval/grpc/server.go index f5c434b1b..13e0c9073 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -7,7 +7,7 @@ import ( "google.golang.org/grpc/status" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -44,7 +44,7 @@ func (ss *SignerServer) GetPubKey(ctx context.Context, req *privvalproto.PubKeyR return nil, status.Errorf(codes.NotFound, "error getting pubkey: %v", err) } - pk, err := cryptoenc.PubKeyToProto(pubKey) + pk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, status.Errorf(codes.Internal, "error transitioning pubkey to proto: %v", err) } diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 62647542c..413acca8e 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -11,7 +11,7 @@ import ( grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" grpc "google.golang.org/grpc" @@ -88,15 +88,15 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { // DialRemoteSigner is a generalized function to dial the gRPC server. func DialRemoteSigner( - config *cfg.PrivValidatorConfig, + cfg *config.PrivValidatorConfig, chainID string, logger log.Logger, usePrometheus bool, ) (*SignerClient, error) { var transportSecurity grpc.DialOption - if config.AreSecurityOptionsPresent() { - transportSecurity = GenerateTLS(config.ClientCertificateFile(), - config.ClientKeyFile(), config.RootCAFile(), logger) + if cfg.AreSecurityOptionsPresent() { + transportSecurity = GenerateTLS(cfg.ClientCertificateFile(), + cfg.ClientKeyFile(), cfg.RootCAFile(), logger) } else { transportSecurity = grpc.WithInsecure() logger.Info("Using an insecure gRPC connection!") @@ -111,7 +111,7 @@ func DialRemoteSigner( dialOptions = append(dialOptions, transportSecurity) ctx := context.Background() - _, address := tmnet.ProtocolAndAddress(config.ListenAddr) + _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr) conn, err := grpc.DialContext(ctx, address, dialOptions...) 
if err != nil { logger.Error("unable to connect to server", "target", address, "err", err) diff --git a/privval/msgs_test.go b/privval/msgs_test.go index bf532bd7b..7ac9f2c5d 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -60,7 +60,7 @@ func exampleProposal() *types.Proposal { // nolint:lll // ignore line length for tests func TestPrivvalVectors(t *testing.T) { pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() - ppk, err := cryptoenc.PubKeyToProto(pk) + ppk, err := encoding.PubKeyToProto(pk) require.NoError(t, err) // Generate a simple vote diff --git a/privval/secret_connection.go b/privval/secret_connection.go index 8847f91db..ffa5d36ed 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -23,7 +23,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" @@ -408,7 +408,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - pbpk, err := cryptoenc.PubKeyToProto(pubKey) + pbpk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, true, err } @@ -425,7 +425,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + pk, err := encoding.PubKeyFromProto(pba.PubKey) if err != nil { return nil, true, err // abort } diff --git a/privval/signer_client.go b/privval/signer_client.go index d25584c8f..5e5b32a92 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -6,7 +6,7 @@ import ( "time" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -83,7 +83,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) + pk, err := encoding.PubKeyFromProto(resp.PubKey) if err != nil { return nil, err } diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 18ad8a996..d07c65620 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" 
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -37,7 +37,7 @@ func DefaultValidationRequestHandler( if err != nil { return res, err } - pk, err := cryptoenc.PubKeyToProto(pubKey) + pk, err := encoding.PubKeyToProto(pubKey) if err != nil { return res, err } diff --git a/proto/tendermint/blocksync/message.go b/proto/tendermint/blocksync/message.go index d448ccc4b..1840c4e61 100644 --- a/proto/tendermint/blocksync/message.go +++ b/proto/tendermint/blocksync/message.go @@ -2,9 +2,9 @@ package blocksync import ( "errors" - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) const ( diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go index dd1aebbd0..f81de149f 100644 --- a/proto/tendermint/blocksync/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -5,7 +5,7 @@ import ( math "math" "testing" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" diff --git a/proto/tendermint/consensus/message.go b/proto/tendermint/consensus/message.go index 51feffc22..bcdab629a 100644 --- a/proto/tendermint/consensus/message.go +++ b/proto/tendermint/consensus/message.go @@ -1,9 +1,9 @@ package consensus import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a consensus proto message. diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go index 64a79bc81..a3e249f99 100644 --- a/proto/tendermint/mempool/message.go +++ b/proto/tendermint/mempool/message.go @@ -1,9 +1,9 @@ package mempool import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a mempool message. diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 8ba8cd2b2..38c8239dd 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -1,9 +1,9 @@ package p2p import ( - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a PEX message. diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go index 992cd7525..92d3764fd 100644 --- a/proto/tendermint/statesync/message.go +++ b/proto/tendermint/statesync/message.go @@ -2,9 +2,9 @@ package statesync import ( "errors" - fmt "fmt" + "fmt" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" ) // Wrap implements the p2p Wrapper interface and wraps a state sync proto message. 
diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go index 40428ec07..cccd25766 100644 --- a/proto/tendermint/statesync/message_test.go +++ b/proto/tendermint/statesync/message_test.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "testing" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index e5d2c35f3..54d19a201 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -13,7 +13,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -127,7 +127,7 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) { // send go func() { var ( - txres *ctypes.ResultBroadcastTx + txres *coretypes.ResultBroadcastTx err error ctx = context.Background() ) diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 5626b7f48..98b071b91 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -11,7 +11,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" @@ -150,7 +150,7 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) require.NoError(t, err, "Error reading query result, value %v", qres.Value) - pk, err := cryptoenc.PubKeyFromProto(v.PubKey) + pk, err := encoding.PubKeyFromProto(v.PubKey) require.NoError(t, err) require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index e241404f3..1acc29c11 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) @@ -138,7 +138,7 @@ func ExampleHTTP_batching() { // Each result in the returned list is the deserialized result of each // respective ABCIQuery response for _, result := range results { - qr, ok := result.(*ctypes.ResultABCIQuery) + qr, ok := result.(*coretypes.ResultABCIQuery) if !ok { log.Fatal("invalid result type from ABCIQuery request") } diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 6d2c2883d..60732b991 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" ) func TestWaitForHeight(t *testing.T) { @@ -33,7 +33,7 @@ func TestWaitForHeight(t *testing.T) { // now set current block height to 10 m.Call = 
mock.Call{ - Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, + Response: &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 10}}, } // we will not wait for more than 10 blocks @@ -53,7 +53,7 @@ func TestWaitForHeight(t *testing.T) { // we use the callback to update the status height myWaiter := func(delta int64) error { // update the height for the next call - m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} + m.Call.Response = &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 15}} return client.DefaultWaitStrategy(delta) } @@ -65,13 +65,13 @@ func TestWaitForHeight(t *testing.T) { pre := r.Calls[3] require.Nil(pre.Error) - prer, ok := pre.Response.(*ctypes.ResultStatus) + prer, ok := pre.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] require.Nil(post.Error) - postr, ok := post.Response.(*ctypes.ResultStatus) + postr, ok := post.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index c3a0f379b..65d0f434b 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) @@ -198,8 +198,8 @@ func (b *BatchHTTP) Count() int { //----------------------------------------------------------------------------- // baseRPCClient -func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - result := new(ctypes.ResultStatus) +func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) { + result := new(coretypes.ResultStatus) _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) if err != nil { return nil, err @@ -208,8 +208,8 @@ func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error return result, nil } -func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - result := new(ctypes.ResultABCIInfo) +func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + result := new(coretypes.ResultABCIInfo) _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) if err != nil { return nil, err @@ -222,7 +222,7 @@ func (c *baseRPCClient) ABCIQuery( ctx context.Context, path string, data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } @@ -230,8 +230,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - result := new(ctypes.ResultABCIQuery) + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + result := new(coretypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) @@ -245,8 +245,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( func (c *baseRPCClient) BroadcastTxCommit( ctx context.Context, tx 
 	types.Tx,
-) (*ctypes.ResultBroadcastTxCommit, error) {
+) (*coretypes.ResultBroadcastTxCommit, error) {
-	result := new(ctypes.ResultBroadcastTxCommit)
+	result := new(coretypes.ResultBroadcastTxCommit)
 	_, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result)
 	if err != nil {
 		return nil, err
 	}
@@ -257,14 +257,14 @@ func (c *baseRPCClient) BroadcastTxCommit(
 func (c *baseRPCClient) BroadcastTxAsync(
 	ctx context.Context,
 	tx types.Tx,
-) (*ctypes.ResultBroadcastTx, error) {
+) (*coretypes.ResultBroadcastTx, error) {
 	return c.broadcastTX(ctx, "broadcast_tx_async", tx)
 }
 
 func (c *baseRPCClient) BroadcastTxSync(
 	ctx context.Context,
 	tx types.Tx,
-) (*ctypes.ResultBroadcastTx, error) {
+) (*coretypes.ResultBroadcastTx, error) {
 	return c.broadcastTX(ctx, "broadcast_tx_sync", tx)
 }
 
@@ -272,8 +272,8 @@ func (c *baseRPCClient) broadcastTX(
 	ctx context.Context,
 	route string,
 	tx types.Tx,
-) (*ctypes.ResultBroadcastTx, error) {
-	result := new(ctypes.ResultBroadcastTx)
+) (*coretypes.ResultBroadcastTx, error) {
+	result := new(coretypes.ResultBroadcastTx)
 	_, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result)
 	if err != nil {
 		return nil, err
@@ -284,8 +284,8 @@ func (c *baseRPCClient) broadcastTX(
 func (c *baseRPCClient) UnconfirmedTxs(
 	ctx context.Context,
 	limit *int,
-) (*ctypes.ResultUnconfirmedTxs, error) {
-	result := new(ctypes.ResultUnconfirmedTxs)
+) (*coretypes.ResultUnconfirmedTxs, error) {
+	result := new(coretypes.ResultUnconfirmedTxs)
 	params := make(map[string]interface{})
 	if limit != nil {
 		params["limit"] = limit
@@ -297,8 +297,8 @@
 	return result, nil
 }
 
-func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
-	result := new(ctypes.ResultUnconfirmedTxs)
+func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
+	result := new(coretypes.ResultUnconfirmedTxs)
 	_, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -306,8 +306,8 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn
 	return result, nil
 }
 
-func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
-	result := new(ctypes.ResultCheckTx)
+func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
+	result := new(coretypes.ResultCheckTx)
 	_, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result)
 	if err != nil {
 		return nil, err
@@ -315,8 +315,8 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.Resul
 	return result, nil
 }
 
-func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
-	result := new(ctypes.ResultNetInfo)
+func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
+	result := new(coretypes.ResultNetInfo)
 	_, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -324,8 +324,8 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, err
 	return result, nil
 }
 
-func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
-	result := new(ctypes.ResultDumpConsensusState)
+func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
+	result := new(coretypes.ResultDumpConsensusState)
 	_, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -333,8 +333,8 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultD
 	return result, nil
 }
 
-func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
-	result := new(ctypes.ResultConsensusState)
+func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
+	result := new(coretypes.ResultConsensusState)
 	_, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -345,8 +345,8 @@ func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConse
 func (c *baseRPCClient) ConsensusParams(
 	ctx context.Context,
 	height *int64,
-) (*ctypes.ResultConsensusParams, error) {
-	result := new(ctypes.ResultConsensusParams)
+) (*coretypes.ResultConsensusParams, error) {
+	result := new(coretypes.ResultConsensusParams)
 	params := make(map[string]interface{})
 	if height != nil {
 		params["height"] = height
@@ -358,8 +358,8 @@ func (c *baseRPCClient) ConsensusParams(
 	return result, nil
 }
 
-func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
-	result := new(ctypes.ResultHealth)
+func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
+	result := new(coretypes.ResultHealth)
 	_, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -371,8 +371,8 @@ func (c *baseRPCClient) BlockchainInfo(
 	ctx context.Context,
 	minHeight,
 	maxHeight int64,
-) (*ctypes.ResultBlockchainInfo, error) {
-	result := new(ctypes.ResultBlockchainInfo)
+) (*coretypes.ResultBlockchainInfo, error) {
+	result := new(coretypes.ResultBlockchainInfo)
 	_, err := c.caller.Call(ctx, "blockchain",
 		map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight},
 		result)
@@ -382,8 +382,8 @@ func (c *baseRPCClient) BlockchainInfo(
 	return result, nil
 }
 
-func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
-	result := new(ctypes.ResultGenesis)
+func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
+	result := new(coretypes.ResultGenesis)
 	_, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result)
 	if err != nil {
 		return nil, err
@@ -391,8 +391,8 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, err
 	return result, nil
 }
 
-func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
-	result := new(ctypes.ResultGenesisChunk)
+func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
+	result := new(coretypes.ResultGenesisChunk)
 	_, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result)
 	if err != nil {
 		return nil, err
@@ -400,8 +400,8 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.Re
 	return result, nil
 }
 
-func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
-	result := new(ctypes.ResultBlock)
+func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
+	result := new(coretypes.ResultBlock)
 	params := make(map[string]interface{})
 	if height != nil {
 		params["height"] = height
@@ -413,8 +413,8 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul
 	return result, nil
 }
 
-func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
-	result := new(ctypes.ResultBlock)
+func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
+	result := new(coretypes.ResultBlock)
 	params := map[string]interface{}{
 		"hash": hash,
 	}
@@ -428,8 +428,8 @@ func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*
 func (c *baseRPCClient) BlockResults(
 	ctx context.Context,
 	height *int64,
-) (*ctypes.ResultBlockResults, error) {
-	result := new(ctypes.ResultBlockResults)
+) (*coretypes.ResultBlockResults, error) {
+	result := new(coretypes.ResultBlockResults)
 	params := make(map[string]interface{})
 	if height != nil {
 		params["height"] = height
@@ -441,8 +441,8 @@ func (c *baseRPCClient) BlockResults(
 	return result, nil
 }
 
-func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
-	result := new(ctypes.ResultCommit)
+func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
+	result := new(coretypes.ResultCommit)
 	params := make(map[string]interface{})
 	if height != nil {
 		params["height"] = height
@@ -454,8 +454,8 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu
 	return result, nil
 }
 
-func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
-	result := new(ctypes.ResultTx)
+func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
+	result := new(coretypes.ResultTx)
 	params := map[string]interface{}{
 		"hash":  hash,
 		"prove": prove,
@@ -474,9 +474,9 @@ func (c *baseRPCClient) TxSearch(
 	page,
 	perPage *int,
 	orderBy string,
-) (*ctypes.ResultTxSearch, error) {
+) (*coretypes.ResultTxSearch, error) {
 
-	result := new(ctypes.ResultTxSearch)
+	result := new(coretypes.ResultTxSearch)
 	params := map[string]interface{}{
 		"query": query,
 		"prove": prove,
@@ -503,9 +503,9 @@ func (c *baseRPCClient) BlockSearch(
 	query string,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
+) (*coretypes.ResultBlockSearch, error) {
 
-	result := new(ctypes.ResultBlockSearch)
+	result := new(coretypes.ResultBlockSearch)
 	params := map[string]interface{}{
 		"query":    query,
 		"order_by": orderBy,
@@ -531,8 +531,8 @@ func (c *baseRPCClient) Validators(
 	height *int64,
 	page,
 	perPage *int,
-) (*ctypes.ResultValidators, error) {
-	result := new(ctypes.ResultValidators)
+) (*coretypes.ResultValidators, error) {
+	result := new(coretypes.ResultValidators)
 	params := make(map[string]interface{})
 	if page != nil {
 		params["page"] = page
@@ -553,8 +553,8 @@ func (c *baseRPCClient) Validators(
 func (c *baseRPCClient) BroadcastEvidence(
 	ctx context.Context,
 	ev types.Evidence,
-) (*ctypes.ResultBroadcastEvidence, error) {
-	result := new(ctypes.ResultBroadcastEvidence)
+) (*coretypes.ResultBroadcastEvidence, error) {
+	result := new(coretypes.ResultBroadcastEvidence)
 	_, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result)
 	if err != nil {
 		return nil, err
diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go
index 1735ca9c3..0f908e271 100644
--- a/rpc/client/http/ws.go
+++ b/rpc/client/http/ws.go
@@ -9,9 +9,9 @@ import (
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	tmjson "github.com/tendermint/tendermint/libs/json"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
+	"github.com/tendermint/tendermint/libs/pubsub"
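The hunks above all follow one mechanical rule: where an import alias merely restates the package's own declared name, drop the alias and let call sites use the package name directly. A minimal sketch of the resulting style (the blank identifier is only there so the snippet compiles on its own):

    package example

    // The package at rpc/coretypes is already named coretypes, so the bare
    // import suffices; only the prefix at call sites changes
    // (ctypes.ResultBlock -> coretypes.ResultBlock).
    import "github.com/tendermint/tendermint/rpc/coretypes"

    var _ *coretypes.ResultBlock
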
rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -53,7 +53,7 @@ type wsEvents struct { } type wsSubscription struct { - res chan ctypes.ResultEvent + res chan coretypes.ResultEvent id string query string } @@ -119,7 +119,7 @@ func (w *wsEvents) Stop() error { return w.ws.Stop() } // // It returns an error if wsEvents is not running. func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { if !w.IsRunning() { return nil, rpcclient.ErrClientNotRunning @@ -134,7 +134,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCap = outCapacity[0] } - outc := make(chan ctypes.ResultEvent, outCap) + outc := make(chan coretypes.ResultEvent, outCap) w.mtx.Lock() defer w.mtx.Unlock() // subscriber param is ignored because Tendermint will override it with @@ -213,7 +213,7 @@ func (w *wsEvents) redoSubscriptionsAfter(d time.Duration) { } func isErrAlreadySubscribed(err error) bool { - return strings.Contains(err.Error(), tmpubsub.ErrAlreadySubscribed.Error()) + return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error()) } func (w *wsEvents) eventListener() { @@ -238,7 +238,7 @@ func (w *wsEvents) eventListener() { continue } - result := new(ctypes.ResultEvent) + result := new(coretypes.ResultEvent) err := tmjson.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index ae54928df..7b6a9bd20 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -24,7 +24,7 @@ import ( "context" "github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -61,26 +61,26 @@ type Client interface { // is easier to mock. type ABCIClient interface { // Reading from abci app - ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) - ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) + ABCIInfo(context.Context) (*coretypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) + opts ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxCommit(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) + BroadcastTxSync(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error) } // SignClient groups together the functionality needed to get valid signatures // and prove anything about the chain. 
 type SignClient interface {
-	Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error)
-	BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error)
-	BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error)
-	Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error)
-	Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error)
-	Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error)
+	Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error)
+	BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error)
+	BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error)
+	Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error)
+	Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error)
+	Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error)
 
 	// TxSearch defines a method to search for a paginated set of transactions by
 	// DeliverTx event search criteria.
@@ -90,7 +90,7 @@ type SignClient interface {
 		prove bool,
 		page, perPage *int,
 		orderBy string,
-	) (*ctypes.ResultTxSearch, error)
+	) (*coretypes.ResultTxSearch, error)
 
 	// BlockSearch defines a method to search for a paginated set of blocks by
 	// BeginBlock and EndBlock event search criteria.
@@ -99,29 +99,29 @@ type SignClient interface {
 		query string,
 		page, perPage *int,
 		orderBy string,
-	) (*ctypes.ResultBlockSearch, error)
+	) (*coretypes.ResultBlockSearch, error)
 }
 
 // HistoryClient provides access to data from genesis to now in large chunks.
 type HistoryClient interface {
-	Genesis(context.Context) (*ctypes.ResultGenesis, error)
-	GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error)
-	BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+	Genesis(context.Context) (*coretypes.ResultGenesis, error)
+	GenesisChunked(context.Context, uint) (*coretypes.ResultGenesisChunk, error)
+	BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error)
 }
 
 // StatusClient provides access to general chain info.
 type StatusClient interface {
-	Status(context.Context) (*ctypes.ResultStatus, error)
+	Status(context.Context) (*coretypes.ResultStatus, error)
 }
 
 // NetworkClient is general info about the network state. May not be needed
 // usually.
 type NetworkClient interface {
-	NetInfo(context.Context) (*ctypes.ResultNetInfo, error)
-	DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error)
-	ConsensusState(context.Context) (*ctypes.ResultConsensusState, error)
-	ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error)
-	Health(context.Context) (*ctypes.ResultHealth, error)
+	NetInfo(context.Context) (*coretypes.ResultNetInfo, error)
+	DumpConsensusState(context.Context) (*coretypes.ResultDumpConsensusState, error)
+	ConsensusState(context.Context) (*coretypes.ResultConsensusState, error)
+	ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error)
+	Health(context.Context) (*coretypes.ResultHealth, error)
 }
 
 // EventsClient is reactive, you can subscribe to any message, given the proper
@@ -134,7 +134,7 @@ type EventsClient interface {
 	//
 	// ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe
 	// or UnsubscribeAll.
-	Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error)
+	Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) //nolint:lll
 	// Unsubscribe unsubscribes given subscriber from query.
 	Unsubscribe(ctx context.Context, subscriber, query string) error
 	// UnsubscribeAll unsubscribes given subscriber from all the queries.
@@ -143,15 +143,15 @@ type EventsClient interface {
 
 // MempoolClient shows us data about current mempool state.
 type MempoolClient interface {
-	UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error)
-	NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error)
-	CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error)
+	UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error)
+	NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error)
+	CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)
 }
 
 // EvidenceClient is used for submitting evidence of malicious
 // behavior.
type EvidenceClient interface {
-	BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+	BroadcastEvidence(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error)
 }
 
 // RemoteClient is a Client, which can also return the remote network address.
diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go
index 2080e0d44..85302691d 100644
--- a/rpc/client/local/local.go
+++ b/rpc/client/local/local.go
@@ -9,10 +9,10 @@ import (
 	rpccore "github.com/tendermint/tendermint/internal/rpc/core"
 	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/libs/log"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
+	"github.com/tendermint/tendermint/libs/pubsub"
+	"github.com/tendermint/tendermint/libs/pubsub/query"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -72,15 +72,15 @@ func (c *Local) SetLogger(l log.Logger) {
 	c.Logger = l
 }
 
-func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 	return c.env.Status(c.ctx)
 }
 
-func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 	return c.env.ABCIInfo(c.ctx)
 }
 
-func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 	return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions)
 }
 
@@ -88,55 +88,55 @@ func (c *Local) ABCIQueryWithOptions(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
 }
 
-func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 	return c.env.BroadcastTxCommit(c.ctx, tx)
 }
 
-func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.env.BroadcastTxAsync(c.ctx, tx)
 }
 
-func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.env.BroadcastTxSync(c.ctx, tx)
 }
 
-func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) {
 	return c.env.UnconfirmedTxs(c.ctx, limit)
 }
 
-func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
+func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) {
 	return c.env.NumUnconfirmedTxs(c.ctx)
 }
 
-func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
+func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
 	return c.env.CheckTx(c.ctx, tx)
 }
 
-func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
+func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	return c.env.NetInfo(c.ctx)
 }
 
-func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
+func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
 	return c.env.DumpConsensusState(c.ctx)
 }
 
-func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
+func (c *Local) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
 	return c.env.GetConsensusState(c.ctx)
 }
 
-func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
 	return c.env.ConsensusParams(c.ctx, height)
 }
 
-func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
+func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
 	return c.env.Health(c.ctx)
 }
 
-func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
+func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
 	return c.env.UnsafeDialSeeds(c.ctx, seeds)
 }
 
@@ -146,76 +146,76 @@ func (c *Local) DialPeers(
 	persistent,
 	unconditional,
 	private bool,
-) (*ctypes.ResultDialPeers, error) {
+) (*coretypes.ResultDialPeers, error) {
 	return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
 }
 
-func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
 	return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
 }
 
-func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
+func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
 	return c.env.Genesis(c.ctx)
 }
 
-func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
+func (c *Local) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) {
 	return c.env.GenesisChunked(c.ctx, id)
 }
 
-func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
+func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
 	return c.env.Block(c.ctx, height)
 }
 
-func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
+func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
 	return c.env.BlockByHash(c.ctx, hash)
 }
 
-func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
+func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) {
 	return c.env.BlockResults(c.ctx, height)
 }
 
-func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
+func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
 	return c.env.Commit(c.ctx, height)
 }
 
-func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
+func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll
 	return c.env.Validators(c.ctx, height, page, perPage)
 }
 
-func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*ctypes.ResultTx, error) {
+func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) {
 	return c.env.Tx(c.ctx, hash, prove)
 }
 
 func (c *Local) TxSearch(
 	_ context.Context,
-	query string,
+	queryString string,
 	prove bool,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultTxSearch, error) {
-	return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
+) (*coretypes.ResultTxSearch, error) {
+	return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy)
 }
 
 func (c *Local) BlockSearch(
 	_ context.Context,
-	query string,
+	queryString string,
 	page, perPage *int,
 	orderBy string,
-) (*ctypes.ResultBlockSearch, error) {
-	return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy)
+) (*coretypes.ResultBlockSearch, error) {
+	return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy)
}
 
-func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
 	return c.env.BroadcastEvidence(c.ctx, ev)
 }
 
 func (c *Local) Subscribe(
 	ctx context.Context,
 	subscriber,
-	query string,
-	outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
-	q, err := tmquery.New(query)
+	queryString string,
+	outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) {
+	q, err := query.New(queryString)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse query: %w", err)
 	}
@@ -235,7 +235,7 @@ func (c *Local) Subscribe(
 		return nil, fmt.Errorf("failed to subscribe: %w", err)
 	}
 
-	outc := make(chan ctypes.ResultEvent, outCap)
+	outc := make(chan coretypes.ResultEvent, outCap)
 	go c.eventsRoutine(sub, subscriber, q, outc)
 
 	return outc, nil
@@ -244,12 +244,12 @@ func (c *Local) Subscribe(
 func (c *Local) eventsRoutine(
 	sub types.Subscription,
 	subscriber string,
-	q tmpubsub.Query,
-	outc chan<- ctypes.ResultEvent) {
+	q pubsub.Query,
+	outc chan<- coretypes.ResultEvent) {
 	for {
 		select {
 		case msg := <-sub.Out():
-			result := ctypes.ResultEvent{
+			result := coretypes.ResultEvent{
 				SubscriptionID: msg.SubscriptionID(),
 				Query:          q.String(),
 				Data:           msg.Data(),
@@ -266,7 +266,7 @@ func (c *Local) eventsRoutine(
 				}
 			}
 		case <-sub.Canceled():
-			if sub.Err() == tmpubsub.ErrUnsubscribed {
+			if sub.Err() == pubsub.ErrUnsubscribed {
 				return
 			}
 
@@ -282,7 +282,7 @@ func (c *Local) eventsRoutine(
 }
 
 // Try to resubscribe with exponential backoff.
-func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription {
+func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription {
 	attempts := 0
 	for {
 		if !c.IsRunning() {
@@ -299,17 +299,17 @@ func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscript
 	}
 }
 
-func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error {
-	args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber}
+func (c *Local) Unsubscribe(ctx context.Context, subscriber, queryString string) error {
+	args := pubsub.UnsubscribeArgs{Subscriber: subscriber}
 	var err error
-	args.Query, err = tmquery.New(query)
+	args.Query, err = query.New(queryString)
 	if err != nil {
 		// if this isn't a valid query it might be an ID, so
 		// we'll try that. It'll turn into an error when we
 		// try to unsubscribe. Eventually, perhaps, we'll want
 		// to change the interface to only allow
 		// unsubscription by ID, but that's a larger change.
-		args.ID = query
+		args.ID = queryString
 	}
 	return c.EventBus.Unsubscribe(ctx, args)
 }
diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go
index 0186ee9cc..700b08f5e 100644
--- a/rpc/client/mock/abci.go
+++ b/rpc/client/mock/abci.go
@@ -7,7 +7,7 @@ import (
 	"github.com/tendermint/tendermint/internal/proxy"
 	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -24,11 +24,11 @@ var (
 	_ client.ABCIClient = (*ABCIRecorder)(nil)
 )
 
-func (a ABCIApp) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
-	return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
+func (a ABCIApp) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
+	return &coretypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
 }
 
-func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 	return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
 
@@ -36,21 +36,21 @@ func (a ABCIApp) ABCIQueryWithOptions(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-	opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	q := a.App.Query(abci.RequestQuery{
 		Data:   data,
 		Path:   path,
 		Height: opts.Height,
 		Prove:  opts.Prove,
 	})
-	return &ctypes.ResultABCIQuery{Response: q}, nil
+	return &coretypes.ResultABCIQuery{Response: q}, nil
 }
 
 // NOTE: Caller should call a.App.Commit() separately,
 // this function does not actually wait for a commit.
 // TODO: Make it wait for a commit and set res.Height appropriately.
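The resubscribe helper above is documented as retrying "with exponential backoff", but the delay arithmetic falls outside the hunk's context lines. A standalone sketch of that retry shape, where the base delay and the cap of 25 attempts are assumptions for illustration rather than values read out of resubscribe itself:

    package example

    import "time"

    // retryWithBackoff retries try() with a doubling delay between
    // attempts; base delay and attempt cap are illustrative assumptions.
    func retryWithBackoff(try func() bool) {
        for attempt := uint(0); attempt < 25; attempt++ {
            if try() {
                return
            }
            time.Sleep((1 << attempt) * 10 * time.Millisecond)
        }
    }
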
-func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
-	res := ctypes.ResultBroadcastTxCommit{}
+func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
+	res := coretypes.ResultBroadcastTxCommit{}
 	res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 	if res.CheckTx.IsErr() {
 		return &res, nil
@@ -60,13 +60,13 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re
 	return &res, nil
 }
 
-func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 	// and this gets written in a background thread...
 	if !c.IsErr() {
 		go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
 	}
-	return &ctypes.ResultBroadcastTx{
+	return &coretypes.ResultBroadcastTx{
 		Code: c.Code,
 		Data: c.Data,
 		Log:  c.Log,
@@ -75,13 +75,13 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res
 	}, nil
 }
 
-func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 	// and this gets written in a background thread...
 	if !c.IsErr() {
 		go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
 	}
-	return &ctypes.ResultBroadcastTx{
+	return &coretypes.ResultBroadcastTx{
 		Code: c.Code,
 		Data: c.Data,
 		Log:  c.Log,
@@ -100,15 +100,15 @@ type ABCIMock struct {
 	Broadcast       Call
 }
 
-func (m ABCIMock) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (m ABCIMock) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 	res, err := m.Info.GetResponse(nil)
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
+	return &coretypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
 }
 
-func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 	return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
 
@@ -116,37 +116,37 @@ func (m ABCIMock) ABCIQueryWithOptions(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-	opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove})
 	if err != nil {
 		return nil, err
 	}
 	resQuery := res.(abci.ResponseQuery)
-	return &ctypes.ResultABCIQuery{Response: resQuery}, nil
+	return &coretypes.ResultABCIQuery{Response: resQuery}, nil
 }
 
-func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 	res, err := m.BroadcastCommit.GetResponse(tx)
 	if err != nil {
 		return nil, err
 	}
-	return res.(*ctypes.ResultBroadcastTxCommit), nil
+	return res.(*coretypes.ResultBroadcastTxCommit), nil
 }
 
-func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	res, err := m.Broadcast.GetResponse(tx)
 	if err != nil {
 		return nil, err
 	}
-	return res.(*ctypes.ResultBroadcastTx), nil
+	return res.(*coretypes.ResultBroadcastTx), nil
 }
 
-func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	res, err := m.Broadcast.GetResponse(tx)
 	if err != nil {
 		return nil, err
 	}
-	return res.(*ctypes.ResultBroadcastTx), nil
+	return res.(*coretypes.ResultBroadcastTx), nil
 }
 
 // ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
@@ -174,7 +174,7 @@ func (r *ABCIRecorder) addCall(call Call) {
 	r.Calls = append(r.Calls, call)
 }
 
-func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 	res, err := r.Client.ABCIInfo(ctx)
 	r.addCall(Call{
 		Name:     "abci_info",
@@ -188,7 +188,7 @@ func (r *ABCIRecorder) ABCIQuery(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-) (*ctypes.ResultABCIQuery, error) {
+) (*coretypes.ResultABCIQuery, error) {
 	return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
 
@@ -196,7 +196,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-	opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	res, err := r.Client.ABCIQueryWithOptions(ctx, path, data, opts)
 	r.addCall(Call{
 		Name: "abci_query",
@@ -207,7 +207,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(
 	return res, err
 }
 
-func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 	res, err := r.Client.BroadcastTxCommit(ctx, tx)
 	r.addCall(Call{
 		Name: "broadcast_tx_commit",
@@ -218,7 +218,7 @@ func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*cty
 	return res, err
 }
 
-func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	res, err := r.Client.BroadcastTxAsync(ctx, tx)
 	r.addCall(Call{
 		Name: "broadcast_tx_async",
@@ -229,7 +229,7 @@ func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctyp
 	return res, err
 }
 
-func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	res, err := r.Client.BroadcastTxSync(ctx, tx)
 	r.addCall(Call{
 		Name: "broadcast_tx_sync",
diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go
index 206b8fac2..25fbbc05d 100644
--- a/rpc/client/mock/abci_test.go
+++ b/rpc/client/mock/abci_test.go
@@ -14,7 +14,7 @@ import (
 	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/rpc/client"
 	"github.com/tendermint/tendermint/rpc/client/mock"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -36,7 +36,7 @@ func TestABCIMock(t *testing.T) {
 		// Broadcast commit depends on call
 		BroadcastCommit: mock.Call{
 			Args: goodTx,
-			Response: &ctypes.ResultBroadcastTxCommit{
+			Response: &coretypes.ResultBroadcastTxCommit{
 				CheckTx:   abci.ResponseCheckTx{Data: bytes.HexBytes("stand")},
 				DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")},
 			},
@@ -112,7 +112,7 @@ func TestABCIRecorder(t *testing.T) {
 	assert.Nil(info.Error)
 	assert.Nil(info.Args)
 	require.NotNil(info.Response)
-	ir, ok := info.Response.(*ctypes.ResultABCIInfo)
+	ir, ok := info.Response.(*coretypes.ResultABCIInfo)
 	require.True(ok)
 	assert.Equal("data", ir.Response.Data)
 	assert.Equal("v0.9.9", ir.Response.Version)
diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go
index b0f27ae88..4b858d067 100644
--- a/rpc/client/mock/client.go
+++ b/rpc/client/mock/client.go
@@ -21,7 +21,7 @@ import (
 	"github.com/tendermint/tendermint/internal/rpc/core"
 	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -75,15 +75,15 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) {
 	return nil, c.Error
 }
 
-func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (c Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 	return c.env.Status(&rpctypes.Context{})
 }
 
-func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
+func (c Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
 	return c.env.ABCIInfo(&rpctypes.Context{})
 }
 
-func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
+func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) {
 	return c.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions)
 }
 
@@ -91,47 +91,47 @@ func (c Client) ABCIQueryWithOptions(
 	ctx context.Context,
 	path string,
 	data bytes.HexBytes,
-	opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) {
 	return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
 }
 
-func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) {
 	return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx)
 }
 
-func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx)
 }
 
-func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) {
 	return c.env.BroadcastTxSync(&rpctypes.Context{}, tx)
 }
 
-func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
+func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) {
 	return c.env.CheckTx(&rpctypes.Context{}, tx)
 }
 
-func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
+func (c Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	return c.env.NetInfo(&rpctypes.Context{})
 }
 
-func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
+func (c Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) {
 	return c.env.GetConsensusState(&rpctypes.Context{})
 }
 
-func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
+func (c Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) {
 	return c.env.DumpConsensusState(&rpctypes.Context{})
 }
 
-func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
+func (c Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) {
 	return c.env.ConsensusParams(&rpctypes.Context{}, height)
 }
 
-func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
+func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
 	return c.env.Health(&rpctypes.Context{})
 }
 
-func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
+func (c Client) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
 	return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
 }
 
@@ -141,34 +141,34 @@ func (c Client) DialPeers(
 	persistent,
 	unconditional,
 	private bool,
-) (*ctypes.ResultDialPeers, error) {
+) (*coretypes.ResultDialPeers, error) {
 	return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
 }
 
-func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
 	return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
 }
 
-func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
+func (c Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) {
 	return c.env.Genesis(&rpctypes.Context{})
 }
 
-func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
+func (c Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) {
 	return c.env.Block(&rpctypes.Context{}, height)
 }
 
-func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultBlock, error) {
+func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) {
 	return c.env.BlockByHash(&rpctypes.Context{}, hash)
 }
 
-func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
+func (c Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) {
 	return c.env.Commit(&rpctypes.Context{}, height)
 }
 
-func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
+func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll
 	return c.env.Validators(&rpctypes.Context{}, height, page, perPage)
 }
 
-func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) {
 	return c.env.BroadcastEvidence(&rpctypes.Context{}, ev)
 }
diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go
index 10da67008..22548e891 100644
--- a/rpc/client/mock/status.go
+++ b/rpc/client/mock/status.go
@@ -4,7 +4,7 @@ import (
 	"context"
 
 	"github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 )
 
 // StatusMock returns the result specified by the Call
@@ -17,12 +17,12 @@ var (
 	_ client.StatusClient = (*StatusRecorder)(nil)
 )
 
-func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (m *StatusMock) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 	res, err := m.GetResponse(nil)
 	if err != nil {
 		return nil, err
 	}
-	return res.(*ctypes.ResultStatus), nil
+	return res.(*coretypes.ResultStatus), nil
 }
 
 // StatusRecorder can wrap another type (StatusMock, full client)
@@ -43,7 +43,7 @@ func (r *StatusRecorder) addCall(call Call) {
 	r.Calls = append(r.Calls, call)
 }
 
-func (r *StatusRecorder) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
+func (r *StatusRecorder) Status(ctx context.Context) (*coretypes.ResultStatus, error) {
 	res, err := r.Client.Status(ctx)
 	r.addCall(Call{
 		Name: "status",
diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go
index 37dce6095..98655280e 100644
--- a/rpc/client/mock/status_test.go
+++ b/rpc/client/mock/status_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/rpc/client/mock"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 )
 
 func TestStatus(t *testing.T) {
@@ -18,8 +18,8 @@ func TestStatus(t *testing.T) {
 
 	m := &mock.StatusMock{
 		Call: mock.Call{
-			Response: &ctypes.ResultStatus{
-				SyncInfo: ctypes.SyncInfo{
+			Response: &coretypes.ResultStatus{
+				SyncInfo: coretypes.SyncInfo{
 					LatestBlockHash:   bytes.HexBytes("block"),
 					LatestAppHash:     bytes.HexBytes("app"),
 					LatestBlockHeight: 10,
@@ -56,7 +56,7 @@ func TestStatus(t *testing.T) {
 	assert.Nil(rs.Args)
 	assert.Nil(rs.Error)
 	require.NotNil(rs.Response)
-	st, ok := rs.Response.(*ctypes.ResultStatus)
+	st, ok := rs.Response.(*coretypes.ResultStatus)
 	require.True(ok)
 	assert.EqualValues("block", st.SyncInfo.LatestBlockHash)
 	assert.EqualValues(10, st.SyncInfo.LatestBlockHeight)
diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go
index 5ad9e4244..8a6845831 100644
--- a/rpc/client/rpc_test.go
+++ b/rpc/client/rpc_test.go
@@ -16,7 +16,7 @@ import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/config"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/internal/mempool"
 	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
 	tmmath "github.com/tendermint/tendermint/libs/math"
@@ -24,7 +24,7 @@ import (
 	"github.com/tendermint/tendermint/rpc/client"
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
 	rpclocal "github.com/tendermint/tendermint/rpc/client/local"
-	ctypes "github.com/tendermint/tendermint/rpc/coretypes"
+	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
 	"github.com/tendermint/tendermint/types"
 )
@@ -425,8 +425,8 @@ func TestBroadcastTxSync(t *testing.T) {
 	defer cancel()
 
 	// TODO (melekes): use mempool which is set on RPC rather than getting it from node
-	mempool := getMempool(t, n)
-	initMempoolSize := mempool.Size()
+	pool := getMempool(t, n)
+	initMempoolSize := pool.Size()
 
 	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
@@ -434,18 +434,18 @@ func TestBroadcastTxSync(t *testing.T) {
 		require.Nil(t, err, "%d: %+v", i, err)
 		require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME
 
-		require.Equal(t, initMempoolSize+1, mempool.Size())
+		require.Equal(t, initMempoolSize+1, pool.Size())
 
-		txs := mempool.ReapMaxTxs(len(tx))
+		txs := pool.ReapMaxTxs(len(tx))
 		require.EqualValues(t, tx, txs[0])
-		mempool.Flush()
+		pool.Flush()
 	}
 }
 
-func getMempool(t *testing.T, srv service.Service) mempl.Mempool {
+func getMempool(t *testing.T, srv service.Service) mempool.Mempool {
 	t.Helper()
 	n, ok := srv.(interface {
-		Mempool() mempl.Mempool
+		Mempool() mempool.Mempool
 	})
 	require.True(t, ok)
 	return n.Mempool()
@@ -457,7 +457,7 @@ func TestBroadcastTxCommit(t *testing.T) {
 
 	n, conf := NodeSuite(t)
-	mempool := getMempool(t, n)
+	pool := getMempool(t, n)
 	for i, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
 		bres, err := c.BroadcastTxCommit(ctx, tx)
@@ -465,7 +465,7 @@ func TestBroadcastTxCommit(t *testing.T) {
 		require.True(t, bres.CheckTx.IsOK())
 		require.True(t, bres.DeliverTx.IsOK())
 
-		require.Equal(t, 0, mempool.Size())
+		require.Equal(t, 0, pool.Size())
 	}
 }
 
@@ -477,8 +477,8 @@ func TestUnconfirmedTxs(t *testing.T) {
 	ch := make(chan *abci.Response, 1)
 
 	n, conf := NodeSuite(t)
-	mempool := getMempool(t, n)
-	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+	pool := getMempool(t, n)
+	err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{})
 
 	require.NoError(t, err)
 
@@ -497,11 +497,11 @@ func TestUnconfirmedTxs(t *testing.T) {
 		assert.Equal(t, 1, res.Count)
 		assert.Equal(t, 1, res.Total)
-		assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
+		assert.Equal(t, pool.SizeBytes(), res.TotalBytes)
 		assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs))
 	}
 
-	mempool.Flush()
+	pool.Flush()
 }
 
 func TestNumUnconfirmedTxs(t *testing.T) {
@@ -512,9 +512,9 @@ func TestNumUnconfirmedTxs(t *testing.T) {
 
 	n, conf := NodeSuite(t)
 	ch := make(chan *abci.Response, 1)
-	mempool := getMempool(t, n)
+	pool := getMempool(t, n)
 
-	err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{})
+	err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{})
 	require.NoError(t, err)
 
 	// wait for tx to arrive in mempool.
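These mempool tests all use the same idiom: CheckTx is handed a callback that forwards the ABCI response on a channel, and the test then blocks on that channel with a timeout (the select appears in the next hunk). A reduced sketch of the waiting side, with the channel payload simplified to struct{} instead of *abci.Response:

    package example

    import (
        "errors"
        "time"
    )

    // waitForCallback mirrors the tests' select-with-timeout around the
    // CheckTx callback channel.
    func waitForCallback(ch <-chan struct{}) error {
        select {
        case <-ch:
            return nil
        case <-time.After(5 * time.Second):
            return errors.New("timed out waiting for CheckTx callback")
        }
    }
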
@@ -524,7 +524,7 @@ func TestNumUnconfirmedTxs(t *testing.T) {
 		t.Error("Timed out waiting for CheckTx callback")
 	}
 
-	mempoolSize := mempool.Size()
+	mempoolSize := pool.Size()
 	for i, c := range GetClients(t, n, conf) {
 		mc, ok := c.(client.MempoolClient)
 		require.True(t, ok, "%d", i)
@@ -533,10 +533,10 @@ func TestNumUnconfirmedTxs(t *testing.T) {
 
 		assert.Equal(t, mempoolSize, res.Count)
 		assert.Equal(t, mempoolSize, res.Total)
-		assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
+		assert.Equal(t, pool.SizeBytes(), res.TotalBytes)
 	}
 
-	mempool.Flush()
+	pool.Flush()
 }
 
 func TestCheckTx(t *testing.T) {
@@ -544,7 +544,7 @@ func TestCheckTx(t *testing.T) {
 	defer cancel()
 
 	n, conf := NodeSuite(t)
-	mempool := getMempool(t, n)
+	pool := getMempool(t, n)
 
 	for _, c := range GetClients(t, n, conf) {
 		_, _, tx := MakeTxKV()
@@ -553,7 +553,7 @@ func TestCheckTx(t *testing.T) {
 		require.NoError(t, err)
 
 		assert.Equal(t, abci.CodeTypeOK, res.Code)
-		assert.Equal(t, 0, mempool.Size(), "mempool must be empty")
+		assert.Equal(t, 0, pool.Size(), "mempool must be empty")
 	}
 }
 
@@ -781,10 +781,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP)
 	require.Len(t, bresults, 2)
 	require.Equal(t, 0, batch.Count())
 
-	bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit)
+	bresult1, ok := bresults[0].(*coretypes.ResultBroadcastTxCommit)
 	require.True(t, ok)
 	require.Equal(t, *bresult1, *r1)
-	bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit)
+	bresult2, ok := bresults[1].(*coretypes.ResultBroadcastTxCommit)
 	require.True(t, ok)
 	require.Equal(t, *bresult2, *r2)
 	apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1
@@ -802,10 +802,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP)
 	require.Len(t, qresults, 2)
 	require.Equal(t, 0, batch.Count())
 
-	qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery)
+	qresult1, ok := qresults[0].(*coretypes.ResultABCIQuery)
 	require.True(t, ok)
 	require.Equal(t, *qresult1, *q1)
-	qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery)
+	qresult2, ok := qresults[1].(*coretypes.ResultABCIQuery)
 	require.True(t, ok)
 	require.Equal(t, *qresult2, *q2)
 
diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go
index 4db46bbe3..4a4a13687 100644
--- a/rpc/grpc/api.go
+++ b/rpc/grpc/api.go
@@ -4,7 +4,7 @@ import (
 	"context"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	core "github.com/tendermint/tendermint/internal/rpc/core"
+	"github.com/tendermint/tendermint/internal/rpc/core"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go
index 45deb6b76..e680d195e 100644
--- a/rpc/grpc/grpc_test.go
+++ b/rpc/grpc/grpc_test.go
@@ -9,7 +9,7 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/libs/service"
-	core_grpc "github.com/tendermint/tendermint/rpc/grpc"
+	coregrpc "github.com/tendermint/tendermint/rpc/grpc"
 	rpctest "github.com/tendermint/tendermint/rpc/test"
 )
 
@@ -37,7 +37,7 @@ func TestBroadcastTx(t *testing.T) {
 	res, err := rpctest.GetGRPCClient(conf).BroadcastTx(
 		context.Background(),
-		&core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")},
+		&coregrpc.RequestBroadcastTx{Tx: []byte("this is a tx")},
 	)
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.CheckTx.Code)
diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go
index 42941ea68..f69926cb7 100644
--- a/rpc/jsonrpc/client/decode.go
+++ b/rpc/jsonrpc/client/decode.go
@@ -6,18 +6,18 @@
 import (
 	"fmt"
 
 	tmjson "github.com/tendermint/tendermint/libs/json"
-	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 func unmarshalResponseBytes(
 	responseBytes []byte,
-	expectedID types.JSONRPCIntID,
+	expectedID rpctypes.JSONRPCIntID,
 	result interface{},
 ) (interface{}, error) {
 
 	// Read response.  If rpc/core/types is imported, the result will unmarshal
 	// into the correct type.
-	response := &types.RPCResponse{}
+	response := &rpctypes.RPCResponse{}
 	if err := json.Unmarshal(responseBytes, response); err != nil {
 		return nil, fmt.Errorf("error unmarshaling: %w", err)
 	}
@@ -40,12 +40,12 @@ func unmarshalResponseBytes(
 
 func unmarshalResponseBytesArray(
 	responseBytes []byte,
-	expectedIDs []types.JSONRPCIntID,
+	expectedIDs []rpctypes.JSONRPCIntID,
 	results []interface{},
 ) ([]interface{}, error) {
 
 	var (
-		responses []types.RPCResponse
+		responses []rpctypes.RPCResponse
 	)
 
 	if err := json.Unmarshal(responseBytes, &responses); err != nil {
@@ -64,10 +64,10 @@ func unmarshalResponseBytesArray(
 	}
 
 	// Intersect IDs from responses with expectedIDs.
-	ids := make([]types.JSONRPCIntID, len(responses))
+	ids := make([]rpctypes.JSONRPCIntID, len(responses))
 	var ok bool
 	for i, resp := range responses {
-		ids[i], ok = resp.ID.(types.JSONRPCIntID)
+		ids[i], ok = resp.ID.(rpctypes.JSONRPCIntID)
 		if !ok {
 			return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID)
 		}
@@ -85,8 +85,8 @@ func unmarshalResponseBytesArray(
 	return results, nil
 }
 
-func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error {
-	m := make(map[types.JSONRPCIntID]bool, len(expectedIDs))
+func validateResponseIDs(ids, expectedIDs []rpctypes.JSONRPCIntID) error {
+	m := make(map[rpctypes.JSONRPCIntID]bool, len(expectedIDs))
 	for _, expectedID := range expectedIDs {
 		m[expectedID] = true
 	}
@@ -104,11 +104,11 @@ func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error {
 
 // From the JSON-RPC 2.0 spec:
 // id: It MUST be the same as the value of the id member in the Request Object.
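validateResponseIDs above is a plain set-membership check: build a map keyed by the expected IDs, then reject any received ID that is not in it. The same check over plain ints, for reference:

    package example

    import "fmt"

    // validateIDs reports an error if any received ID was never requested.
    func validateIDs(got, expected []int) error {
        m := make(map[int]bool, len(expected))
        for _, id := range expected {
            m[id] = true
        }
        for i, id := range got {
            if !m[id] {
                return fmt.Errorf("unsolicited ID #%d: %v", i, id)
            }
        }
        return nil
    }
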
-func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) error {
+func validateAndVerifyID(res *rpctypes.RPCResponse, expectedID rpctypes.JSONRPCIntID) error {
 	if err := validateResponseID(res.ID); err != nil {
 		return err
 	}
-	if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type
+	if expectedID != res.ID.(rpctypes.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type
 		return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID)
 	}
 	return nil
@@ -118,7 +118,7 @@ func validateResponseID(id interface{}) error {
 	if id == nil {
 		return errors.New("no ID")
 	}
-	_, ok := id.(types.JSONRPCIntID)
+	_, ok := id.(rpctypes.JSONRPCIntID)
 	if !ok {
 		return fmt.Errorf("expected JSONRPCIntID, but got: %T", id)
 	}
diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go
index 71c00137b..9c73b8a8c 100644
--- a/rpc/jsonrpc/client/http_json_client.go
+++ b/rpc/jsonrpc/client/http_json_client.go
@@ -13,7 +13,7 @@ import (
 	"time"
 
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 const (
@@ -189,7 +189,7 @@ func (c *Client) Call(
 ) (interface{}, error) {
 	id := c.nextRequestID()
 
-	request, err := types.MapToRequest(id, method, params)
+	request, err := rpctypes.MapToRequest(id, method, params)
 	if err != nil {
 		return nil, fmt.Errorf("failed to encode params: %w", err)
 	}
@@ -235,7 +235,7 @@ func (c *Client) NewRequestBatch() *RequestBatch {
 }
 
 func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) {
-	reqs := make([]types.RPCRequest, 0, len(requests))
+	reqs := make([]rpctypes.RPCRequest, 0, len(requests))
 	results := make([]interface{}, 0, len(requests))
 	for _, req := range requests {
 		reqs = append(reqs, req.request)
@@ -272,20 +272,20 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque
 	}
 
 	// collect ids to check responses IDs in unmarshalResponseBytesArray
-	ids := make([]types.JSONRPCIntID, len(requests))
+	ids := make([]rpctypes.JSONRPCIntID, len(requests))
 	for i, req := range requests {
-		ids[i] = req.request.ID.(types.JSONRPCIntID)
+		ids[i] = req.request.ID.(rpctypes.JSONRPCIntID)
 	}
 	return unmarshalResponseBytesArray(responseBytes, ids, results)
 }
 
-func (c *Client) nextRequestID() types.JSONRPCIntID {
+func (c *Client) nextRequestID() rpctypes.JSONRPCIntID {
 	c.mtx.Lock()
 	id := c.nextReqID
 	c.nextReqID++
 	c.mtx.Unlock()
-	return types.JSONRPCIntID(id)
+	return rpctypes.JSONRPCIntID(id)
 }
 
 //------------------------------------------------------------------------------------
@@ -293,7 +293,7 @@ func (c *Client) nextRequestID() types.JSONRPCIntID {
 // jsonRPCBufferedRequest encapsulates a single buffered request, as well as its
 // anticipated response structure.
 type jsonRPCBufferedRequest struct {
-	request types.RPCRequest
+	request rpctypes.RPCRequest
 	result  interface{} // The result will be deserialized into this object.
 }
@@ -354,7 +354,7 @@ func (b *RequestBatch) Call(
 	result interface{},
 ) (interface{}, error) {
 	id := b.client.nextRequestID()
-	request, err := types.MapToRequest(id, method, params)
+	request, err := rpctypes.MapToRequest(id, method, params)
 	if err != nil {
 		return nil, err
 	}
diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go
index 3f376ddb0..cd4ff0686 100644
--- a/rpc/jsonrpc/client/http_uri_client.go
+++ b/rpc/jsonrpc/client/http_uri_client.go
@@ -7,12 +7,12 @@ import (
 	"net/http"
 	"strings"
 
-	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 const (
 	// URIClientRequestID is a request ID used by URIClient
-	URIClientRequestID = types.JSONRPCIntID(-1)
+	URIClientRequestID = rpctypes.JSONRPCIntID(-1)
 )
 
 // URIClient is a JSON-RPC client, which sends POST form HTTP requests to the
diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go
index 8530f32d9..8d8f9d18d 100644
--- a/rpc/jsonrpc/client/ws_client.go
+++ b/rpc/jsonrpc/client/ws_client.go
@@ -15,7 +15,7 @@ import (
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	tmclient "github.com/tendermint/tendermint/rpc/client"
-	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 // WSOptions for WSClient.
@@ -50,16 +50,16 @@ type WSClient struct { // nolint: maligned
 	// Single user facing channel to read RPCResponses from, closed only when the
 	// client is being stopped.
-	ResponsesCh chan types.RPCResponse
+	ResponsesCh chan rpctypes.RPCResponse
 
 	// Callback, which will be called each time after successful reconnect.
 	onReconnect func()
 
 	// internal channels
-	send            chan types.RPCRequest // user requests
-	backlog         chan types.RPCRequest // stores a single user request received during a conn failure
-	reconnectAfter  chan error            // reconnect requests
-	readRoutineQuit chan struct{}         // a way for readRoutine to close writeRoutine
+	send            chan rpctypes.RPCRequest // user requests
+	backlog         chan rpctypes.RPCRequest // stores a single user request received during a conn failure
+	reconnectAfter  chan error               // reconnect requests
+	readRoutineQuit chan struct{}            // a way for readRoutine to close writeRoutine
 
 	// Maximum reconnect attempts (0 or greater; default: 25).
 	maxReconnectAttempts uint
@@ -152,15 +152,15 @@ func (c *WSClient) Start() error {
 		return err
 	}
 
-	c.ResponsesCh = make(chan types.RPCResponse)
+	c.ResponsesCh = make(chan rpctypes.RPCResponse)
 
-	c.send = make(chan types.RPCRequest)
+	c.send = make(chan rpctypes.RPCRequest)
 	// 1 additional error may come from the read/write
 	// goroutine depending on which failed first.
 	c.reconnectAfter = make(chan error, 1)
 	// capacity for 1 request. a user won't be able to send more because the send
 	// channel is unbuffered.
-	c.backlog = make(chan types.RPCRequest, 1)
+	c.backlog = make(chan rpctypes.RPCRequest, 1)
 
 	c.startReadWriteRoutines()
 	go c.reconnectRoutine()
@@ -195,7 +195,7 @@ func (c *WSClient) IsActive() bool {
 // Send the given RPC request to the server. Results will be available on
 // ResponsesCh, errors, if any, on ErrorsCh. Will block until send succeeds or
 // ctx.Done is closed.
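The WSClient fields above encode a deliberate design: send is unbuffered and backlog is created with capacity 1, so at most one user request can be parked while the connection is down. A sketch of that single-slot parking pattern, with a placeholder request type and function name:

    package example

    type request struct{ method string }

    // park tries to stash one request in the single-slot backlog and
    // reports whether the slot was free; a second caller gets false
    // instead of blocking.
    func park(backlog chan request, r request) bool {
        select {
        case backlog <- r:
            return true
        default:
            return false
        }
    }
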
-func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { +func (c *WSClient) Send(ctx context.Context, request rpctypes.RPCRequest) error { select { case c.send <- request: c.Logger.Info("sent a request", "req", request) @@ -210,7 +210,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { // Call enqueues a call request onto the Send queue. Requests are JSON encoded. func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error { - request, err := types.MapToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.MapToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -220,7 +220,7 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in // CallWithArrayParams enqueues a call request onto the Send queue. Params are // in a form of array (e.g. []interface{}{"abcd"}). Requests are JSON encoded. func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error { - request, err := types.ArrayToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.ArrayToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -229,12 +229,12 @@ func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, param // Private methods -func (c *WSClient) nextRequestID() types.JSONRPCIntID { +func (c *WSClient) nextRequestID() rpctypes.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ c.mtx.Unlock() - return types.JSONRPCIntID(id) + return rpctypes.JSONRPCIntID(id) } func (c *WSClient) dial() error { @@ -462,7 +462,7 @@ func (c *WSClient) readRoutine() { return } - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(data, &response) if err != nil { c.Logger.Error("failed to parse response", "err", err, "data", string(data)) diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index bb8c149f6..208313e79 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -14,7 +14,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var wsCallTimeout = 5 * time.Second @@ -41,7 +41,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - var req types.RPCRequest + var req rpctypes.RPCRequest err = json.Unmarshal(in, &req) if err != nil { panic(err) @@ -56,7 +56,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mtx.RUnlock() res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res, ID: req.ID}) + emptyRespBytes, _ := json.Marshal(rpctypes.RPCResponse{Result: res, ID: req.ID}) if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { return } diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index b5e422280..5013590b6 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -18,9 +18,9 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - client "github.com/tendermint/tendermint/rpc/jsonrpc/client" - server "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/jsonrpc/client" + 
"github.com/tendermint/tendermint/rpc/jsonrpc/server" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Client and Server should work over tcp or unix sockets @@ -64,23 +64,23 @@ var Routes = map[string]*server.RPCFunc{ "echo_int": server.NewRPCFunc(EchoIntResult, "arg", false), } -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoWSResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(ctx *rpctypes.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(ctx *rpctypes.Context, v []byte) (*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(ctx *rpctypes.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 3e31d3ec1..fbc0cca79 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -12,8 +12,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" - "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + JSON handler @@ -23,7 +23,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { - res := types.RPCInvalidRequestError(nil, + res := rpctypes.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -41,20 +41,20 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han // first try to unmarshal the incoming request as an array of RPC requests var ( - requests []types.RPCRequest - responses []types.RPCResponse + requests []rpctypes.RPCRequest + responses []rpctypes.RPCResponse ) if err := json.Unmarshal(b, &requests); err != nil { // next, try to unmarshal as a single request - var request types.RPCRequest + var request rpctypes.RPCRequest if err := json.Unmarshal(b, &request); err != nil { - res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) + res := rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } return } - requests = []types.RPCRequest{request} + requests = []rpctypes.RPCRequest{request} } // Set the default response cache to true unless @@ -77,25 +77,25 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if len(r.URL.Path) > 1 { responses = append( responses, - types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + rpctypes.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is 
invalid", r.URL.Path)), ) c = false continue } rpcFunc, ok := funcMap[request.Method] if !ok || rpcFunc.ws { - responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + responses = append(responses, rpctypes.RPCMethodNotFoundError(request.ID)) c = false continue } - ctx := &types.Context{JSONReq: &request, HTTPReq: r} + ctx := &rpctypes.Context{JSONReq: &request, HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { responses = append( responses, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ) c = false continue @@ -114,22 +114,22 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han switch e := err.(type) { // if no error then return a success response case nil: - responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) + responses = append(responses, rpctypes.NewRPCSuccessResponse(request.ID, result)) // if this already of type RPC error then forward that error - case *types.RPCError: - responses = append(responses, types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) + case *rpctypes.RPCError: + responses = append(responses, rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) c = false default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - responses = append(responses, types.RPCInvalidRequestError(request.ID, err)) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + responses = append(responses, rpctypes.RPCInvalidRequestError(request.ID, err)) c = false // lastly default all remaining errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - responses = append(responses, types.RPCInternalError(request.ID, err)) + responses = append(responses, rpctypes.RPCInternalError(request.ID, err)) c = false } } @@ -277,7 +277,7 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st w.Write(buf.Bytes()) // nolint: errcheck } -func hasDefaultHeight(r types.RPCRequest, h []reflect.Value) bool { +func hasDefaultHeight(r rpctypes.RPCRequest, h []reflect.Value) bool { switch r.Method { case "block", "block_results", "commit", "consensus_params", "validators": return len(h) < 2 || h[1].IsZero() diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 529f7619c..64e7597fd 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), - "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", true), 
+ "c": NewRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), + "block": NewRPCFunc(func(ctx *rpctypes.Context, h int) (string, error) { return "block", nil }, "height", true), } mux := http.NewServeMux() logger := log.NewNopLogger() @@ -40,21 +40,21 @@ func TestRPCParams(t *testing.T) { expectedID interface{} }{ // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, // id not captured in JSON parsing failures {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", rpctypes.JSONRPCStringID("0")}, // no ID - notification // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": {}}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", rpctypes.JSONRPCStringID("0")}, } for i, tt := range tests { @@ -71,9 +71,9 @@ func TestRPCParams(t *testing.T) { continue } - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) @@ -93,12 +93,12 @@ func TestJSONRPCID(t *testing.T) { expectedID interface{} }{ // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, types.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, types.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(-1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": 
["a", "10"]}`, false, rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("abc")}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(0)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(-1)}, // bad id {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, @@ -119,11 +119,11 @@ func TestJSONRPCID(t *testing.T) { } res.Body.Close() - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) err = json.Unmarshal(blob, recv) assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { @@ -185,7 +185,7 @@ func TestRPCNotificationInBatch(t *testing.T) { } res.Body.Close() - var responses []types.RPCResponse + var responses []rpctypes.RPCResponse // try to unmarshal an array first err = json.Unmarshal(blob, &responses) if err != nil { @@ -195,14 +195,14 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } else { // we were expecting an error here, so let's unmarshal a single response - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(blob, &response) if err != nil { t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) continue } // have a single-element result - responses = []types.RPCResponse{response} + responses = []rpctypes.RPCResponse{response} } } if tt.expectCount != len(responses) { @@ -210,7 +210,7 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } for _, response := range responses { - assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, response, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) } } } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 549671241..49e1e510e 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -16,7 +16,7 @@ import ( "golang.org/x/net/netutil" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Config is a RPC server configuration. @@ -105,7 +105,7 @@ func ServeTLS( // source: https://www.jsonrpc.org/historical/json-rpc-over-http.html func WriteRPCResponseHTTPError( w http.ResponseWriter, - res types.RPCResponse, + res rpctypes.RPCResponse, ) error { if res.Error == nil { panic("tried to write http error response without RPC error") @@ -134,7 +134,7 @@ func WriteRPCResponseHTTPError( // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. // If the rpc response can be cached, add cache-control to the response header. 
-func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...types.RPCResponse) error { +func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...rpctypes.RPCResponse) error { var v interface{} if len(res) == 1 { v = res[0] @@ -189,7 +189,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler if e := recover(); e != nil { // If RPCResponse - if res, ok := e.(types.RPCResponse); ok { + if res, ok := e.(rpctypes.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, false, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -208,7 +208,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - res := types.RPCInternalError(types.JSONRPCIntID(-1), err) + res := rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 823719e41..39e713565 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) type sampleResult struct { @@ -107,11 +107,11 @@ func TestServeTLS(t *testing.T) { } func TestWriteRPCResponseHTTP(t *testing.T) { - id := types.JSONRPCIntID(-1) + id := rpctypes.JSONRPCIntID(-1) // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, true, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) resp := w.Result() body, err := ioutil.ReadAll(resp.Body) @@ -132,8 +132,8 @@ func TestWriteRPCResponseHTTP(t *testing.T) { w = httptest.NewRecorder() err = WriteRPCResponseHTTP(w, false, - types.NewRPCSuccessResponse(id, &sampleResult{"hello"}), - types.NewRPCSuccessResponse(id, &sampleResult{"world"})) + rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"}), + rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) require.NoError(t, err) resp = w.Result() body, err = ioutil.ReadAll(resp.Body) @@ -162,7 +162,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { func TestWriteRPCResponseHTTPError(t *testing.T) { w := httptest.NewRecorder() - err := WriteRPCResponseHTTPError(w, types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) + err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) resp := w.Result() body, err := ioutil.ReadAll(resp.Body) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index a07401578..07b3616b4 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -11,8 +11,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + URI handler @@ -22,12 +22,12 @@ var reInt = 
regexp.MustCompile(`^-?[0-9]+$`) // convert from a function name to the http handler func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { // Always return -1 as there's no ID here. - dummyID := types.JSONRPCIntID(-1) // URIClientRequestID + dummyID := rpctypes.JSONRPCIntID(-1) // URIClientRequestID // Exception for websocket endpoints if rpcFunc.ws { return func(w http.ResponseWriter, r *http.Request) { - res := types.RPCMethodNotFoundError(dummyID) + res := rpctypes.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -38,12 +38,12 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { logger.Debug("HTTP HANDLER", "req", r) - ctx := &types.Context{HTTPReq: r} + ctx := &rpctypes.Context{HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} fnArgs, err := httpParamsToArgs(rpcFunc, r) if err != nil { - res := types.RPCInvalidParamsError(dummyID, + res := rpctypes.RPCInvalidParamsError(dummyID, fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -60,29 +60,29 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit switch e := err.(type) { // if no error then return a success response case nil: - res := types.NewRPCSuccessResponse(dummyID, result) + res := rpctypes.NewRPCSuccessResponse(dummyID, result) if wErr := WriteRPCResponseHTTP(w, rpcFunc.cache, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } // if this already of type RPC error then forward that error. - case *types.RPCError: - res := types.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + res := rpctypes.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } default: // we need to unwrap the error and parse it accordingly - var res types.RPCResponse + var res rpctypes.RPCResponse switch errors.Unwrap(err) { - case ctypes.ErrZeroOrNegativeHeight, - ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, - ctypes.ErrInvalidRequest: - res = types.RPCInvalidRequestError(dummyID, err) + case coretypes.ErrZeroOrNegativeHeight, + coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, + coretypes.ErrInvalidRequest: + res = rpctypes.RPCInvalidRequestError(dummyID, err) default: // ctypes.ErrHeightNotAvailable, ctypes.ErrHeightExceedsChainHead: - res = types.RPCInternalError(dummyID, err) + res = rpctypes.RPCInternalError(dummyID, err) } if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index 86316f8e5..92ea6f2c0 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/bytes" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -134,7 +134,7 @@ func TestParseJSONArray(t *testing.T) { } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} 
call := NewRPCFunc(demo, "height,name", false) cases := []struct { @@ -171,7 +171,7 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name", false) cases := []struct { diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index 63731799b..2271d03f8 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // WebSocket handler @@ -111,7 +111,7 @@ type wsConnection struct { remoteAddr string baseConn *websocket.Conn // writeChan is never closed, to allow WriteRPCResponse() to fail. - writeChan chan types.RPCResponse + writeChan chan rpctypes.RPCResponse // chan, which is closed when/if readRoutine errors // used to abort writeRoutine @@ -224,7 +224,7 @@ func (wsc *wsConnection) Start() error { if err := wsc.RunState.Start(); err != nil { return err } - wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) + wsc.writeChan = make(chan rpctypes.RPCResponse, wsc.writeChanCapacity) // Read subscriptions/unsubscriptions to events go wsc.readRoutine() @@ -257,7 +257,7 @@ func (wsc *wsConnection) GetRemoteAddr() string { // WriteRPCResponse pushes a response to the writeChan, and blocks until it is // accepted. // It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { +func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) error { select { case <-wsc.Quit(): return errors.New("connection was stopped") @@ -271,7 +271,7 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCRes // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. // It implements WSRPCConnection. 
It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponse(resp rpctypes.RPCResponse) bool { select { case <-wsc.Quit(): return false @@ -304,7 +304,7 @@ func (wsc *wsConnection) readRoutine() { err = fmt.Errorf("WSJSONRPC: %v", r) } wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } go wsc.readRoutine() @@ -340,11 +340,11 @@ func (wsc *wsConnection) readRoutine() { } dec := json.NewDecoder(r) - var request types.RPCRequest + var request rpctypes.RPCRequest err = dec.Decode(&request) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { + rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue @@ -363,19 +363,19 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue } - ctx := &types.Context{JSONReq: &request, WSConn: wsc} + ctx := &rpctypes.Context{JSONReq: &request, WSConn: wsc} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } @@ -389,27 +389,27 @@ func (wsc *wsConnection) readRoutine() { // TODO: Need to encode args/returns to string if we want to log them wsc.Logger.Info("WSJSONRPC", "method", request.Method) - var resp types.RPCResponse + var resp rpctypes.RPCResponse result, err := unreflectResult(returns) switch e := err.(type) { // if no error then return a success response case nil: - resp = types.NewRPCSuccessResponse(request.ID, result) + resp = rpctypes.NewRPCSuccessResponse(request.ID, result) // if this already of type RPC error then forward that error - case *types.RPCError: - resp = types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + resp = rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - resp = types.RPCInvalidRequestError(request.ID, err) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + resp = rpctypes.RPCInvalidRequestError(request.ID, err) // lastly default all remaining 
errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - resp = types.RPCInternalError(request.ID, err) + resp = rpctypes.RPCInternalError(request.ID, err) } } diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index 42a96d1d3..b691172a4 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestWebsocketManagerHandler(t *testing.T) { @@ -26,8 +26,8 @@ func TestWebsocketManagerHandler(t *testing.T) { } // check basic functionality works - req, err := types.MapToRequest( - types.JSONRPCStringID("TestWebsocketManager"), + req, err := rpctypes.MapToRequest( + rpctypes.JSONRPCStringID("TestWebsocketManager"), "c", map[string]interface{}{"s": "a", "i": 10}, ) @@ -35,7 +35,7 @@ func TestWebsocketManagerHandler(t *testing.T) { err = c.WriteJSON(req) require.NoError(t, err) - var resp types.RPCResponse + var resp rpctypes.RPCResponse err = c.ReadJSON(&resp) require.NoError(t, err) require.Nil(t, resp.Error) @@ -44,7 +44,7 @@ func TestWebsocketManagerHandler(t *testing.T) { func newWSServer() *httptest.Server { funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } wm := NewWebsocketManager(funcMap) wm.SetLogger(log.TestingLogger()) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 91cff8386..01aa83b4f 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -8,13 +8,13 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" - nm "github.com/tendermint/tendermint/node" - ctypes "github.com/tendermint/tendermint/rpc/coretypes" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" + "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/rpc/coretypes" + coregrpc "github.com/tendermint/tendermint/rpc/grpc" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -24,13 +24,13 @@ type Options struct { suppressStdout bool } -func waitForRPC(ctx context.Context, conf *cfg.Config) { +func waitForRPC(ctx context.Context, conf *config.Config) { laddr := conf.RPC.ListenAddress client, err := rpcclient.New(laddr) if err != nil { panic(err) } - result := new(ctypes.ResultStatus) + result := new(coretypes.ResultStatus) for { _, err := client.Call(ctx, "status", map[string]interface{}{}, result) if err == nil { @@ -42,10 +42,10 @@ func waitForRPC(ctx context.Context, conf *cfg.Config) { } } -func waitForGRPC(ctx context.Context, conf *cfg.Config) { +func waitForGRPC(ctx context.Context, conf *config.Config) { client := GetGRPCClient(conf) for { - _, err := client.Ping(ctx, &core_grpc.RequestPing{}) + _, err := client.Ping(ctx, &coregrpc.RequestPing{}) if err == nil { return } @@ -66,8 +66,8 @@ func makeAddrs() (string, string, string) { fmt.Sprintf("tcp://127.0.0.1:%d", randPort()) } -func 
CreateConfig(testName string) *cfg.Config { - c := cfg.ResetTestRoot(testName) +func CreateConfig(testName string) *config.Config { + c := config.ResetTestRoot(testName) // and we use random ports to run in parallel tm, rpc, grpc := makeAddrs() @@ -78,15 +78,15 @@ func CreateConfig(testName string) *cfg.Config { return c } -func GetGRPCClient(conf *cfg.Config) core_grpc.BroadcastAPIClient { +func GetGRPCClient(conf *config.Config) coregrpc.BroadcastAPIClient { grpcAddr := conf.RPC.GRPCListenAddress - return core_grpc.StartGRPCClient(grpcAddr) + return coregrpc.StartGRPCClient(grpcAddr) } type ServiceCloser func(context.Context) error func StartTendermint(ctx context.Context, - conf *cfg.Config, + conf *config.Config, app abci.Application, opts ...func(*Options)) (service.Service, ServiceCloser, error) { @@ -101,12 +101,12 @@ func StartTendermint(ctx context.Context, logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) } papp := abciclient.NewLocalCreator(app) - node, err := nm.New(conf, logger, papp, nil) + tmNode, err := node.New(conf, logger, papp, nil) if err != nil { return nil, func(_ context.Context) error { return nil }, err } - err = node.Start() + err = tmNode.Start() if err != nil { return nil, func(_ context.Context) error { return nil }, err } @@ -119,11 +119,11 @@ func StartTendermint(ctx context.Context, fmt.Println("Tendermint running!") } - return node, func(ctx context.Context) error { - if err := node.Stop(); err != nil { + return tmNode, func(ctx context.Context) error { + if err := tmNode.Stop(); err != nil { logger.Error("Error when trying to stop node", "err", err) } - node.Wait() + tmNode.Wait() os.RemoveAll(conf.RootDir) return nil }, nil diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index d21dc6c44..6b60ac2fc 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -14,7 +14,7 @@ import ( "os" "strings" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -41,7 +41,7 @@ func main() { // because of the byte array in BlockPart // leading to unmarshal error: unexpected end of JSON input br := bufio.NewReaderSize(f, int(2*types.BlockPartSizeBytes)) - dec := cs.NewWALEncoder(walFile) + dec := consensus.NewWALEncoder(walFile) for { msgJSON, _, err := br.ReadLine() @@ -55,7 +55,7 @@ func main() { continue } - var msg cs.TimedWALMessage + var msg consensus.TimedWALMessage err = tmjson.Unmarshal(msgJSON, &msg) if err != nil { panic(fmt.Errorf("failed to unmarshal json: %v", err)) diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 886e5402f..5a5a0abac 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -12,7 +12,7 @@ import ( "io" "os" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" ) @@ -28,7 +28,7 @@ func main() { } defer f.Close() - dec := cs.NewWALDecoder(f) + dec := consensus.NewWALDecoder(f) for { msg, err := dec.Decode() if err == io.EOF { @@ -48,7 +48,7 @@ func main() { } if err == nil { - if endMsg, ok := msg.Msg.(cs.EndHeightMessage); ok { + if endMsg, ok := msg.Msg.(consensus.EndHeightMessage); ok { _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", endMsg.Height))) } } diff --git a/store/store_test.go b/store/store_test.go index e00715eb7..03db91fa8 100644 --- 
a/store/store_test.go +++ b/store/store_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/test/factory" @@ -46,13 +46,13 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { } func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { - config := cfg.ResetTestRoot("blockchain_reactor_test") + cfg := config.ResetTestRoot("blockchain_reactor_test") blockDB := dbm.NewMemDB() - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } - return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } + return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) } } func freshBlockStore() (*BlockStore, dbm.DB) { @@ -292,9 +292,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg := config.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) @@ -348,9 +348,9 @@ func TestLoadBlockPart(t *testing.T) { } func TestPruneBlocks(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg := config.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() bs := NewBlockStore(db) diff --git a/types/protobuf.go b/types/protobuf.go index 7cd224665..f82965fbf 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -2,7 +2,7 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" ) //------------------------------------------------------- @@ -22,7 +22,7 @@ func (tm2pb) Validator(val *Validator) abci.Validator { // XXX: panics on unknown pubkey type func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { - pk, err := cryptoenc.PubKeyToProto(val.PubKey) + pk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } @@ -52,7 +52,7 @@ type pb2tm struct{} func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { - pub, err := cryptoenc.PubKeyFromProto(v.PubKey) + pub, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { return nil, err } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index b6900f40c..a33d031e2 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -9,7 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + 
"github.com/tendermint/tendermint/crypto/encoding" ) func TestABCIPubKey(t *testing.T) { @@ -19,9 +19,9 @@ func TestABCIPubKey(t *testing.T) { } func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { - abciPubKey, err := cryptoenc.PubKeyToProto(pk) + abciPubKey, err := encoding.PubKeyToProto(pk) require.NoError(t, err) - pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) + pk2, err := encoding.PubKeyFromProto(abciPubKey) require.NoError(t, err) require.Equal(t, pk, pk2) return nil diff --git a/types/validator.go b/types/validator.go index fb3fa2d76..ded8156bf 100644 --- a/types/validator.go +++ b/types/validator.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/tendermint/tendermint/crypto" - ce "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -114,7 +114,7 @@ func ValidatorListString(vals []*Validator) string { // as its redundant with the pubkey. This also excludes ProposerPriority // which changes every round. func (v *Validator) Bytes() []byte { - pk, err := ce.PubKeyToProto(v.PubKey) + pk, err := encoding.PubKeyToProto(v.PubKey) if err != nil { panic(err) } @@ -137,7 +137,7 @@ func (v *Validator) ToProto() (*tmproto.Validator, error) { return nil, errors.New("nil validator") } - pk, err := ce.PubKeyToProto(v.PubKey) + pk, err := encoding.PubKeyToProto(v.PubKey) if err != nil { return nil, err } @@ -159,7 +159,7 @@ func ValidatorFromProto(vp *tmproto.Validator) (*Validator, error) { return nil, errors.New("nil validator") } - pk, err := ce.PubKeyFromProto(vp.PubKey) + pk, err := encoding.PubKeyFromProto(vp.PubKey) if err != nil { return nil, err } From bb8ffcb95bb2eafc297b7e162bd5e58d09c8ea1e Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Sep 2021 11:46:42 -0400 Subject: [PATCH 020/174] store: move pacakge to internal (#6978) --- cmd/tendermint/commands/reindex_event.go | 2 +- internal/blocksync/v0/reactor.go | 2 +- internal/blocksync/v0/reactor_test.go | 2 +- internal/blocksync/v2/reactor_test.go | 2 +- internal/consensus/byzantine_test.go | 2 +- internal/consensus/common_test.go | 2 +- internal/consensus/mempool_test.go | 2 +- internal/consensus/reactor_test.go | 2 +- internal/consensus/replay_file.go | 2 +- internal/consensus/replay_test.go | 2 +- internal/consensus/wal_generator.go | 2 +- internal/evidence/pool_test.go | 2 +- internal/inspect/inspect.go | 2 +- internal/state/execution_test.go | 2 +- internal/state/validation_test.go | 2 +- internal/statesync/reactor.go | 2 +- internal/statesync/reactor_test.go | 2 +- {store => internal/store}/store.go | 0 {store => internal/store}/store_test.go | 0 node/node.go | 2 +- node/node_test.go | 2 +- node/setup.go | 2 +- 22 files changed, 20 insertions(+), 20 deletions(-) rename {store => internal/store}/store.go (100%) rename {store => internal/store}/store_test.go (100%) diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index ca176df11..9545e32d7 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -15,8 +15,8 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/rpc/coretypes" - "github.com/tendermint/tendermint/store" 
"github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index 4c378d040..286df0ef0 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -10,11 +10,11 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go index 14e0fbf1d..e947581aa 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -18,10 +18,10 @@ import ( "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go index 8cb675a0a..579a46c19 100644 --- a/internal/blocksync/v2/reactor_test.go +++ b/internal/blocksync/v2/reactor_test.go @@ -24,11 +24,11 @@ import ( "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" + tmstore "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - tmstore "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index bff9f4ace..81a4f8be8 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -19,11 +19,11 @@ import ( mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 2bd926c45..5251b0fdb 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -24,6 +24,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" 
"github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" @@ -32,7 +33,6 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 58784a2ee..4b2037469 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -16,7 +16,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index a5f06c884..baffbac30 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -26,11 +26,11 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" sm "github.com/tendermint/tendermint/internal/state" statemocks "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 47ed61874..d009533ba 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -15,10 +15,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 248cf6da3..97017985d 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -29,13 +29,13 @@ import ( "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index ccc6685fc..7e31188fa 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -18,9 +18,9 @@ import ( "github.com/tendermint/tendermint/config" 
"github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 5be2ec318..f38c09e02 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -16,9 +16,9 @@ import ( sm "github.com/tendermint/tendermint/internal/state" smmocks "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index c48776d3c..90e615341 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -12,9 +12,9 @@ import ( "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmstrings "github.com/tendermint/tendermint/libs/strings" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "golang.org/x/sync/errgroup" diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index d777d1699..8eff0430d 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -21,9 +21,9 @@ import ( sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index d2427f35a..08c54f139 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -17,11 +17,11 @@ import ( sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" testfactory "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 196ac1a9d..931e2e7fc 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -16,12 +16,12 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" 
"github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index a0312428c..98ee9d26e 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -20,12 +20,12 @@ import ( proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/store/store.go b/internal/store/store.go similarity index 100% rename from store/store.go rename to internal/store/store.go diff --git a/store/store_test.go b/internal/store/store_test.go similarity index 100% rename from store/store_test.go rename to internal/store/store_test.go diff --git a/node/node.go b/node/node.go index 4e44a5678..fc43aace4 100644 --- a/node/node.go +++ b/node/node.go @@ -24,6 +24,7 @@ import ( rpccore "github.com/tendermint/tendermint/internal/rpc/core" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -34,7 +35,6 @@ import ( tmgrpc "github.com/tendermint/tendermint/privval/grpc" grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port diff --git a/node/node_test.go b/node/node_test.go index a940fef54..30e7a8f13 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -27,12 +27,12 @@ import ( "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) diff --git a/node/setup.go b/node/setup.go index 63fb4c739..3f305125d 100644 --- a/node/setup.go +++ b/node/setup.go @@ -29,11 +29,11 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" protop2p 
"github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" From 8a171b84261ed938ba3cc428169bf54026eaadb8 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Sep 2021 12:42:20 -0400 Subject: [PATCH 021/174] e2e: improve manifest sorting algorithim (#6979) --- test/e2e/generator/generate.go | 17 +++---- test/e2e/generator/main.go | 65 ++++++++++++++------------ test/e2e/pkg/manifest.go | 84 ++++++++++++++++++++++++---------- 3 files changed, 105 insertions(+), 61 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 2d6945e65..78da8ed4a 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -80,26 +80,27 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { return nil, err } + if len(manifest.Nodes) < opts.MinNetworkSize { + continue + } + if len(manifest.Nodes) == 1 { if opt["p2p"] == HybridP2PMode { continue } } - manifests = append(manifests, manifest) - } - if opts.Sorted { - // When the sorted flag is set (generally, as long as - // groups aren't set), - e2e.SortManifests(manifests) + manifests = append(manifests, manifest) } return manifests, nil } type Options struct { - P2P P2PMode - Sorted bool + MinNetworkSize int + NumGroups int + Directory string + P2P P2PMode } type P2PMode string diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 7dd096760..8ec373944 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" - "math" "math/rand" "os" "path/filepath" @@ -11,6 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) const ( @@ -26,6 +26,7 @@ func main() { // CLI is the Cobra-based command-line interface. type CLI struct { root *cobra.Command + opts Options } // NewCLI sets up the CLI. @@ -37,31 +38,36 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - dir, err := cmd.Flags().GetString("dir") + var opts Options + var err error + + cli.opts.Directory, err = cmd.Flags().GetString("dir") if err != nil { return err } - groups, err := cmd.Flags().GetInt("groups") + + cli.opts.NumGroups, err = cmd.Flags().GetInt("groups") if err != nil { return err } + + cli.opts.MinNetworkSize, err = cmd.Flags().GetInt("min-size") + if err != nil { + return err + } + p2pMode, err := cmd.Flags().GetString("p2p") if err != nil { return err } - var opts Options switch mode := P2PMode(p2pMode); mode { case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - opts = Options{P2P: mode} + opts.P2P = mode default: return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) } - if groups == 0 { - opts.Sorted = true - } - - return cli.generate(dir, groups, opts) + return cli.generate() }, } @@ -70,40 +76,43 @@ func NewCLI() *CLI { cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") + cli.root.PersistentFlags().IntP("min-size", "m", 1, "Minimum Network Size") return cli } // generate generates manifests in a directory. 
-func (cli *CLI) generate(dir string, groups int, opts Options) error { - err := os.MkdirAll(dir, 0755) +func (cli *CLI) generate() error { + err := os.MkdirAll(cli.opts.Directory, 0755) if err != nil { return err } - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), opts) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { return err } - if groups <= 0 { - for i, manifest := range manifests { - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-%04d.toml", i))) - if err != nil { + + switch { + case cli.opts.NumGroups <= 0: + e2e.SortManifests(manifests) + + if err := e2e.WriteManifests(filepath.Join(cli.opts.Directory, "gen"), manifests); err != nil { + return err + } + default: + groupManifests := e2e.SplitGroups(cli.opts.NumGroups, manifests) + + for idx, gm := range groupManifests { + e2e.SortManifests(gm) + + prefix := filepath.Join(cli.opts.Directory, fmt.Sprintf("gen-group%02d", idx)) + if err := e2e.WriteManifests(prefix, gm); err != nil { return err } } - } else { - groupSize := int(math.Ceil(float64(len(manifests)) / float64(groups))) - for g := 0; g < groups; g++ { - for i := 0; i < groupSize && g*groupSize+i < len(manifests); i++ { - manifest := manifests[g*groupSize+i] - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-group%02d-%04d.toml", g, i))) - if err != nil { - return err - } - } - } } + return nil } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 2a8f73127..86e571ee1 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -170,41 +170,75 @@ func LoadManifest(file string) (Manifest, error) { } // SortManifests orders (in-place) a list of manifests such that the -// manifests will be ordered (vaguely) from least complex to most -// complex. +// manifests will be ordered in terms of complexity (or expected +// runtime). Complexity is determined first by the number of nodes, +// and then by the total number of perturbations in the network func SortManifests(manifests []Manifest) { sort.SliceStable(manifests, func(i, j int) bool { - left, right := manifests[i], manifests[j] - - if len(left.Nodes) < len(right.Nodes) { - return true - } - - if left.InitialHeight < right.InitialHeight { - return true - } - - if left.TxSize < right.TxSize { - return true - } - - if left.Evidence < right.Evidence { - return true - } - + // sort based on a point-based comparison between two + // manifests. var ( - leftPerturb int - rightPerturb int + left = manifests[i] + right = manifests[j] ) + // scores start with 100 points for each node. The + // number of nodes in a network is the most important + // factor in the complexity of the test. + leftScore := len(left.Nodes) * 100 + rightScore := len(right.Nodes) * 100 + + // add two points for every node perturbation, and one + // point for every node that starts after genesis. for _, n := range left.Nodes { - leftPerturb += len(n.Perturb) + leftScore += (len(n.Perturb) * 2) + + if n.StartAt > 0 { + leftScore++ + } } for _, n := range right.Nodes { - rightPerturb += len(n.Perturb) + rightScore += (len(n.Perturb) * 2) + if n.StartAt > 0 { + rightScore++ + } } - return leftPerturb < rightPerturb + // add one point if the network has evidence. + if left.Evidence > 0 { + leftScore++ + } + if right.Evidence > 0 { + rightScore++ + } + return leftScore < rightScore }) } + +// SplitGroups divides a list of manifests into n groups of +// manifests. 
+func SplitGroups(groups int, manifests []Manifest) [][]Manifest { + groupSize := (len(manifests) + groups - 1) / groups + splitManifests := make([][]Manifest, 0, groups) + + for i := 0; i < len(manifests); i += groupSize { + grp := make([]Manifest, groupSize) + n := copy(grp, manifests[i:]) + splitManifests = append(splitManifests, grp[:n]) + } + + return splitManifests +} + +// WriteManifests writes a collection of manifests into files with the +// specified path prefix. +func WriteManifests(prefix string, manifests []Manifest) error { + for i, manifest := range manifests { + if err := manifest.Save(fmt.Sprintf("%s-%04d.toml", prefix, i)); err != nil { + return err + } + } + + return nil +} From 3d410e4a6ba3369332099b280a895b1848c79d8b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Sep 2021 14:31:59 -0400 Subject: [PATCH 022/174] e2e: only check validator sets after statesync (#6980) --- test/e2e/tests/validator_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 6e836ff78..8292e86ee 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -20,6 +20,14 @@ func TestValidator_Sets(t *testing.T) { require.NoError(t, err) first := status.SyncInfo.EarliestBlockHeight + + // for nodes that have to catch up, we should only + // check the validator sets for nodes after this + // point, to avoid inconsistencies with backfill. + if node.StartAt > first { + first = node.StartAt + } + last := status.SyncInfo.LatestBlockHeight // skip first block if node is pruning blocks, to avoid race conditions From e94c418ad91ed92e02532b57fc327f9caab73168 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Sep 2021 14:52:14 -0400 Subject: [PATCH 023/174] e2e: always preserve failed networks (#6981) --- test/e2e/run-multiple.sh | 2 +- test/e2e/runner/main.go | 25 ++++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh index 6f4170bb0..571a78a7f 100755 --- a/test/e2e/run-multiple.sh +++ b/test/e2e/run-multiple.sh @@ -21,7 +21,7 @@ for MANIFEST in "$@"; do START=$SECONDS echo "==> Running testnet: $MANIFEST" - if ! ./build/runner -p -f "$MANIFEST"; then + if ! ./build/runner -f "$MANIFEST"; then echo "==> Testnet $MANIFEST failed, dumping manifest..." 
cat "$MANIFEST" diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 8de182f3a..c6d2ad7b7 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -48,18 +48,21 @@ func NewCLI() *CLI { cli.testnet = testnet return nil }, - RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(cli.testnet); err != nil { + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err = Cleanup(cli.testnet); err != nil { return err } defer func() { if cli.preserve { logger.Info("Preserving testnet contents because -preserve=true") + } else if err != nil { + logger.Info("Preserving testnet that encountered error", + "err", err) } else if err := Cleanup(cli.testnet); err != nil { logger.Error("Error cleaning up testnet contents", "err", err) } }() - if err := Setup(cli.testnet); err != nil { + if err = Setup(cli.testnet); err != nil { return err } @@ -73,38 +76,38 @@ func NewCLI() *CLI { chLoadResult <- Load(lctx, cli.testnet) }() - if err := Start(ctx, cli.testnet); err != nil { + if err = Start(ctx, cli.testnet); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } if cli.testnet.HasPerturbations() { - if err := Perturb(ctx, cli.testnet); err != nil { + if err = Perturb(ctx, cli.testnet); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } } if cli.testnet.Evidence > 0 { - if err := InjectEvidence(ctx, cli.testnet, cli.testnet.Evidence); err != nil { + if err = InjectEvidence(ctx, cli.testnet, cli.testnet.Evidence); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress + if err = Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress return err } } loadCancel() - if err := <-chLoadResult; err != nil { + if err = <-chLoadResult; err != nil { return fmt.Errorf("transaction load failed: %w", err) } - if err := Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests + if err = Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests return err } if err := Test(cli.testnet); err != nil { From 5ccd668c788c93dc6bc0998e4590efc1094d1b2a Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 23 Sep 2021 16:58:10 -0400 Subject: [PATCH 024/174] e2e: load should be proportional to network (#6983) --- test/e2e/runner/load.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 23050c636..d66d7770e 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -21,9 +21,9 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { // CPU. This gives high-throughput small networks and low-throughput large ones. // This also limits the number of TCP connections, since each worker has // a connection to all nodes. - concurrency := 64 / len(testnet.Nodes) - if concurrency == 0 { - concurrency = 1 + concurrency := len(testnet.Nodes) * 8 + if concurrency > 64 { + concurrency = 64 } chTx := make(chan types.Tx) @@ -32,7 +32,11 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { defer cancel() // Spawn job generator and processors. 
- logger.Info(fmt.Sprintf("Starting transaction load (%v workers)...", concurrency)) + logger.Info("starting transaction load", + "workers", concurrency, + "nodes", len(testnet.Nodes), + "tx", testnet.TxSize) + started := time.Now() go loadGenerate(ctx, chTx, testnet.TxSize) @@ -78,8 +82,8 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { logger.Info("ending transaction load", "dur_secs", time.Since(started).Seconds(), "txns", success, - "rate", rate, - "slow", rate < 1) + "workers", concurrency, + "rate", rate) return nil } @@ -129,8 +133,8 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { func loadGenerateWaitTime(size int64) time.Duration { const ( - min = int64(100 * time.Millisecond) - max = int64(time.Second) + min = int64(10 * time.Millisecond) + max = int64(100 * time.Millisecond) ) var ( From dd4141e76fd78f54a6794752131e630b847b29c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Sep 2021 10:15:30 -0400 Subject: [PATCH 025/174] build(deps): Bump github.com/go-kit/kit from 0.11.0 to 0.12.0 (#6988) Bumps [github.com/go-kit/kit](https://github.com/go-kit/kit) from 0.11.0 to 0.12.0. - [Release notes](https://github.com/go-kit/kit/releases) - [Commits](https://github.com/go-kit/kit/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: github.com/go-kit/kit dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 ++-- go.sum | 110 +++++++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 86 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index 4c6bbdc99..efb65b974 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/fortytw2/leaktest v1.3.0 - github.com/go-kit/kit v0.11.0 + github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 github.com/golangci/golangci-lint v1.42.1 @@ -35,8 +35,8 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 github.com/vektra/mockery/v2 v2.9.4 - golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 - golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 + golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 + golang.org/x/net v0.0.0-20210917221730-978cfadd31cf golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.40.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect diff --git a/go.sum b/go.sum index d9385a995..b07b446ad 100644 --- a/go.sum +++ b/go.sum @@ -58,10 +58,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= 
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -88,6 +91,7 @@ github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98= github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -102,6 +106,7 @@ github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= @@ -111,10 +116,11 @@ github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9D github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= -github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -140,14 +146,16 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0= github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= @@ -155,14 +163,15 @@ github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3/ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -237,9 +246,10 @@ github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -258,12 +268,14 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= -github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= @@ -304,12 +316,15 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -455,21 +470,24 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -492,7 +510,7 @@ github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -521,8 +539,10 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -531,6 +551,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI= github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -542,7 +563,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.10.7/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -596,8 +618,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -616,6 +639,7 @@ github.com/mgechev/revive v1.1.1/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJd github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= @@ -642,6 +666,7 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= @@ -656,9 +681,9 @@ github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0 github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw= github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= 
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= -github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= -github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= @@ -709,12 +734,13 @@ github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= @@ -738,6 +764,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -749,15 +776,19 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= @@ -897,6 +928,7 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -953,14 +985,18 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod 
h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -979,9 +1015,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -992,6 +1033,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1068,8 +1110,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1163,6 +1208,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1177,8 +1223,9 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1189,19 +1236,22 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1302,6 +1352,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1396,8 +1450,9 @@ google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto 
v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs=
 google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w=
+google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
 google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1502,6 +1557,7 @@ mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw
 pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g=
 pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

From 363b87e8ea12628b5a2e0eb600e315ea78b8afc6 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Fri, 24 Sep 2021 10:50:19 -0400
Subject: [PATCH 026/174] changelog: add entry for internalizations (#6989)

---
 CHANGELOG_PENDING.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 1c8c2b81e..f329b0e24 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -19,6 +19,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [crypto/armor]: \#6963 remove package which is unused, and based on
   deprecated fundamentals. Downstream users should maintain this
   library. (@tychoish)
+ - [state] [store] [proxy] [rpc/core]: \#6937 move packages to
+   `internal` to prevent consumption of these internal APIs by
+   external users.
(@tychoish) - Blockchain Protocol From c909f8a236e833951fa9b324a4f3595827050b79 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 11:52:47 -0400 Subject: [PATCH 027/174] e2e: avoid non-determinism in app hash check (#6985) --- test/e2e/tests/app_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 9a2b289b4..4387b98cd 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -44,15 +44,17 @@ func TestApp_Hash(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - block, err := client.Block(ctx, nil) + status, err := client.Status(ctx) + require.NoError(t, err) + if info.Response.LastBlockHeight == status.SyncInfo.LatestBlockHeight { + require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, + "app hash does not match node status") + } + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) require.NoError(t, err) require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), "app hash does not match last block's app hash") - - status, err := client.Status(ctx) - require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, - "app hash does not match node status") }) } From ab8cfb9f57eba4c53290eb7533bd375c4aa95b82 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 12:28:51 -0400 Subject: [PATCH 028/174] e2e: tighten timing for load generation (#6990) --- .github/workflows/e2e-nightly-master.yml | 4 ++-- test/e2e/runner/load.go | 24 +++++++++--------------- test/e2e/runner/main.go | 16 +++++++++++++++- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 029fee6bb..5f5ed9706 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: p2p: ['legacy', 'new', 'hybrid'] - group: ['00', '01'] + group: ['00', '01', '02', '03'] runs-on: ubuntu-latest timeout-minutes: 60 steps: @@ -35,7 +35,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} working-directory: test/e2e diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index d66d7770e..5e6e04f89 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -3,7 +3,6 @@ package main import ( "container/ring" "context" - "errors" "fmt" "math/rand" "time" @@ -58,19 +57,9 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { case numSeen := <-chSuccess: success += numSeen case <-ctx.Done(): - // if we couldn't submit any transactions, - // that's probably a problem and the test - // should error; however, for very short tests - // we shouldn't abort. - // - // The 2s cut off, is a rough guess based on - // the expected value of - // loadGenerateWaitTime. If the implementation - // of that function changes, then this might - // also need to change without more - // refactoring. 
- if success == 0 && time.Since(started) > 2*time.Second { - return errors.New("failed to submit any transactions") + if success == 0 { + return fmt.Errorf("failed to submit transactions in %s by %d workers", + time.Since(started), concurrency) } // TODO perhaps allow test networks to @@ -141,9 +130,14 @@ func loadGenerateWaitTime(size int64) time.Duration { baseJitter = rand.Int63n(max-min+1) + min // nolint: gosec sizeFactor = size * int64(time.Millisecond) sizeJitter = rand.Int63n(sizeFactor-min+1) + min // nolint: gosec + waitTime = time.Duration(baseJitter + sizeJitter) ) - return time.Duration(baseJitter + sizeJitter) + if size == 1 { + return waitTime / 2 + } + + return waitTime } // loadProcess processes transactions diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index c6d2ad7b7..194e4c6e4 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "strconv" + "time" "github.com/spf13/cobra" @@ -75,7 +76,7 @@ func NewCLI() *CLI { go func() { chLoadResult <- Load(lctx, cli.testnet) }() - + startAt := time.Now() if err = Start(ctx, cli.testnet); err != nil { return err } @@ -102,6 +103,19 @@ func NewCLI() *CLI { } } + // to help make sure that we don't run into + // situations where 0 transactions have + // happened on quick cases, we make sure that + // it's been at least 15s before canceling the + // load generator. + // + // TODO allow the load generator to report + // successful transactions to avoid needing + // this sleep. + if rest := time.Since(startAt); rest < 15*time.Second { + time.Sleep(15*time.Second - rest) + } + loadCancel() if err = <-chLoadResult; err != nil { From b203c9179916b12f1068358ba7b13d4b7393f722 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 13:01:35 -0400 Subject: [PATCH 029/174] rpc: implement BroadcastTxCommit without event subscriptions (#6984) --- internal/rpc/core/mempool.go | 115 ++++++++++++++--------------- 1 file changed, 44 insertions(+), 71 deletions(-) diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index d66cabadd..6cd05348f 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -1,14 +1,14 @@ package core import ( - "context" "errors" "fmt" + "math/rand" "time" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -60,87 +60,60 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*co // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll - subscriber := ctx.RemoteAddr() - - if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) - } else if env.EventBus.NumClientSubscriptions(subscriber) >= env.Config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) - } - - // Subscribe to tx being committed in block.
- subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) - defer cancel() - q := types.EventQueryTxFor(tx) - deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) - if err != nil { - err = fmt.Errorf("failed to subscribe to tx: %w", err) - env.Logger.Error("Error on broadcast_tx_commit", "err", err) - return nil, err - } - defer func() { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: q} - if err := env.EventBus.Unsubscribe(context.Background(), args); err != nil { - env.Logger.Error("Error unsubscribing from eventBus", "err", err) - } - }() - - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan *abci.Response, 1) - err = env.Mempool.CheckTx( + resCh := make(chan *abci.Response, 1) + err := env.Mempool.CheckTx( ctx.Context(), tx, - func(res *abci.Response) { checkTxResCh <- res }, + func(res *abci.Response) { resCh <- res }, mempool.TxInfo{}, ) if err != nil { - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) + return nil, err } - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.GetCheckTx() + r := (<-resCh).GetCheckTx() - if checkTxRes.Code != abci.CodeTypeOK { + if !indexer.KVSinkEnabled(env.EventSinks) { return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil + CheckTx: *r, + Hash: tx.Hash(), + }, + errors.New("cannot wait for commit because kvEventSync is not enabled") } - // Wait for the tx to be included in a block or timeout. - select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. - deliverTxRes := msg.Data().(types.EventDataTx) - return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: deliverTxRes.Result, - Hash: tx.Hash(), - Height: deliverTxRes.Height, - }, nil - case <-deliverTxSub.Canceled(): - var reason string - if deliverTxSub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = deliverTxSub.Err().Error() + startAt := time.Now() + timer := time.NewTimer(0) + defer timer.Stop() + + count := 0 + for { + count++ + select { + case <-ctx.Context().Done(): + env.Logger.Error("Error on broadcastTxCommit", + "duration", time.Since(startAt), + "err", err) + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, fmt.Errorf("timeout waiting for commit of tx %s (%s)", + tx.Hash(), time.Since(startAt)) + case <-timer.C: + txres, err := env.Tx(ctx, tx.Hash(), false) + if err != nil { + jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec + backoff := 100 * time.Duration(count) * time.Millisecond + timer.Reset(jitter + backoff) + continue + } + + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + DeliverTx: txres.TxResult, + Hash: tx.Hash(), + Height: txres.Height, + }, nil } - err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason) - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err - case <-time.After(env.Config.TimeoutBroadcastTxCommit): - err = errors.New("timed out waiting for tx to be included in a block") - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err } } From 08982c81fc0c83504df4b4043e7a6e9bb0ba4550 Mon Sep 17 
00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 13:49:06 -0400 Subject: [PATCH 030/174] e2e: skip validation of status apphash (#6991) I believe this assertion is likely redundant given that we're checking the block apphash. --- test/e2e/tests/app_test.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 4387b98cd..afd4d2d52 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -44,17 +44,15 @@ func TestApp_Hash(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - status, err := client.Status(ctx) - require.NoError(t, err) - if info.Response.LastBlockHeight == status.SyncInfo.LatestBlockHeight { - require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, - "app hash does not match node status") - } - block, err := client.Block(ctx, &info.Response.LastBlockHeight) require.NoError(t, err) require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), "app hash does not match last block's app hash") + + status, err := client.Status(ctx) + require.NoError(t, err) + require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, + "status out of sync with application") }) } From 5e45676875b411059fae0e8878dc639001869762 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 14:27:00 -0400 Subject: [PATCH 031/174] e2e: do not inject evidence through light proxy (#6992) In the last run, there were two problems with responses returned at the RPC layer from light nodes' RPC endpoints. I think exercising the light client proxy RPC system is something that can/should be done via unit testing, and that these errors are likely transient in production and, in CI, very likely caused by test environment issues. --- test/e2e/runner/evidence.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index ab993a2fe..e6c7c3f7a 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -35,7 +35,7 @@ func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error for _, idx := range rand.Perm(len(testnet.Nodes)) { targetNode = testnet.Nodes[idx] - if targetNode.Mode == e2e.ModeSeed { + if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { targetNode = nil continue } From dbad7015155d7ced67ac67b7fd9915b61ca0ebbb Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 14:59:10 -0400 Subject: [PATCH 032/174] ci: use smart merges (#6993) --- .github/mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index df570504a..6b08c877f 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -6,7 +6,7 @@ pull_request_rules: actions: merge: method: squash - strict: true + strict: smart+fasttrack commit_message: title+body - name: backport patches to v0.34.x branch conditions: From 71c6682b57cef792ae44730a9f39840226c017c0 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 24 Sep 2021 17:40:12 -0400 Subject: [PATCH 033/174] statesync: clean up reactor/syncer lifecycle (#6995) I've been noticing a number of situations where the statesync reactor blocks waiting for peers (or similar), so I've moved things around to improve outcomes in local tests.
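To make the pattern concrete: the fix mostly replaces unconditional channel sends, which can block forever once the consumer has gone away, with selects that also watch the shutdown signals. A minimal sketch of that send shape, using an illustrative worker with a `closeCh` closed on shutdown (the names here are not the reactor's actual fields):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// trySend delivers msg unless the context is canceled or closeCh is
// closed first; unlike a bare `out <- msg`, it can never block past
// shutdown.
func trySend(ctx context.Context, out chan<- string, closeCh <-chan struct{}, msg string) error {
	select {
	case out <- msg:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-closeCh:
		return errors.New("shutdown before message could be sent")
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	closeCh := make(chan struct{})
	out := make(chan string) // unbuffered: a send blocks until received

	// Nothing ever receives from out, so the send can only end via ctx.
	fmt.Println(trySend(ctx, out, closeCh, "snapshot request"))
}
```

The diff below applies this shape to the snapshot/chunk channel sends and to the peer-wait loop.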
--- internal/statesync/reactor.go | 34 ++++++++++++++------ internal/statesync/reactor_test.go | 5 ++- internal/statesync/stateprovider.go | 48 ++++++++++++++++++++++++----- internal/statesync/syncer.go | 25 +++++++++++++-- 4 files changed, 91 insertions(+), 21 deletions(-) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 931e2e7fc..c66b77793 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -269,7 +269,10 @@ func (r *Reactor) OnStop() { func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { // We need at least two peers (for cross-referencing of light blocks) before we can // begin state sync - r.waitForEnoughPeers(ctx, 2) + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return sm.State{}, err + } + r.mtx.Lock() if r.syncer != nil { r.mtx.Unlock() @@ -288,6 +291,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.stateProvider, r.snapshotCh.Out, r.chunkCh.Out, + r.snapshotCh.Done(), r.tempDir, r.metrics, ) @@ -302,10 +306,16 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { requestSnapshotsHook := func() { // request snapshots from all currently connected peers - r.snapshotCh.Out <- p2p.Envelope{ + msg := p2p.Envelope{ Broadcast: true, Message: &ssproto.SnapshotsRequest{}, } + + select { + case <-ctx.Done(): + case <-r.closeCh: + case r.snapshotCh.Out <- msg: + } } state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) @@ -992,19 +1002,21 @@ func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { }, nil } -func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) { - t := time.NewTicker(200 * time.Millisecond) +func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { + startAt := time.Now() + t := time.NewTicker(100 * time.Millisecond) defer t.Stop() - for { + for r.peers.Len() < numPeers { select { case <-ctx.Done(): - return + return fmt.Errorf("operation canceled while waiting for peers after %s", time.Since(startAt)) + case <-r.closeCh: + return fmt.Errorf("shutdown while waiting for peers after %s", time.Since(startAt)) case <-t.C: - if r.peers.Len() >= numPeers { - return - } + continue } } + return nil } func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initialHeight int64) error { @@ -1019,6 +1031,10 @@ func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initial "trustHeight", to.Height, "useP2P", r.cfg.UseP2P) if r.cfg.UseP2P { + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return err + } + peers := r.peers.All() providers := make([]provider.Provider, len(peers)) for idx, p := range peers { diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 98ee9d26e..7bcfe39af 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -171,6 +171,7 @@ func setup( stateProvider, rts.snapshotOutCh, rts.chunkOutCh, + rts.snapshotChannel.Done(), "", rts.reactor.metrics, ) @@ -524,7 +525,9 @@ func TestReactor_StateProviderP2P(t *testing.T) { rts.reactor.cfg.UseP2P = true rts.reactor.cfg.TrustHeight = 1 rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash()) - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + rts.reactor.mtx.Lock() err := rts.reactor.initStateProvider(ctx, factory.DefaultTestChainID, 1) rts.reactor.mtx.Unlock() diff --git a/internal/statesync/stateprovider.go 
b/internal/statesync/stateprovider.go index 9eb776e28..b622824cd 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -337,9 +337,39 @@ func (s *stateProviderP2P) addProvider(p lightprovider.Provider) { } } -// consensusParams sends out a request for consensus params blocking until one is returned. -// If it fails to get a valid set of consensus params from any of the providers it returns an error. +// consensusParams sends out a request for consensus params blocking +// until one is returned. +// +// If it fails to get a valid set of consensus params from any of the +// providers it returns an error; however, it will retry indefinitely +// (with backoff) until the context is canceled. func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (types.ConsensusParams, error) { + iterCount := 0 + for { + params, err := s.tryGetConsensusParamsFromWitnesses(ctx, height) + if err != nil { + return types.ConsensusParams{}, err + } + if params != nil { + return *params, nil + } + iterCount++ + + select { + case <-ctx.Done(): + return types.ConsensusParams{}, ctx.Err() + case <-time.After(time.Duration(iterCount) * consensusParamsResponseTimeout): + } + } +} + +// tryGetConsensusParamsFromWitnesses attempts to get consensus +// parameters from the light client's available witnesses. If both +// return values are nil, the call can be retried. +func (s *stateProviderP2P) tryGetConsensusParamsFromWitnesses( + ctx context.Context, + height int64, +) (*types.ConsensusParams, error) { for _, provider := range s.lc.Witnesses() { p, ok := provider.(*BlockProvider) if !ok { @@ -349,7 +379,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t // extract the nodeID of the provider peer, err := types.NewNodeID(p.String()) if err != nil { - return types.ConsensusParams{}, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err) + return nil, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err) } select { @@ -360,7 +390,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t }, }: case <-ctx.Done(): - return types.ConsensusParams{}, ctx.Err() + return nil, ctx.Err() } select { @@ -368,13 +398,15 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t case <-time.After(consensusParamsResponseTimeout): continue case <-ctx.Done(): - return types.ConsensusParams{}, ctx.Err() + return nil, ctx.Err() case params, ok := <-s.paramsRecvCh: if !ok { - return types.ConsensusParams{}, errors.New("params channel closed") + return nil, errors.New("params channel closed") } - return params, nil + return &params, nil } } - return types.ConsensusParams{}, errors.New("unable to fetch consensus params from connected providers") + + // signal to caller to retry. + return nil, nil } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 43fbf99e4..68bec6880 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -70,6 +70,7 @@ type syncer struct { avgChunkTime int64 lastSyncedSnapshotHeight int64 processingSnapshot *snapshot + closeCh <-chan struct{} } // newSyncer creates a new syncer.
@@ -79,7 +80,9 @@ func newSyncer( conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, stateProvider StateProvider, - snapshotCh, chunkCh chan<- p2p.Envelope, + snapshotCh chan<- p2p.Envelope, + chunkCh chan<- p2p.Envelope, + closeCh <-chan struct{}, tempDir string, metrics *Metrics, ) *syncer { @@ -95,6 +98,7 @@ func newSyncer( fetchers: cfg.Fetchers, retryTimeout: cfg.ChunkRequestTimeout, metrics: metrics, + closeCh: closeCh, } } @@ -139,10 +143,16 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // single request to discover snapshots, later we may want to do retries and stuff. func (s *syncer) AddPeer(peerID types.NodeID) { s.logger.Debug("Requesting snapshots from peer", "peer", peerID) - s.snapshotCh <- p2p.Envelope{ + + msg := p2p.Envelope{ To: peerID, Message: &ssproto.SnapshotsRequest{}, } + + select { + case <-s.closeCh: + case s.snapshotCh <- msg: + } } // RemovePeer removes a peer from the pool. @@ -473,6 +483,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return + case <-s.closeCh: + return case <-time.After(2 * time.Second): continue } @@ -499,6 +511,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return + case <-s.closeCh: + return } ticker.Stop() @@ -522,7 +536,7 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { "peer", peer, ) - s.chunkCh <- p2p.Envelope{ + msg := p2p.Envelope{ To: peer, Message: &ssproto.ChunkRequest{ Height: snapshot.Height, @@ -530,6 +544,11 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { Index: chunk, }, } + + select { + case s.chunkCh <- msg: + case <-s.closeCh: + } } // verifyApp verifies the sync, checking the app hash and last block height. It returns the From 118bfe2087e8a8c41589d3d616e5ae3956c43540 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 24 Sep 2021 15:37:25 -0700 Subject: [PATCH 034/174] abci: Flush socket requests and responses immediately. (#6997) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The main effect of this change is to flush the socket client and server message encoding buffers immediately once the message is fully and correctly encoded. This allows us to remove the timer and some other special cases, without changing the observed behaviour of the system. -- Background The socket protocol client and server each use a buffered writer to encode request and response messages onto the underlying connection. This reduces the possibility of a single message being split across multiple writes, but has the side-effect that a request may remain buffered for some time. The implementation worked around this by keeping a ticker that occasionally triggers a flush, and by flushing the writer in response to an explicit request baked into the client/server protocol (see also #6994). These workarounds are both unnecessary: Once a message has been dequeued for sending and fully encoded in wire format, there is no real use keeping all or part of it buffered locally. Moreover, using an asynchronous process to flush the buffer makes the round-trip performance of the request unpredictable. -- Benchmarks Code: https://play.golang.org/p/0ChUOxJOiHt I found no pre-existing performance benchmarks to justify the flush pattern, but a natural question is whether this will significantly harm client/server performance. 
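As a rough illustration of the new behavior (a toy length-prefixed framing, not the actual ABCI wire format), the write path now amounts to encode-then-flush:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeFrame encodes one length-prefixed message and flushes the
// buffered writer immediately, so no fully-encoded message ever
// lingers in the buffer waiting on a timer or a Flush request.
func writeFrame(bw *bufio.Writer, payload []byte) error {
	var length [4]byte
	binary.BigEndian.PutUint32(length[:], uint32(len(payload)))
	if _, err := bw.Write(length[:]); err != nil {
		return err
	}
	if _, err := bw.Write(payload); err != nil {
		return err
	}
	// The buffer still coalesces prefix and payload into (usually)
	// a single write on the underlying connection.
	return bw.Flush()
}

func main() {
	var conn bytes.Buffer // stands in for the net.Conn
	bw := bufio.NewWriter(&conn)
	if err := writeFrame(bw, []byte("request")); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes on the wire\n", conn.Len()) // 4 + 7
}
```

The remaining question is whether flushing on every message measurably hurts throughput.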
To test this, I implemented a simple benchmark that transfers randomly-sized byte buffers from a no-op "client" to a no-op "server" over a Unix-domain socket, using a buffered writer, both with and without explicit flushes after each write. As the following data show, flushing every time (FLUSH=true) does reduce raw throughput, but not by a significant amount except for very small request sizes, where the transfer time is already trivial (1.9μs). Given that the client is calibrated for 1MiB transactions, the overhead is not meaningful. The percentage in each section is the speedup for flushing only when the buffer is full, relative to flushing every block. The benchmark uses the default buffer size (4096 bytes), which is the same value used by the socket client and server implementation: FLUSH NBLOCKS MAX AVG TOTAL ELAPSED TIME/BLOCK false 3957471 512 255 1011165416 2.00018873s 505ns true 1068568 512 255 273064368 2.000217051s 1.871µs (73%) false 536096 4096 2048 1098066401 2.000229108s 3.731µs true 477911 4096 2047 978746731 2.000177825s 4.185µs (10.8%) false 124595 16384 8181 1019340160 2.000235086s 16.053µs true 120995 16384 8179 989703064 2.000329349s 16.532µs (2.9%) false 2114 1048576 525693 1111316541 2.000479928s 946.3µs true 2083 1048576 526379 1096449173 2.001817137s 961.025µs (1.5%) Note also that the FLUSH=false baseline is actually faster than the production code, which flushes more often than is required by the buffer filling up. Moreover, the timer slows down the overall transaction rate of the client and server, independent of how fast the socket transfer is, so the loss on a real workload is probably much less. --- abci/client/socket_client.go | 58 +++++++++++------------------------- abci/server/socket_server.go | 19 ++++-------- 2 files changed, 24 insertions(+), 53 deletions(-) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index d6477062c..8a4598fe5 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/libs/timer" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -22,8 +21,6 @@ const ( // reqQueueSize is the max number of queued async requests. // (memory: 256MB max assuming 1MB transactions) reqQueueSize = 256 - // Don't wait longer than... - flushThrottleMS = 20 ) type reqResWithContext struct { @@ -40,8 +37,7 @@ type socketClient struct { mustConnect bool conn net.Conn - reqQueue chan *reqResWithContext - flushTimer *timer.ThrottleTimer + reqQueue chan *reqResWithContext mtx tmsync.RWMutex err error @@ -57,7 +53,6 @@ var _ Client = (*socketClient)(nil) func NewSocketClient(addr string, mustConnect bool) Client { cli := &socketClient{ reqQueue: make(chan *reqResWithContext, reqQueueSize), - flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS), mustConnect: mustConnect, addr: addr, @@ -102,8 +97,7 @@ func (cli *socketClient) OnStop() { cli.conn.Close() } - cli.flushQueue() - cli.flushTimer.Stop() + cli.drainQueue() } // Error returns an error if the client was stopped abruptly.
@@ -126,38 +120,25 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) { //---------------------------------------- func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { - w := bufio.NewWriter(conn) + bw := bufio.NewWriter(conn) for { select { case reqres := <-cli.reqQueue: - // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) - if reqres.C.Err() != nil { cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) continue } - cli.willSendReq(reqres.R) - err := types.WriteMessage(reqres.R.Request, w) - if err != nil { + + if err := types.WriteMessage(reqres.R.Request, bw); err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } + if err := bw.Flush(); err != nil { + cli.stopForError(fmt.Errorf("flush buffer: %w", err)) + return + } - // If it's a flush request, flush the current buffer. - if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok { - err = w.Flush() - if err != nil { - cli.stopForError(fmt.Errorf("flush buffer: %w", err)) - return - } - } - case <-cli.flushTimer.Ch: // flush queue - select { - case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}: - default: - // Probably will fill the buffer, or retry later. - } case <-cli.Quit(): return } @@ -492,14 +473,6 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s } } - // Maybe auto-flush, or unset auto-flush - switch req.Value.(type) { - case *types.Request_Flush: - cli.flushTimer.Unset() - default: - cli.flushTimer.Set() - } - return reqres, nil } @@ -537,7 +510,9 @@ func queueErr(e error) error { return fmt.Errorf("can't queue req: %w", e) } -func (cli *socketClient) flushQueue() { +// drainQueue marks as complete and discards all remaining pending requests +// from the queue. +func (cli *socketClient) drainQueue() { cli.mtx.Lock() defer cli.mtx.Unlock() @@ -547,14 +522,17 @@ func (cli *socketClient) flushQueue() { reqres.Done() } - // mark all queued messages as resolved -LOOP: + // Mark all queued messages as resolved. + // + // TODO(creachadair): We can't simply range the channel, because it is never + // closed, and the writer continues to add work. + // See https://github.com/tendermint/tendermint/issues/6996. for { select { case reqres := <-cli.reqQueue: reqres.R.Done() default: - break LOOP + return } } } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 543b444b1..85539645b 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -240,22 +240,15 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types // Pull responses from 'responses' and write them to conn. 
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { - var count int - var bufWriter = bufio.NewWriter(conn) - for { - var res = <-responses - err := types.WriteMessage(res, bufWriter) - if err != nil { + bw := bufio.NewWriter(conn) + for res := range responses { + if err := types.WriteMessage(res, bw); err != nil { closeConn <- fmt.Errorf("error writing message: %w", err) return } - if _, ok := res.Value.(*types.Response_Flush); ok { - err = bufWriter.Flush() - if err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %w", err) - return - } + if err := bw.Flush(); err != nil { + closeConn <- fmt.Errorf("error flushing write buffer: %w", err) + return } - count++ } } From c101fa17abef2f00fc9520570916dc1b1bacf676 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 25 Sep 2021 11:53:04 -0400 Subject: [PATCH 035/174] e2e: add limit and sort to generator (#6998) I observed a couple of problems with the generator in some recent tests: - there were a couple of hybrid test cases which did not have any legacy nodes (randomness and all.) I changed the probability to produce more reliable results. - added options to the generation to be able to add a max (to complement the earlier min) number of nodes for local testing. - added an option to support reversing the sort order so "more complex" networks were first, as well as tweaked some of the point values. - this refactored the generator's CLI parsing to be a bit more clear. --- test/e2e/generator/generate.go | 12 +++++++++--- test/e2e/generator/main.go | 30 ++++++++++-------------------- test/e2e/pkg/manifest.go | 26 +++++++++++++++++++++----- 3 files changed, 40 insertions(+), 28 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 78da8ed4a..4db1914d3 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -90,6 +90,10 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { } } + if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { + continue + } + manifests = append(manifests, manifest) } @@ -98,9 +102,11 @@ type Options struct { MinNetworkSize int + MaxNetworkSize int NumGroups int Directory string P2P P2PMode + Reverse bool } type P2PMode string @@ -159,7 +165,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 + node.UseLegacyP2P = r.Intn(5) < 2 } manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node @@ -184,7 +190,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 + node.UseLegacyP2P = r.Intn(5) < 2 } manifest.Nodes[name] = node @@ -221,7 +227,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(2) == 1 + node.UseLegacyP2P = r.Intn(5) < 2 } manifest.Nodes[fmt.Sprintf("full%02d", i)] = node diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 8ec373944..6e39be820 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -41,25 +41,11 @@ func NewCLI() *CLI { var opts Options var err error - cli.opts.Directory, err = cmd.Flags().GetString("dir") if err != nil { return
err - } - cli.opts.NumGroups, err = cmd.Flags().GetInt("groups") if err != nil { return err } - cli.opts.MinNetworkSize, err = cmd.Flags().GetInt("min-size") if err != nil { return err } - p2pMode, err := cmd.Flags().GetString("p2p") if err != nil { return err } + switch mode := P2PMode(p2pMode); mode { case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: opts.P2P = mode @@ -71,12 +57,16 @@ func NewCLI() *CLI { }, } - cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests") + cli.root.PersistentFlags().StringVarP(&cli.opts.Directory, "dir", "d", "", "Output directory for manifests") _ = cli.root.MarkPersistentFlagRequired("dir") - cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") + cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") + cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") - cli.root.PersistentFlags().IntP("min-size", "m", 1, "Minimum Network Size") + cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, + "Minimum network size (nodes)") + cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, + "Maximum network size (nodes), 0 is unlimited") return cli } @@ -95,7 +85,7 @@ func (cli *CLI) generate() error { switch { case cli.opts.NumGroups <= 0: - e2e.SortManifests(manifests) + e2e.SortManifests(manifests, cli.opts.Reverse) if err := e2e.WriteManifests(filepath.Join(cli.opts.Directory, "gen"), manifests); err != nil { return err @@ -104,7 +94,7 @@ groupManifests := e2e.SplitGroups(cli.opts.NumGroups, manifests) for idx, gm := range groupManifests { - e2e.SortManifests(gm) + e2e.SortManifests(gm, cli.opts.Reverse) prefix := filepath.Join(cli.opts.Directory, fmt.Sprintf("gen-group%02d", idx)) if err := e2e.WriteManifests(prefix, gm); err != nil { diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 86e571ee1..e3e5bb7ee 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -172,8 +172,11 @@ func LoadManifest(file string) (Manifest, error) { // SortManifests orders (in-place) a list of manifests such that the // manifests will be ordered in terms of complexity (or expected // runtime). Complexity is determined first by the number of nodes, -// and then by the total number of perturbations in the network -func SortManifests(manifests []Manifest) { +// and then by the total number of perturbations in the network. +// +// If reverse is true, then the manifests are ordered with the most +// complex networks before the less complex networks. +func SortManifests(manifests []Manifest, reverse bool) { sort.SliceStable(manifests, func(i, j int) bool { // sort based on a point-based comparison between two // manifests. @@ -194,24 +197,37 @@ func SortManifests(manifests []Manifest, reverse bool) { leftScore += (len(n.Perturb) * 2) if n.StartAt > 0 { - leftScore++ + leftScore += 3 } } for _, n := range right.Nodes { rightScore += (len(n.Perturb) * 2) if n.StartAt > 0 { - rightScore++ + rightScore += 3 } } // add one point if the network has evidence.
if left.Evidence > 0 { + leftScore += 2 + } + + if right.Evidence > 0 { + rightScore += 2 + } + + if left.TxSize > right.TxSize { leftScore++ } - if right.Evidence > 0 { + + if right.TxSize > left.TxSize { rightScore++ } + if reverse { + return leftScore >= rightScore + } + return leftScore < rightScore }) } From 37ca98a544a522dd2e0a7a00f2b11b66fb4044b2 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 25 Sep 2021 19:14:38 -0400 Subject: [PATCH 036/174] e2e: reduce number of statesyncs in test networks (#6999) --- test/e2e/generator/generate.go | 23 +++++++++++++++++------ test/e2e/generator/random.go | 12 +++++++++--- test/e2e/generator/random_test.go | 28 ++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 4db1914d3..6db1c2645 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -46,9 +46,13 @@ var ( "unix": 10, } // FIXME: v2 disabled due to flake - nodeBlockSyncs = uniformChoice{"v0"} // "v2" - nodeMempools = uniformChoice{"v0", "v1"} - nodeStateSyncs = uniformChoice{e2e.StateSyncDisabled, e2e.StateSyncP2P, e2e.StateSyncRPC} + nodeBlockSyncs = uniformChoice{"v0"} // "v2" + nodeMempools = uniformChoice{"v0", "v1"} + nodeStateSyncs = weightedChoice{ + e2e.StateSyncDisabled: 20, + e2e.StateSyncP2P: 40, + e2e.StateSyncRPC: 40, + } nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 3} nodeRetainBlocks = uniformChoice{0, 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight)} @@ -176,9 +180,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // the initial validator set, and validator set updates for delayed nodes. nextStartAt := manifest.InitialHeight + 5 quorum := numValidators*2/3 + 1 + numValdatorStateSyncing := 0 for i := 1; i <= numValidators; i++ { startAt := int64(0) - if i > quorum { + if i > quorum && numValdatorStateSyncing < 2 && r.Float64() >= 0.5 { + numValdatorStateSyncing++ startAt = nextStartAt nextStartAt += 5 } @@ -273,7 +279,12 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) } else if i > 0 { - manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) + peers := uniformSetChoice(peerNames[:i]) + if manifest.Nodes[name].StateSync == e2e.StateSyncP2P { + manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) + } else { + manifest.Nodes[name].PersistentPeers = peers.Choose(r) + } } } @@ -311,7 +322,7 @@ func generateNode( } if startAt > 0 { - node.StateSync = nodeStateSyncs.Choose(r).(string) + node.StateSync = nodeStateSyncs.Choose(r) } // If this node is forced to be an archive node, retain all blocks and diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index d6c84d46c..c00d56964 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -74,18 +74,24 @@ func (pc probSetChoice) Choose(r *rand.Rand) []string { // uniformSetChoice picks a set of strings with uniform probability, picking at least one. 
type uniformSetChoice []string -func (usc uniformSetChoice) Choose(r *rand.Rand) []string { +func (usc uniformSetChoice) Choose(r *rand.Rand) []string { return usc.ChooseAtLeast(r, 1) } + +func (usc uniformSetChoice) ChooseAtLeast(r *rand.Rand, num int) []string { choices := []string{} indexes := r.Perm(len(usc)) - if len(indexes) > 1 { - indexes = indexes[:1+r.Intn(len(indexes)-1)] + if num < len(indexes) { + indexes = indexes[:1+randomInRange(r, num, len(indexes)-1)] } + for _, i := range indexes { choices = append(choices, usc[i]) } + return choices } +func randomInRange(r *rand.Rand, min, max int) int { return r.Intn(max-min+1) + min } + type weightedChoice map[string]uint func (wc weightedChoice) Choose(r *rand.Rand) string { diff --git a/test/e2e/generator/random_test.go b/test/e2e/generator/random_test.go index 3fbb19ab5..48b04f2d1 100644 --- a/test/e2e/generator/random_test.go +++ b/test/e2e/generator/random_test.go @@ -1,9 +1,12 @@ package main import ( + "fmt" + "math/rand" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCombinations(t *testing.T) { @@ -29,3 +32,28 @@ func TestCombinations(t *testing.T) { {"bool": true, "int": 3, "string": "bar"}, }, c) } + +func TestUniformSetChoice(t *testing.T) { + set := uniformSetChoice([]string{"a", "b", "c"}) + r := rand.New(rand.NewSource(2384)) + + for i := 0; i < 100; i++ { + t.Run(fmt.Sprintf("Iteration%03d", i), func(t *testing.T) { + set = append(set, t.Name()) + + t.Run("ChooseAtLeastSubset", func(t *testing.T) { + require.True(t, len(set.ChooseAtLeast(r, 1)) >= 1) + require.True(t, len(set.ChooseAtLeast(r, 2)) >= 2) + require.True(t, len(set.ChooseAtLeast(r, len(set)/2)) >= len(set)/2) + }) + t.Run("ChooseAtLeastEqualOrGreaterToLength", func(t *testing.T) { + require.Len(t, set.ChooseAtLeast(r, len(set)), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)+1), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)*10), len(set)) + }) + t.Run("ChooseSingle", func(t *testing.T) { + require.True(t, len(set.Choose(r)) >= 1) + }) + }) + } +} From fb9eaf576afbcfcd83d265d7cc0f7d55f2e59393 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sun, 26 Sep 2021 12:10:36 -0400 Subject: [PATCH 037/174] e2e: improve chances of statesyncing success (#7001) This reduces the situations where a node will get stuck block syncing, which seemed to happen a lot in last night's run. --- test/e2e/generator/generate.go | 32 +++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 6db1c2645..96f5ace58 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -49,9 +49,9 @@ var ( nodeBlockSyncs = uniformChoice{"v0"} // "v2" nodeMempools = uniformChoice{"v0", "v1"} nodeStateSyncs = weightedChoice{ - e2e.StateSyncDisabled: 20, - e2e.StateSyncP2P: 40, - e2e.StateSyncRPC: 40, + e2e.StateSyncDisabled: 10, + e2e.StateSyncP2P: 45, + e2e.StateSyncRPC: 45, } nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 3} @@ -163,7 +163,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // First we generate seed nodes, starting at the initial height.
for i := 1; i <= numSeeds; i++ { - node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false) + node := generateNode(r, manifest, e2e.ModeSeed, 0, false) switch p2pMode { case LegacyP2PMode: @@ -180,17 +180,16 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // the initial validator set, and validator set updates for delayed nodes. nextStartAt := manifest.InitialHeight + 5 quorum := numValidators*2/3 + 1 - numValdatorStateSyncing := 0 + numSyncingNodes := 0 for i := 1; i <= numValidators; i++ { startAt := int64(0) - if i > quorum && numValdatorStateSyncing < 2 && r.Float64() >= 0.5 { - numValdatorStateSyncing++ + if i > quorum && numSyncingNodes < 2 && r.Float64() >= 0.25 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - node := generateNode( - r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) + node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2) switch p2pMode { case LegacyP2PMode: @@ -227,7 +226,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er startAt = nextStartAt nextStartAt += 5 } - node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false) + node := generateNode(r, manifest, e2e.ModeFull, startAt, false) switch p2pMode { case LegacyP2PMode: @@ -304,7 +303,11 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // here, since we need to know the overall network topology and startup // sequencing. func generateNode( - r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, + manifest e2e.Manifest, + mode e2e.Mode, + startAt int64, + forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Mode: string(mode), @@ -323,6 +326,13 @@ func generateNode( if startAt > 0 { node.StateSync = nodeStateSyncs.Choose(r) + if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled { + // avoid needing to block sync more than five total blocks.
+ node.StateSync = uniformSetChoice([]string{ + e2e.StateSyncP2P, + e2e.StateSyncRPC, + }).Choose(r)[0] + } } // If this node is forced to be an archive node, retain all blocks and From 60a6c6fb1a8313ff707bf3960400b97f9fc7e751 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Mon, 27 Sep 2021 15:43:07 +0200 Subject: [PATCH 038/174] e2e: allow running of single node using the e2e app (#6982) --- CHANGELOG_PENDING.md | 2 ++ cmd/tendermint/commands/run_node.go | 2 +- internal/proxy/client.go | 9 ++++- test/e2e/Makefile | 9 ++--- test/e2e/README.md | 34 +++++++++++++++++- test/e2e/app/app.go | 53 +++++++++++++++++++++++++++-- test/e2e/app/snapshots.go | 2 +- test/e2e/app/state.go | 2 +- test/e2e/docker/Dockerfile | 2 +- test/e2e/node/built-in.toml | 4 +++ test/e2e/{app => node}/config.go | 15 +++++++- test/e2e/{app => node}/main.go | 5 +-- test/e2e/node/socket.toml | 5 +++ 13 files changed, 127 insertions(+), 17 deletions(-) create mode 100644 test/e2e/node/built-in.toml rename test/e2e/{app => node}/config.go (80%) rename test/e2e/{app => node}/main.go (98%) create mode 100644 test/e2e/node/socket.toml diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f329b0e24..987a760b4 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -27,6 +27,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi ### FEATURES +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters). + ### IMPROVEMENTS ### BUG FIXES diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index c174fd967..1e310f5eb 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -65,7 +65,7 @@ func AddNodeFlags(cmd *cobra.Command) { "proxy-app", config.ProxyApp, "proxy app address, or one of: 'kvstore',"+ - " 'persistent_kvstore' or 'noop' for local testing.") + " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") // rpc flags diff --git a/internal/proxy/client.go b/internal/proxy/client.go index 072ddf3e0..ddb9a928d 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -6,11 +6,12 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" + e2e "github.com/tendermint/tendermint/test/e2e/app" ) // DefaultClientCreator returns a default ClientCreator, which will create a // local client if addr is one of: 'kvstore', -// 'persistent_kvstore' or 'noop', otherwise - a remote client. +// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client. // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store.
@@ -21,6 +22,12 @@ func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io case "persistent_kvstore": app := kvstore.NewPersistentKVStoreApplication(dbDir) return abciclient.NewLocalCreator(app), app + case "e2e": + app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) + if err != nil { + panic(err) + } + return abciclient.NewLocalCreator(app), noopCloser{} + case "noop": return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} default: diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 2b41cc1cd..23cf4d039 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -3,11 +3,8 @@ all: docker generator runner tests docker: docker build --tag tendermint/e2e-node -f docker/Dockerfile ../.. -# We need to build support for database backends into the app in -# order to build a binary with a Tendermint node in it (for built-in -# ABCI testing). -app: - go build -o build/app -tags badgerdb,boltdb,cleveldb,rocksdb ./app +node: + go build -o build/node -tags badgerdb,boltdb,cleveldb,rocksdb ./node generator: go build -o build/generator ./generator @@ -18,4 +15,4 @@ runner: tests: go test -o build/tests ./tests -.PHONY: all app docker generator runner tests +.PHONY: all docker generator runner tests node diff --git a/test/e2e/README.md b/test/e2e/README.md index d737120c1..00bce5ad8 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -142,10 +142,42 @@ Docker does not enable IPv6 by default. To do so, enter the following in } ``` -## Benchmarking testnets +## Benchmarking Testnets It is also possible to run a simple benchmark on a testnet. This is done through the `benchmark` command. This manages the entire process: setting up the environment, starting the test net, waiting for a considerable amount of blocks to be used (currently 100), and then returning the following metrics from the sample of the blockchain: - Average time to produce a block - Standard deviation of producing a block - Minimum and maximum time to produce a block + +## Running Individual Nodes + +The E2E test harness is designed to run several nodes of varying configurations within docker. It is also possible to run a single node in the case of running larger, geographically-dispersed testnets. To run a single node, you can either run: +
+**Built-in** + +```bash +make node +tendermint init validator +TMHOME=$HOME/.tendermint ./build/node ./node/built-in.toml +``` + +To make things simpler, the e2e application can also be run in the tendermint binary +by running + +```bash +tendermint start --proxy-app e2e +``` + +However, this won't offer the same level of configurability for the application. + +**Socket** + +```bash +make node +tendermint init validator +tendermint start +./build/node ./node/socket.toml +``` + +Check `node/config.go` to see how the settings of the test application can be tweaked. \ No newline at end of file diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 26b10d32a..0a0194c98 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -1,4 +1,4 @@ -package main +package app import ( "bytes" @@ -27,6 +27,55 @@ type Application struct { restoreChunks [][]byte } +// Config allows for the setting of high-level parameters for running the e2e Application. +// KeyType and ValidatorUpdates must be the same for all nodes running the same application. +type Config struct { + // The directory in which state.json will be persisted.
Usually $HOME/.tendermint/data + Dir string `toml:"dir"` + + // SnapshotInterval specifies the height interval at which the application + // will take state sync snapshots. Defaults to 0 (disabled). + SnapshotInterval uint64 `toml:"snapshot_interval"` + + // RetainBlocks specifies the number of recent blocks to retain. Defaults to + // 0, which retains all blocks. Must be greater than PersistInterval, + // SnapshotInterval and EvidenceAgeHeight. + RetainBlocks uint64 `toml:"retain_blocks"` + + // KeyType sets the curve that will be used by validators. + // Options are ed25519 & secp256k1. + KeyType string `toml:"key_type"` + + // PersistInterval specifies the height interval at which the application + // will persist state to disk. Defaults to 1 (every height), setting this to + // 0 disables state persistence. + PersistInterval uint64 `toml:"persist_interval"` + + // ValidatorUpdates is a map of heights to validator names and their power, + // and will be returned by the ABCI application. For example, the following + // changes the power of validator01 and validator02 at height 1000: + // + // [validator_update.1000] + // validator01 = 20 + // validator02 = 10 + // + // Specifying height 0 returns the validator update during InitChain. The + // application returns the validator updates as-is, i.e. removing a + // validator must be done by returning it with power 0, and any validators + // not specified are not changed. + // + // height <-> pubkey <-> voting power + ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` +} + +func DefaultConfig(dir string) *Config { + return &Config{ + PersistInterval: 1, + SnapshotInterval: 100, + Dir: dir, + } +} + // NewApplication creates the application. func NewApplication(cfg *Config) (*Application, error) { state, err := NewState(filepath.Join(cfg.Dir, "state.json"), cfg.PersistInterval) @@ -134,7 +183,7 @@ func (app *Application) Commit() abci.ResponseCommit { if err != nil { panic(err) } - logger.Info("Created state sync snapshot", "height", snapshot.Height) + app.logger.Info("Created state sync snapshot", "height", snapshot.Height) } retainHeight := int64(0) if app.cfg.RetainBlocks > 0 { diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ddb7ecdc..4ef20375f 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -1,5 +1,5 @@ // nolint: gosec -package main +package app import ( "encoding/json" diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index b34680c1b..441926453 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -1,5 +1,5 @@ //nolint: gosec -package main +package app import ( "crypto/sha256" diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 68c7bc836..260df23f3 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -19,7 +19,7 @@ COPY . . RUN make build && cp build/tendermint /usr/bin/tendermint COPY test/e2e/docker/entrypoint* /usr/bin/ -RUN cd test/e2e && make app && cp build/app /usr/bin/app +RUN cd test/e2e && make node && cp build/node /usr/bin/app # Set up runtime directory. We don't use a separate runtime image since we need # e.g. leveldb and rocksdb which are already installed in the build image.
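For reference, the in-process wiring that the new "e2e" proxy-app value enables amounts to roughly the following sketch, using only the identifiers from the internal/proxy/client.go hunk above (error handling simplified; the helper name and path are illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	abciclient "github.com/tendermint/tendermint/abci/client"
	e2e "github.com/tendermint/tendermint/test/e2e/app"
)

// newE2ECreator mirrors the new "e2e" case in DefaultClientCreator:
// build the test application with its default config and wrap it in a
// local (in-process) ABCI client, so no app socket is needed.
func newE2ECreator(dbDir string) (abciclient.Creator, error) {
	app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
	if err != nil {
		return nil, err
	}
	return abciclient.NewLocalCreator(app), nil
}

func main() {
	creator, err := newE2ECreator("/tmp/e2e-data") // hypothetical data dir
	if err != nil {
		panic(err)
	}
	fmt.Printf("have creator: %T\n", creator)
}
```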
diff --git a/test/e2e/node/built-in.toml b/test/e2e/node/built-in.toml new file mode 100644 index 000000000..0a2146a58 --- /dev/null +++ b/test/e2e/node/built-in.toml @@ -0,0 +1,4 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "builtin" diff --git a/test/e2e/app/config.go b/test/e2e/node/config.go similarity index 80% rename from test/e2e/app/config.go rename to test/e2e/node/config.go index d7e776538..fa7dcc497 100644 --- a/test/e2e/app/config.go +++ b/test/e2e/node/config.go @@ -6,6 +6,8 @@ import ( "fmt" "github.com/BurntSushi/toml" + + "github.com/tendermint/tendermint/test/e2e/app" ) // Config is the application configuration. @@ -22,10 +24,21 @@ type Config struct { PrivValServer string `toml:"privval_server"` PrivValKey string `toml:"privval_key"` PrivValState string `toml:"privval_state"` - Misbehaviors map[string]string `toml:"misbehaviors"` KeyType string `toml:"key_type"` } +// App extracts out the application specific configuration parameters +func (cfg *Config) App() *app.Config { + return &app.Config{ + Dir: cfg.Dir, + SnapshotInterval: cfg.SnapshotInterval, + RetainBlocks: cfg.RetainBlocks, + KeyType: cfg.KeyType, + ValidatorUpdates: cfg.ValidatorUpdates, + PersistInterval: cfg.PersistInterval, + } +} + // LoadConfig loads the configuration from disk. func LoadConfig(file string) (*Config, error) { cfg := &Config{ diff --git a/test/e2e/app/main.go b/test/e2e/node/main.go similarity index 98% rename from test/e2e/app/main.go rename to test/e2e/node/main.go index 1d6806524..222a6883f 100644 --- a/test/e2e/app/main.go +++ b/test/e2e/node/main.go @@ -30,6 +30,7 @@ import ( grpcprivval "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/test/e2e/app" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) @@ -97,7 +98,7 @@ func run(configFile string) error { // startApp starts the application server, listening for connections from Tendermint. func startApp(cfg *Config) error { - app, err := NewApplication(cfg) + app, err := app.NewApplication(cfg.App()) if err != nil { return err } @@ -118,7 +119,7 @@ func startApp(cfg *Config) error { // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. 
func startNode(cfg *Config) error { - app, err := NewApplication(cfg) + app, err := app.NewApplication(cfg.App()) if err != nil { return err } diff --git a/test/e2e/node/socket.toml b/test/e2e/node/socket.toml new file mode 100644 index 000000000..2f7913e62 --- /dev/null +++ b/test/e2e/node/socket.toml @@ -0,0 +1,5 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "socket" +listen = "tcp://127.0.0.1:26658" From bce7c2f73bc3a20393ab3c42a1b94d59c6f0f5a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Sep 2021 17:12:56 +0200 Subject: [PATCH 039/174] build(deps): Bump google.golang.org/grpc from 1.40.0 to 1.41.0 (#7003) --- go.mod | 2 +- go.sum | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index efb65b974..4eff73283 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 golang.org/x/net v0.0.0-20210917221730-978cfadd31cf golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.41.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index b07b446ad..c6a87e16f 100644 --- a/go.sum +++ b/go.sum @@ -171,6 +171,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -227,6 +228,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= @@ -1482,8 +1484,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= 
-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
From b879f71e8e41760107d1fb516a176cdb50b964f5 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Mon, 27 Sep 2021 13:27:08 -0400
Subject: [PATCH 040/174] e2e: reduce log noise (#7004)

---
 test/e2e/runner/rpc.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go
index 5cfa1d64d..ad5fa7a64 100644
--- a/test/e2e/runner/rpc.go
+++ b/test/e2e/runner/rpc.go
@@ -171,18 +171,18 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R
 		return nil, err
 	case err == nil && status.SyncInfo.LatestBlockHeight >= height:
 		return status, nil
-	case counter%100 == 0:
+	case counter%500 == 0:
 		switch {
 		case err != nil:
 			lastFailed = true
 			logger.Error("node not yet ready",
 				"iter", counter,
 				"node", node.Name,
-				"err", err,
 				"target", height,
+				"err", err,
 			)
 		case status != nil:
-			logger.Error("node not yet ready",
+			logger.Info("node not yet ready",
 				"iter", counter,
 				"node", node.Name,
 				"height", status.SyncInfo.LatestBlockHeight,
From b150ea6b3ec6b69596e9af776a7eb6d0a304dc14 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Mon, 27 Sep 2021 14:08:08 -0400
Subject: [PATCH 041/174] e2e: avoid seed nodes when statesyncing (#7006)

---
 test/e2e/generator/generate.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go
index 96f5ace58..6369cf962 100644
--- a/test/e2e/generator/generate.go
+++ b/test/e2e/generator/generate.go
@@ -275,7 +275,17 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
 		}
 	})
 	for i, name := range peerNames {
-		if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) {
+		// If there are seeds and state sync is disabled for this
+		// node, have the first peer (by sort order) and, randomly,
+		// about half of the remaining peers use a seed node;
+		// otherwise, choose from the previously generated set of
+		// peers.
+ + if len(seedNames) > 0 && + manifest.Nodes[name].StateSync == e2e.StateSyncDisabled && + (i == 0 || r.Float64() >= 0.5) { + + // choose one of the seeds manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) } else if i > 0 { peers := uniformSetChoice(peerNames[:i]) From 6eaa3b24d68bc9ceaeaaae2fb13f9180cf779580 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 27 Sep 2021 15:22:25 -0400 Subject: [PATCH 042/174] ci: use cheaper codecov data collection (#7009) --- .github/workflows/coverage.yml | 4 ++-- test/test_cover.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 6ac3a738e..49c76ab98 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -89,7 +89,7 @@ jobs: go-version: 1.16 - name: test & coverage report creation run: | - cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out if: env.GIT_DIFF - uses: actions/upload-artifact@v2 with: @@ -125,7 +125,7 @@ jobs: name: "${{ github.sha }}-03-coverage" if: env.GIT_DIFF - run: | - cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt + cat ./*profile.out | grep -v "mode: set" >> coverage.txt if: env.GIT_DIFF - uses: codecov/codecov-action@v2.1.0 with: diff --git a/test/test_cover.sh b/test/test_cover.sh index 17df139e6..cad6bec6d 100644 --- a/test/test_cover.sh +++ b/test/test_cover.sh @@ -6,7 +6,7 @@ set -e echo "mode: atomic" > coverage.txt for pkg in ${PKGS[@]}; do - go test -timeout 5m -race -coverprofile=profile.out -covermode=atomic "$pkg" + go test -timeout 5m -race -coverprofile=profile.out "$pkg" if [ -f profile.out ]; then tail -n +2 profile.out >> coverage.txt; rm profile.out From 8023a2aeef0ee4ec5b1dd56ad25408b8d844a86b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 27 Sep 2021 15:38:03 -0400 Subject: [PATCH 043/174] e2e: add generator tests (#7008) --- .github/workflows/coverage.yml | 1 - test/e2e/generator/generate.go | 44 +++++++++++++++--- test/e2e/generator/generate_test.go | 70 +++++++++++++++++++++++++++++ test/e2e/pkg/manifest.go | 5 +++ 4 files changed, 113 insertions(+), 7 deletions(-) create mode 100644 test/e2e/generator/generate_test.go diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 49c76ab98..3553f6356 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -4,7 +4,6 @@ on: push: paths: - "**.go" - - "!test/" branches: - master - release/** diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 6369cf962..f314ae8c0 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -73,7 +73,15 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} switch opts.P2P { case NewP2PMode, LegacyP2PMode, HybridP2PMode: + defer func() { + // avoid modifying the global state. 
+ original := make([]interface{}, len(testnetCombinations["p2p"])) + copy(original, testnetCombinations["p2p"]) + testnetCombinations["p2p"] = original + }() + testnetCombinations["p2p"] = []interface{}{opts.P2P} + default: testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} } @@ -154,13 +162,15 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case "large": // FIXME Networks are kept small since large ones use too much CPU. numSeeds = r.Intn(2) - numLightClients = r.Intn(3) + numLightClients = r.Intn(2) numValidators = 4 + r.Intn(4) numFulls = r.Intn(4) default: return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) } + const legacyP2PFactor float64 = 0.5 + // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { node := generateNode(r, manifest, e2e.ModeSeed, 0, false) @@ -169,18 +179,23 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(5) < 2 + node.UseLegacyP2P = r.Float64() < legacyP2PFactor } manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node } + var ( + numSyncingNodes = 0 + hybridNumNew = 0 + hybridNumLegacy = 0 + ) + // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. We also set up // the initial validator set, and validator set updates for delayed nodes. nextStartAt := manifest.InitialHeight + 5 quorum := numValidators*2/3 + 1 - numSyncingNodes := 0 for i := 1; i <= numValidators; i++ { startAt := int64(0) if i > quorum && numSyncingNodes < 2 && r.Float64() >= 0.25 { @@ -195,7 +210,23 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(5) < 2 + node.UseLegacyP2P = r.Float64() < legacyP2PFactor + if node.UseLegacyP2P { + hybridNumLegacy++ + if hybridNumNew == 0 { + hybridNumNew++ + hybridNumLegacy-- + node.UseLegacyP2P = false + } + } else { + hybridNumNew++ + if hybridNumLegacy == 0 { + hybridNumNew-- + hybridNumLegacy++ + node.UseLegacyP2P = true + + } + } } manifest.Nodes[name] = node @@ -222,7 +253,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // Finally, we generate random full nodes. 
for i := 1; i <= numFulls; i++ { startAt := int64(0) - if r.Float64() >= 0.5 { + if numSyncingNodes < 2 && r.Float64() >= 0.5 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } @@ -232,7 +264,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er case LegacyP2PMode: node.UseLegacyP2P = true case HybridP2PMode: - node.UseLegacyP2P = r.Intn(5) < 2 + node.UseLegacyP2P = r.Float64() > legacyP2PFactor } manifest.Nodes[fmt.Sprintf("full%02d", i)] = node diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go new file mode 100644 index 000000000..b816ba577 --- /dev/null +++ b/test/e2e/generator/generate_test.go @@ -0,0 +1,70 @@ +package main + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +func TestGenerator(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: MixedP2PMode}) + require.NoError(t, err) + require.True(t, len(manifests) >= 64, "insufficient combinations") + + // this just means that the numbers reported by the test + // failures map to the test cases that you'd see locally. + e2e.SortManifests(manifests, false /* ascending */) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + numStateSyncs := 0 + for name, node := range m.Nodes { + if node.StateSync != e2e.StateSyncDisabled { + numStateSyncs++ + } + t.Run(name, func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2) + require.Equal(t, "v0", node.BlockSync) + } + + }) + } + require.True(t, numStateSyncs <= 2) + }) + } + + t.Run("Hybrid", func(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: HybridP2PMode}) + require.NoError(t, err) + require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + require.True(t, len(m.Nodes) > 1) + + var numLegacy, numNew int + for _, node := range m.Nodes { + if node.UseLegacyP2P { + numLegacy++ + } else { + numNew++ + } + } + + assert.True(t, numLegacy >= 1, "not enough legacy nodes [%d/%d]", + numLegacy, len(m.Nodes)) + assert.True(t, numNew >= 1, "not enough new nodes [%d/%d]", + numNew, len(m.Nodes)) + }) + } + }) +} diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index e3e5bb7ee..4492f4037 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -150,6 +150,11 @@ type ManifestNode struct { UseLegacyP2P bool `toml:"use_legacy_p2p"` } +// Stateless reports whether m is a node that does not own state, including light and seed nodes. +func (m ManifestNode) Stateless() bool { + return m.Mode == string(ModeLight) || m.Mode == string(ModeSeed) +} + // Save saves the testnet manifest to a file. 
func (m Manifest) Save(file string) error { f, err := os.Create(file) From 9a16d930c6a090dc4e29f26c82ed5852b8875a00 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 27 Sep 2021 16:46:40 -0400 Subject: [PATCH 044/174] statesync: add logging while waiting for peers (#7007) --- internal/statesync/reactor.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index c66b77793..0d8ab7f4c 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -1006,7 +1006,11 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { startAt := time.Now() t := time.NewTicker(100 * time.Millisecond) defer t.Stop() + logT := time.NewTicker(time.Minute) + defer logT.Stop() + var iter int for r.peers.Len() < numPeers { + iter++ select { case <-ctx.Done(): return fmt.Errorf("operation canceled while waiting for peers after %s", time.Since(startAt)) @@ -1014,6 +1018,14 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { return fmt.Errorf("shutdown while waiting for peers after %s", time.Since(startAt)) case <-t.C: continue + case <-logT.C: + r.Logger.Info("waiting for sufficient peers to start statesync", + "duration", time.Since(startAt).String(), + "target", numPeers, + "peers", r.peers.Len(), + "iters", iter, + ) + continue } } return nil From 6be36613c9785791abad46515698e07d1f32caba Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 27 Sep 2021 17:00:05 -0400 Subject: [PATCH 045/174] e2e: reduce number of stateless nodes in test networks (#7010) --- test/e2e/generator/generate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index f314ae8c0..2c69ea6b8 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -161,7 +161,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er numValidators = 4 case "large": // FIXME Networks are kept small since large ones use too much CPU. - numSeeds = r.Intn(2) + numSeeds = r.Intn(1) numLightClients = r.Intn(2) numValidators = 4 + r.Intn(4) numFulls = r.Intn(4) From 1bd1593f20f41a3fb8deae71592229d77a2706b9 Mon Sep 17 00:00:00 2001 From: lklimek <842586+lklimek@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:32:14 +0200 Subject: [PATCH 046/174] fix: race condition in p2p_switch and pex_reactor (#7015) Closes https://github.com/tendermint/tendermint/issues/7014 --- internal/p2p/pex/pex_reactor.go | 3 +-- internal/p2p/switch.go | 3 +-- libs/cmap/cmap.go | 14 +++++++++++ libs/cmap/cmap_test.go | 44 +++++++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go index 049dbd9f1..9eb58c054 100644 --- a/internal/p2p/pex/pex_reactor.go +++ b/internal/p2p/pex/pex_reactor.go @@ -349,11 +349,10 @@ func (r *Reactor) receiveRequest(src Peer) error { // request out for this peer. 
 func (r *Reactor) RequestAddrs(p Peer) {
 	id := string(p.ID())
-	if r.requestsSent.Has(id) {
+	if _, exists := r.requestsSent.GetOrSet(id, struct{}{}); exists {
 		return
 	}
 	r.Logger.Debug("Request addrs", "from", p)
-	r.requestsSent.Set(id, struct{}{})
 	p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{}))
 }
 
diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go
index eeb93a994..ea1272354 100644
--- a/internal/p2p/switch.go
+++ b/internal/p2p/switch.go
@@ -432,10 +432,9 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 //   - ie. if we're getting ErrDuplicatePeer we can stop
 //     because the addrbook got us the peer back already
 func (sw *Switch) reconnectToPeer(addr *NetAddress) {
-	if sw.reconnecting.Has(string(addr.ID)) {
+	if _, exists := sw.reconnecting.GetOrSet(string(addr.ID), addr); exists {
 		return
 	}
-	sw.reconnecting.Set(string(addr.ID), addr)
 	defer sw.reconnecting.Delete(string(addr.ID))
 
 	start := time.Now()
diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go
index 539870363..5aa82e807 100644
--- a/libs/cmap/cmap.go
+++ b/libs/cmap/cmap.go
@@ -22,6 +22,20 @@ func (cm *CMap) Set(key string, value interface{}) {
 	cm.l.Unlock()
 }
 
+// GetOrSet returns the existing value if present. Otherwise, it stores `newValue` and returns it.
+func (cm *CMap) GetOrSet(key string, newValue interface{}) (value interface{}, alreadyExists bool) {
+
+	cm.l.Lock()
+	defer cm.l.Unlock()
+
+	if v, ok := cm.m[key]; ok {
+		return v, true
+	}
+
+	cm.m[key] = newValue
+	return newValue, false
+}
+
 func (cm *CMap) Get(key string) interface{} {
 	cm.l.Lock()
 	val := cm.m[key]
diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go
index bab78da96..68a052bdb 100644
--- a/libs/cmap/cmap_test.go
+++ b/libs/cmap/cmap_test.go
@@ -3,6 +3,7 @@ package cmap
 import (
 	"fmt"
 	"strings"
+	"sync"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -67,3 +68,46 @@ func BenchmarkCMapHas(b *testing.B) {
 		m.Has(string(rune(i)))
 	}
 }
+
+func TestCMap_GetOrSet_Parallel(t *testing.T) {
+
+	tests := []struct {
+		name        string
+		newValue    interface{}
+		parallelism int
+	}{
+		{"test1", "a", 4},
+		{"test2", "a", 40},
+		{"test3", "a", 1},
+	}
+
+	//nolint:scopelint
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cm := NewCMap()
+
+			wg := sync.WaitGroup{}
+			wg.Add(tt.parallelism)
+			for i := 0; i < tt.parallelism; i++ {
+				go func() {
+					defer wg.Done()
+					gotValue, _ := cm.GetOrSet(tt.name, tt.newValue)
+					assert.EqualValues(t, tt.newValue, gotValue)
+				}()
+			}
+			wg.Wait()
+		})
+	}
+}
+
+func TestCMap_GetOrSet_Exists(t *testing.T) {
+	cm := NewCMap()
+
+	gotValue, exists := cm.GetOrSet("key", 1000)
+	assert.False(t, exists)
+	assert.EqualValues(t, 1000, gotValue)
+
+	gotValue, exists = cm.GetOrSet("key", 2000)
+	assert.True(t, exists)
+	assert.EqualValues(t, 1000, gotValue)
+}
From e35a42fc68156429a0e4f9adbad68c3112358b92 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 28 Sep 2021 10:39:26 -0400
Subject: [PATCH 047/174] e2e: use smaller transactions (#7016)

75% of the failures in the last run occurred with the 10kb transactions.
I'd like to dial the size back and see if things improve.
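[Editor's note, illustrative only and not part of either patch: the `CMap.GetOrSet` helper introduced in PATCH 046 above replaces a racy check-then-act idiom with a single operation under the map's lock. A minimal sketch under that patch's API, with `cm` and `id` as placeholder names:]

    cm := cmap.NewCMap()

    // Racy: two goroutines can both observe Has(id) == false and both
    // fall through, so "only the first caller proceeds" is not guaranteed.
    if !cm.Has(id) {
        cm.Set(id, struct{}{})
        // ... proceed as the (not actually guaranteed) first caller
    }

    // Atomic: the check and the store happen under one lock, so exactly
    // one caller observes exists == false for a given key.
    if _, exists := cm.GetOrSet(id, struct{}{}); !exists {
        // ... proceed; guaranteed to be the first caller for id
    }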
---
 test/e2e/generator/generate.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go
index 2c69ea6b8..81c1be7ef 100644
--- a/test/e2e/generator/generate.go
+++ b/test/e2e/generator/generate.go
@@ -63,7 +63,7 @@ var (
 		"restart": 0.1,
 	}
 	evidence = uniformChoice{0, 1, 10}
-	txSize   = uniformChoice{1024, 10240} // either 1kb or 10kb
+	txSize   = uniformChoice{1024, 4096} // either 1kb or 4kb
 	ipv6     = uniformChoice{false, true}
 	keyType  = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1}
 )
From ea539dcb98345f7af77aaca941004eee8c19cfd4 Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Tue, 28 Sep 2021 07:59:47 -0700
Subject: [PATCH 048/174] Update changelog for v0.35.0-rc2. (#7011)

Also, relinkify and update the bounty URL.

---
 CHANGELOG.md         | 234 +++++++++++++++++++++++--------------------
 CHANGELOG_PENDING.md |  16 +--
 2 files changed, 132 insertions(+), 118 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d4f3d278..2eda330f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,30 @@
 # Changelog
 
-Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
+Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
 
-## v0.35
+## v0.35.0-rc2
+
+September 27, 2021
+
+### BREAKING CHANGES
+
+- Go API
+
+  - [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on
+    deprecated fundamentals. Downstream users should maintain this
+    library. (@tychoish)
+  - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to
+    `internal` to prevent consumption of these internal APIs by
+    external users. (@tychoish)
+
+### FEATURES
+
+- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters).
+
+
+## v0.35.0-rc1
+
+September 8, 2021
 
 Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
 
 ### BREAKING CHANGES
 
 - CLI/RPC/Config
-  - [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
-  - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
-  - [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes)
-  - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
-  - [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
-  - [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
-  - [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
-  - [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
-  - [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
-  - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack.
Peers to be connected on startup (@cmwaters) - - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) - - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106) - - [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - - [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated. - - [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0 + - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) + - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) + - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) + - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) + - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) + - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) + - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) + - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) + - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) + - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) + - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) + - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) + - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. + - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - - [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) - - [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. - - [cli] \#6854 Remove deprecated snake case commands. (@tychoish) + - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. 
(@williambanfield) + - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) - Apps - - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface - - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`. - - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - - [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`. + - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. + - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. + - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. - It is not required any longer to set ldflags to set version strings - - [abci/counter] \#6684 Delete counter example app + - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app - Go API - - [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) - - [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) - - [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) - - [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) - - [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) - - [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish) - - [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. 
(@alexanderbez) - - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) + - [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) + - [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) + - [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) + - [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) + - [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) + - [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish) + - [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez) + - [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker) - - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes) + - [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes) - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker) - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker) - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) - - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters) - - [state] \#5864 Use an iterator when pruning state (@cmwaters) - - [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. + - [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters) + - [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters) + - [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. - - [light] \#6054 Move `MaxRetryAttempt` option from client to provider. + - [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider. 
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) - - [all] \#6077 Change spelling from British English to American (@cmwaters) + - [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters) - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub - Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2 - - [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) - - [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes) - - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) - - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder - - [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) - - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes) + - [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) + - [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder + - [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) + - [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself and `TxInfo`. (@alexanderbez) - - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) - - [types] \#6627 Move `NodeKey` to types to make the type public. - - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - - [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) + - [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) + - [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public. 
+  - [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
+  - [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
 
 - Data Storage
-  - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
-  - [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
-  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
+  - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters)
+  - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
+  - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106)
 
 - Tooling
-  - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
-  - [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
+  - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir instead of the hardcoded PATH. (@JayT106)
+  - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)
 
 ### FEATURES
 
 - [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
-- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
-- [pex] \#6305 v2 pex reactor with backwards compatability. Introduces two new pex messages to
+- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
+- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to
   accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer exchange reactors
   behave the same. (@cmwaters)
-- [crypto] \#6376 Enable sr25519 as a validator key type
-- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
+- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
+- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
   - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
     the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
   - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
     `mempool.version` configuration, where `v1` is the default configuration.
   - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
   - Transactions are gossiped in FIFO order as they are in `v0`.
-- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL.
(@JayT106) -- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106) -- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106) -- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) +- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) +- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) +- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) +- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) ### IMPROVEMENTS -- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish) -- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) -- [types] \#6478 Add `block_id` to `newblock` event (@jeebster) -- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) -- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) -- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) -- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. -- [privval] \#5725 Add gRPC support to private validator. -- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) -- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes) -- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) -- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) -- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes) -- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes) -- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes) -- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes) -- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) -- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon) -- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778) +- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish) +- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. 
(@alexanderbez) +- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster) +- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778) +- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) +- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) +- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. +- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator. +- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) +- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes) +- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) +- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) +- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes) +- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes) +- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes) +- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes) +- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) +- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon) +- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778) - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk. -- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes) -- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis) -- [state] \#6067 Batch save state data (@githubsands & @cmwaters) -- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778) -- [types] \#6120 use batch verification for verifying commits signatures. 
+- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes) +- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis) +- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters) +- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778) +- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commits signatures. - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature. -- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. -- [privval] \#6240 Add `context.Context` to privval interface. -- [rpc] \#6265 set cache control in http-rpc response header (@JayT106) -- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. -- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106) -- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm) -- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778) -- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm) -- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778) -- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters) -- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106) -- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs) -- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) +- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. +- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface. +- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106) +- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. 
+- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106)
+- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm)
+- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778)
+- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm)
+- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778)
+- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
+- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106)
+- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs)
+- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
 - [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
 
 ### BUG FIXES
 
-- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
-- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
-- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
-- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
-- [blocksync] \#6590 Update the metrics during blocksync (@JayT106)
+- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
+- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
+- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
+- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
+- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
 
 ## v0.34.13
 
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 987a760b4..56efd763b 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -1,10 +1,12 @@
 # Unreleased Changes
 
+Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
+
 ## vX.X
 
-Special thanks to external contributors on this release:
+Month, DD, YYYY
 
-Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
+Special thanks to external contributors on this release:
 
 ### BREAKING CHANGES
 
@@ -16,20 +18,10 @@
(@tychoish) - - Blockchain Protocol ### FEATURES -- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters). - ### IMPROVEMENTS ### BUG FIXES - From c18470a5f1114f8c5469c311502050e88d2ca9bb Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 28 Sep 2021 16:47:35 -0400 Subject: [PATCH 049/174] e2e: use network size in load generator (#7019) --- test/e2e/runner/evidence.go | 4 ++-- test/e2e/runner/load.go | 37 ++++++++++++++++--------------------- test/e2e/runner/main.go | 30 ++++++++++++++++++++++-------- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index e6c7c3f7a..25c4a1cc4 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -28,11 +28,11 @@ const lightClientEvidenceRatio = 4 // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of LightClientAttackEvidence and // DuplicateVoteEvidence. -func InjectEvidence(ctx context.Context, testnet *e2e.Testnet, amount int) error { +func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amount int) error { // select a random node var targetNode *e2e.Node - for _, idx := range rand.Perm(len(testnet.Nodes)) { + for _, idx := range r.Perm(len(testnet.Nodes)) { targetNode = testnet.Nodes[idx] if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 5e6e04f89..f31b436dd 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -14,15 +14,15 @@ import ( // Load generates transactions against the network until the given context is // canceled. -func Load(ctx context.Context, testnet *e2e.Testnet) error { +func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error { // Since transactions are executed across all nodes in the network, we need // to reduce transaction load for larger networks to avoid using too much // CPU. This gives high-throughput small networks and low-throughput large ones. // This also limits the number of TCP connections, since each worker has // a connection to all nodes. - concurrency := len(testnet.Nodes) * 8 - if concurrency > 64 { - concurrency = 64 + concurrency := len(testnet.Nodes) * 2 + if concurrency > 32 { + concurrency = 32 } chTx := make(chan types.Tx) @@ -38,7 +38,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { started := time.Now() - go loadGenerate(ctx, chTx, testnet.TxSize) + go loadGenerate(ctx, r, chTx, testnet.TxSize, len(testnet.Nodes)) for w := 0; w < concurrency; w++ { go loadProcess(ctx, testnet, chTx, chSuccess) @@ -85,7 +85,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet) error { // generation is primarily the result of backpressure from the // broadcast transaction, though there is still some timer-based // limiting. -func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { +func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int64, networkSize int) { timer := time.NewTimer(0) defer timer.Stop() defer close(chTx) @@ -101,8 +101,8 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { // This gives a reasonable load without putting too much data in the app. 
id := rand.Int63() % 100 // nolint: gosec - bz := make([]byte, size) - _, err := rand.Read(bz) // nolint: gosec + bz := make([]byte, txSize) + _, err := r.Read(bz) if err != nil { panic(fmt.Sprintf("Failed to read random bytes: %v", err)) } @@ -114,30 +114,25 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, size int64) { case chTx <- tx: // sleep for a bit before sending the // next transaction. - timer.Reset(loadGenerateWaitTime(size)) + timer.Reset(loadGenerateWaitTime(r, networkSize)) } } } -func loadGenerateWaitTime(size int64) time.Duration { +func loadGenerateWaitTime(r *rand.Rand, size int) time.Duration { const ( - min = int64(10 * time.Millisecond) - max = int64(100 * time.Millisecond) + min = int64(250 * time.Millisecond) + max = int64(time.Second) ) var ( - baseJitter = rand.Int63n(max-min+1) + min // nolint: gosec - sizeFactor = size * int64(time.Millisecond) - sizeJitter = rand.Int63n(sizeFactor-min+1) + min // nolint: gosec - waitTime = time.Duration(baseJitter + sizeJitter) + baseJitter = r.Int63n(max-min+1) + min + sizeFactor = int64(size) * min + sizeJitter = r.Int63n(sizeFactor-min+1) + min ) - if size == 1 { - return waitTime / 2 - } - - return waitTime + return time.Duration(baseJitter + sizeJitter) } // loadProcess processes transactions diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 194e4c6e4..fb6ce4a8c 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "math/rand" "os" "strconv" "time" @@ -13,9 +14,9 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -var ( - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) +const randomSeed = 2308084734268 + +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) func main() { NewCLI().Run() @@ -67,6 +68,8 @@ func NewCLI() *CLI { return err } + r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + chLoadResult := make(chan error) ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() @@ -74,7 +77,7 @@ func NewCLI() *CLI { lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - chLoadResult <- Load(lctx, cli.testnet) + chLoadResult <- Load(lctx, r, cli.testnet) }() startAt := time.Now() if err = Start(ctx, cli.testnet); err != nil { @@ -95,7 +98,7 @@ func NewCLI() *CLI { } if cli.testnet.Evidence > 0 { - if err = InjectEvidence(ctx, cli.testnet, cli.testnet.Evidence); err != nil { + if err = InjectEvidence(ctx, r, cli.testnet, cli.testnet.Evidence); err != nil { return err } if err = Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress @@ -212,7 +215,11 @@ func NewCLI() *CLI { Use: "load", Short: "Generates transaction load until the command is canceled", RunE: func(cmd *cobra.Command, args []string) (err error) { - return Load(context.Background(), cli.testnet) + return Load( + cmd.Context(), + rand.New(rand.NewSource(randomSeed)), // nolint: gosec + cli.testnet, + ) }, }) @@ -230,7 +237,12 @@ func NewCLI() *CLI { } } - return InjectEvidence(cmd.Context(), cli.testnet, amount) + return InjectEvidence( + cmd.Context(), + rand.New(rand.NewSource(randomSeed)), // nolint: gosec + cli.testnet, + amount, + ) }, }) @@ -302,10 +314,12 @@ Does not run any perbutations. 
ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() + r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - err := Load(lctx, cli.testnet) + err := Load(lctx, r, cli.testnet) chLoadResult <- err }() From b1dfbb8bc37e1a90551560e8b09078b49ef133c1 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 28 Sep 2021 17:04:37 -0400 Subject: [PATCH 050/174] e2e: generator ensure p2p modes (#7021) --- test/e2e/generator/generate.go | 16 ++++++++--- test/e2e/generator/generate_test.go | 41 +++++++++++++++++++++++++++++ test/e2e/generator/main.go | 3 +-- test/e2e/tests/app_test.go | 14 ++++++---- 4 files changed, 64 insertions(+), 10 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 81c1be7ef..deffb533a 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -81,8 +81,7 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { }() testnetCombinations["p2p"] = []interface{}{opts.P2P} - - default: + case MixedP2PMode: testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} } @@ -332,9 +331,20 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // lastly, set up the light clients for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 - manifest.Nodes[fmt.Sprintf("light%02d", i)] = generateLightNode( + + node := generateLightNode( r, startAt+(5*int64(i)), lightProviders, ) + + switch p2pMode { + case LegacyP2PMode: + node.UseLegacyP2P = true + case HybridP2PMode: + node.UseLegacyP2P = r.Float64() < legacyP2PFactor + } + + manifest.Nodes[fmt.Sprintf("light%02d", i)] = node + } return manifest, nil diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index b816ba577..c38b6b20b 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -47,6 +47,9 @@ func TestGenerator(t *testing.T) { require.NoError(t, err) require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) + // failures map to the test cases that you'd see locally. + e2e.SortManifests(manifests, false /* ascending */) + for idx, m := range manifests { t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { require.True(t, len(m.Nodes) > 1) @@ -67,4 +70,42 @@ func TestGenerator(t *testing.T) { }) } }) + t.Run("UnmixedP2P", func(t *testing.T) { + t.Run("New", func(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: NewP2PMode}) + require.NoError(t, err) + require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) + + // failures map to the test cases that you'd see locally. + e2e.SortManifests(manifests, false /* ascending */) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + for name, node := range m.Nodes { + t.Run(name, func(t *testing.T) { + require.False(t, node.UseLegacyP2P) + }) + } + }) + } + }) + t.Run("Legacy", func(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: LegacyP2PMode}) + require.NoError(t, err) + require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) + + // failures map to the test cases that you'd see locally. 
+ e2e.SortManifests(manifests, false /* ascending */) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + for name, node := range m.Nodes { + t.Run(name, func(t *testing.T) { + require.True(t, node.UseLegacyP2P) + }) + } + }) + } + }) + }) } diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 6e39be820..4668f6a8f 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -38,7 +38,6 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - var opts Options var err error p2pMode, err := cmd.Flags().GetString("p2p") @@ -48,7 +47,7 @@ func NewCLI() *CLI { switch mode := P2PMode(p2pMode); mode { case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - opts.P2P = mode + cli.opts.P2P = mode default: return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index afd4d2d52..ab6f9739e 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -44,13 +44,17 @@ func TestApp_Hash(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - block, err := client.Block(ctx, &info.Response.LastBlockHeight) - require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), - "app hash does not match last block's app hash") - status, err := client.Status(ctx) require.NoError(t, err) + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) + require.NoError(t, err) + + if info.Response.LastBlockHeight == block.Block.Height { + require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), + "app hash does not match last block's app hash") + } + require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, "status out of sync with application") }) From 8758078786f5477b249a2e3b41f4404ff199a512 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 29 Sep 2021 11:19:34 -0400 Subject: [PATCH 051/174] consensus: avoid unbuffered channel in state test (#7025) --- internal/consensus/state.go | 6 +++++- internal/consensus/state_test.go | 17 ++++------------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 46bacf5cc..42900a7d4 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -241,8 +241,12 @@ func (cs *State) GetLastHeight() int64 { // GetRoundState returns a shallow copy of the internal consensus state. func (cs *State) GetRoundState() *cstypes.RoundState { cs.mtx.RLock() + defer cs.mtx.RUnlock() + + // NOTE: this might be dodgy, as RoundState itself isn't thread + // safe as it contains a number of pointers and is explicitly + // not thread safe. 
rs := cs.RoundState // copy - cs.mtx.RUnlock() return &rs } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index b3b7c81a3..9b8f68a5e 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -331,7 +331,7 @@ func TestStateFullRound1(t *testing.T) { t.Error(err) } - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) @@ -361,7 +361,7 @@ func TestStateFullRoundNil(t *testing.T) { cs, vss := randState(config, 1) height, round := cs.Height, cs.Round - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -382,7 +382,7 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote @@ -428,7 +428,7 @@ func TestStateLockNoPOL(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -1971,12 +1971,3 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa } return sub.Out() } - -// subscribe subscribes test client to the given query and returns a channel with cap = 0. -func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return sub.Out() -} From 962caeae65eed0796e21fdf0fbb2109096ea5a2f Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 29 Sep 2021 08:38:52 -0700 Subject: [PATCH 052/174] Make doc site index default to the latest release (#7023) Fix the order of lines in docs/versions so that v0.34 is last (the current release). Related changes: - Update docs/DOCS_README.md to reflect the current state of how we publish the site. - Fix the build-docs target in Makefile to not perturb the package-lock.json during the build. - Fix the Makefile rule to not clobber package-lock.json. --- CONTRIBUTING.md | 9 +++++++++ Makefile | 6 +++--- docs/DOCS_README.md | 24 +++++++++++++++++------- docs/versions | 2 +- 4 files changed, 30 insertions(+), 11 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 23bfafcdf..33b8cf6a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -249,6 +249,7 @@ The author of the original pull request is responsible for solving the conflicts merging the pull request. #### Creating a backport branch + If this is the first release candidate for a major release, you get to have the honor of creating the backport branch! @@ -334,6 +335,14 @@ If there were no release candidates, begin by creating a backport branch, as des - `git tag -a v0.35.0 -m 'Release v0.35.0'` - `git push origin v0.35.0` 7. 
Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. +8. Add the release to the documentation site generator config (see + [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary: + - Start on branch `master`. + - Add a new line at the bottom of [`docs/versions`](./docs/versions) to + ensure the newest release is the default for the landing page. + - Add a new entry to `themeConfig.versions` in + [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the + release in the dropdown versions menu. #### Minor release (point releases) diff --git a/Makefile b/Makefile index a509f3a26..c9b6951f4 100644 --- a/Makefile +++ b/Makefile @@ -131,11 +131,11 @@ generate_test_cert: # generate server cerificate @certstrap request-cert -cn server -ip 127.0.0.1 # self-sign server cerificate with rootCA - @certstrap sign server --CA "root CA" + @certstrap sign server --CA "root CA" # generate client cerificate @certstrap request-cert -cn client -ip 127.0.0.1 # self-sign client cerificate with rootCA - @certstrap sign client --CA "root CA" + @certstrap sign client --CA "root CA" .PHONY: generate_test_cert ############################################################################### @@ -214,7 +214,7 @@ DESTINATION = ./index.html.md build-docs: @cd docs && \ while read -r branch path_prefix; do \ - (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + (git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 04883e462..c1ab1580a 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -2,17 +2,27 @@ The documentation for Tendermint Core is hosted at: -- +- -built from the files in this (`/docs`) directory for -[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively. +built from the files in this [`docs` directory for `master`](https://github.com/tendermint/tendermint/tree/master/docs) +and other supported release branches. ## How It Works -There is a CircleCI job listening for changes in the `/docs` directory, on both -the `master` branch. Any updates to files in this directory -on those branches will automatically trigger a website deployment. Under the hood, -the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo. +There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml) +in the `tendermint/docs` repository that clones and builds the documentation +site from the contents of this `docs` directory, for `master` and for each +supported release branch. Under the hood, this workflow runs `make build-docs` +from the [Makefile](../Makefile#L214). + +The list of supported versions are defined in [`config.js`](./.vuepress/config.js), +which defines the UI menu on the documentation site, and also in +[`docs/versions`](./versions), which determines which branches are built. + +The last entry in the `docs/versions` file determines which version is linked +by default from the generated `index.html`. This should generally be the most +recent release, rather than `master`, so that new users are not confused by +documentation for unreleased features. 
 ## README

diff --git a/docs/versions b/docs/versions
index 7ae4a265a..c88a614bd 100644
--- a/docs/versions
+++ b/docs/versions
@@ -1,4 +1,4 @@
+master master
 v0.32.x v0.32
 v0.33.x v0.33
 v0.34.x v0.34
-master master

From 23fe6fd2f9820c28ce397443e83e4d500ba45900 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 29 Sep 2021 12:38:27 -0400
Subject: [PATCH 053/174] statesync: ensure test network properly configured (#7026)

This test reliably gets hung up on network configuration (which may be
a real issue), but its network setup is hand-cranked and we should
ensure that the test focuses on its core assertions and doesn't fail
for test architecture reasons.
---
 internal/statesync/dispatcher.go   | 13 +++++++++++++
 internal/statesync/reactor.go      |  8 +++++---
 internal/statesync/reactor_test.go | 24 +++++++++++++++++++-----
 3 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go
index 2ff93a023..844cb5e32 100644
--- a/internal/statesync/dispatcher.go
+++ b/internal/statesync/dispatcher.go
@@ -297,3 +297,16 @@ func (l *peerList) All() []types.NodeID {
 	defer l.mtx.Unlock()
 	return l.peers
 }
+
+func (l *peerList) Contains(id types.NodeID) bool {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+
+	for _, p := range l.peers {
+		if id == p {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go
index 0d8ab7f4c..a20c8ca41 100644
--- a/internal/statesync/reactor.go
+++ b/internal/statesync/reactor.go
@@ -254,11 +254,11 @@ func (r *Reactor) OnStop() {
 	// Wait for all p2p Channels to be closed before returning. This ensures we
 	// can easily reason about synchronization of all p2p Channels and ensure no
 	// panics will occur.
+	<-r.peerUpdates.Done()
 	<-r.snapshotCh.Done()
 	<-r.chunkCh.Done()
 	<-r.blockCh.Done()
 	<-r.paramsCh.Done()
-	<-r.peerUpdates.Done()
 }
 
 // Sync runs a state sync, fetching snapshots and providing chunks to the
@@ -1013,9 +1013,11 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error {
 		iter++
 		select {
 		case <-ctx.Done():
-			return fmt.Errorf("operation canceled while waiting for peers after %s", time.Since(startAt))
+			return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]",
+				time.Since(startAt).Seconds(), r.peers.Len(), numPeers)
 		case <-r.closeCh:
-			return fmt.Errorf("shutdown while waiting for peers after %s", time.Since(startAt))
+			return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]",
+				time.Since(startAt).Seconds(), r.peers.Len(), numPeers)
 		case <-t.C:
 			continue
 		case <-logT.C:
diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go
index 7bcfe39af..b90e5fd78 100644
--- a/internal/statesync/reactor_test.go
+++ b/internal/statesync/reactor_test.go
@@ -525,25 +525,39 @@ func TestReactor_StateProviderP2P(t *testing.T) {
 	rts.reactor.cfg.UseP2P = true
 	rts.reactor.cfg.TrustHeight = 1
 	rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash())
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+
+	for _, p := range []types.NodeID{peerA, peerB} {
+		if !rts.reactor.peers.Contains(p) {
+			rts.reactor.peers.Append(p)
+		}
+	}
+	require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured")
+
+	bctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ictx, cancel := context.WithTimeout(bctx, time.Second)
 	defer cancel()
 
 	rts.reactor.mtx.Lock()
-	err := rts.reactor.initStateProvider(ctx, factory.DefaultTestChainID, 1)
+	err := rts.reactor.initStateProvider(ictx, factory.DefaultTestChainID, 1)
 	rts.reactor.mtx.Unlock()
 	require.NoError(t, err)
 	rts.reactor.syncer.stateProvider = rts.reactor.stateProvider
 
-	appHash, err := rts.reactor.stateProvider.AppHash(ctx, 5)
+	actx, cancel := context.WithTimeout(bctx, 10*time.Second)
+	defer cancel()
+
+	appHash, err := rts.reactor.stateProvider.AppHash(actx, 5)
 	require.NoError(t, err)
 	require.Len(t, appHash, 32)
 
-	state, err := rts.reactor.stateProvider.State(ctx, 5)
+	state, err := rts.reactor.stateProvider.State(actx, 5)
 	require.NoError(t, err)
 	require.Equal(t, appHash, state.AppHash)
 	require.Equal(t, types.DefaultConsensusParams(), &state.ConsensusParams)
 
-	commit, err := rts.reactor.stateProvider.Commit(ctx, 5)
+	commit, err := rts.reactor.stateProvider.Commit(actx, 5)
 	require.NoError(t, err)
 	require.Equal(t, commit.BlockID, state.LastBlockID)

From c9d92f5f19f75500c6a4b73b8ec04fe9dd8abb0d Mon Sep 17 00:00:00 2001
From: Tess Rinearson
Date: Wed, 29 Sep 2021 22:02:28 +0200
Subject: [PATCH 054/174] .github: remove tessr and bez from codeowners (#7028)

---
 .github/CODEOWNERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7993419e8..b48354f01 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -7,4 +7,4 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
+* @ebuchman @cmwaters @tychoish @williambanfield @creachadair

From 6a0d9c832a400468f4884423f53e8630357b86b4 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Thu, 30 Sep 2021 12:19:18 -0400
Subject: [PATCH 055/174] blocksync: fix shutdown deadlock issue (#7030)

When shutting down blocksync, it is observed that the process can hang
completely. A dump of running goroutines reveals that this is due to
goroutines not listening on the correct shutdown signal. Namely, the
`poolRoutine` goroutine does not wait on `pool.Quit`. The `poolRoutine`
does not receive any other shutdown signal during `OnStop` because it
must stop before the `r.closeCh` is closed. Currently the `poolRoutine`
listens on the `closeCh`, which will not close until the `poolRoutine`
stops and calls `poolWG.Done()`.

This change also puts the `requestRoutine()` in the `OnStart` method to
make it more visible since it does not rely on anything that is spawned
in the `poolRoutine`.
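As a minimal sketch of the select pattern this change applies (illustrative only: the package and the names `worker`, `closeCh`, `poolQuit`, and `work` are hypothetical, not the reactor's actual identifiers), a long-running goroutine must select on every channel that can signal its shutdown, including the pool's quit channel:

```go
package blocksyncsketch

import "sync"

// worker models a routine like poolRoutine: it must return when either
// its owner's close channel or the pool's quit channel fires. If the
// select omitted poolQuit, a Stop method that shuts down the pool and
// then blocks in wg.Wait() would hang forever.
func worker(wg *sync.WaitGroup, closeCh, poolQuit <-chan struct{}, work <-chan func()) {
	defer wg.Done()
	for {
		select {
		case job := <-work:
			job()
		case <-poolQuit: // the shutdown signal the deadlocked routine was missing
			return
		case <-closeCh:
			return
		}
	}
}
```

The goroutine dump from a hung shutdown shows exactly that shape: `OnStop` blocked in `poolWG.Wait()` while `poolRoutine` sits in its select.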
``` goroutine 183 [semacquire]: sync.runtime_Semacquire(0xc0000d3bd8) runtime/sema.go:56 +0x45 sync.(*WaitGroup).Wait(0xc0000d3bd0) sync/waitgroup.go:130 +0x65 github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStop(0xc0000d3a00) github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:193 +0x47 github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc0000d3a00, 0x0, 0x0) github.com/tendermint/tendermint/libs/service/service.go:171 +0x323 github.com/tendermint/tendermint/node.(*nodeImpl).OnStop(0xc00052c000) github.com/tendermint/tendermint/node/node.go:758 +0xc62 github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc00052c000, 0x0, 0x0) github.com/tendermint/tendermint/libs/service/service.go:171 +0x323 github.com/tendermint/tendermint/cmd/tendermint/commands.NewRunNodeCmd.func1.1() github.com/tendermint/tendermint/cmd/tendermint/commands/run_node.go:143 +0x62 github.com/tendermint/tendermint/libs/os.TrapSignal.func1(0xc000df6d20, 0x7f04a68da900, 0xc0004a8930, 0xc0005a72d8) github.com/tendermint/tendermint/libs/os/os.go:26 +0x102 created by github.com/tendermint/tendermint/libs/os.TrapSignal github.com/tendermint/tendermint/libs/os/os.go:22 +0xe6 goroutine 161 [select]: github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).poolRoutine(0xc0000d3a00, 0x0) github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:464 +0x2b3 created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:174 +0xf1 goroutine 162 [select]: github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).processBlockSyncCh(0xc0000d3a00) github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:310 +0x151 created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:177 +0x54 goroutine 163 [select]: github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).processPeerUpdates(0xc0000d3a00) github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:363 +0x12b created by github.com/tendermint/tendermint/internal/blocksync/v0.(*Reactor).OnStart github.com/tendermint/tendermint/internal/blocksync/v0/reactor.go:178 +0x76 ``` --- internal/blocksync/v0/reactor.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index 286df0ef0..552dcbda5 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -169,6 +169,8 @@ func (r *Reactor) OnStart() error { if err := r.pool.Start(); err != nil { return err } + r.poolWG.Add(1) + go r.requestRoutine() r.poolWG.Add(1) go r.poolRoutine(false) @@ -384,6 +386,9 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error { r.syncStartTime = time.Now() + r.poolWG.Add(1) + go r.requestRoutine() + r.poolWG.Add(1) go r.poolRoutine(true) @@ -394,7 +399,6 @@ func (r *Reactor) requestRoutine() { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() - r.poolWG.Add(1) defer r.poolWG.Done() for { @@ -455,8 +459,6 @@ func (r *Reactor) poolRoutine(stateSynced bool) { defer trySyncTicker.Stop() defer switchToConsensusTicker.Stop() - go r.requestRoutine() - defer r.poolWG.Done() FOR_LOOP: @@ -605,6 +607,8 @@ FOR_LOOP: case <-r.closeCh: break FOR_LOOP + case <-r.pool.Quit(): + break FOR_LOOP } } } From 77052370cce980fa378d7acc414120faf80ab573 Mon Sep 17 
00:00:00 2001
From: "M. J. Fromberger"
Date: Thu, 30 Sep 2021 10:13:32 -0700
Subject: [PATCH 056/174] Update default config template to match mapstructure keys. (#7036)

Fix a couple of cases where we updated the keys in the config reader,
but forgot to update some of their uses in the default template.

Fixes #7031.
---
 config/toml.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/config/toml.go b/config/toml.go
index 986ff1c2a..e4c86c9f4 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -164,10 +164,10 @@ laddr = "{{ .PrivValidator.ListenAddr }}"
 client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}"
 
 # Client key generated while creating certificates for secure connection
-validator-client-key-file = "{{ js .PrivValidator.ClientKey }}"
+client-key-file = "{{ js .PrivValidator.ClientKey }}"
 
 # Path to the Root Certificate Authority used to sign both client and server certificates
-certificate-authority = "{{ js .PrivValidator.RootCA }}"
+root-ca-file = "{{ js .PrivValidator.RootCA }}"
 
 
 #######################################################################
@@ -447,8 +447,8 @@ rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
 trust-height = {{ .StateSync.TrustHeight }}
 trust-hash = "{{ .StateSync.TrustHash }}"
 
-# The trust period should be set so that Tendermint can detect and gossip misbehavior before 
-# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding 
+# The trust period should be set so that Tendermint can detect and gossip misbehavior before
+# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
 # period should suffice.
 trust-period = "{{ .StateSync.TrustPeriod }}"

From bdd815ebc9209e11947353a12557558e81f4e5d5 Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Thu, 30 Sep 2021 10:53:05 -0700
Subject: [PATCH 057/174] Align atomic struct field for compatibility in 32-bit ABIs. (#7037)

The layout of struct fields means that interior fields may not be
properly aligned for 64-bit access.

Fixes #7000.
---
 internal/p2p/conn/connection.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go
index dc5bacc39..339ad7469 100644
--- a/internal/p2p/conn/connection.go
+++ b/internal/p2p/conn/connection.go
@@ -751,13 +751,18 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
 // TODO: lowercase.
 // NOTE: not goroutine-safe.
 type Channel struct {
+	// Exponential moving average.
+	// This field must be accessed atomically.
+	// It is first in the struct to ensure correct alignment.
+	// See https://github.com/tendermint/tendermint/issues/7000.
+	recentlySent int64
+
 	conn          *MConnection
 	desc          ChannelDescriptor
 	sendQueue     chan []byte
 	sendQueueSize int32 // atomic.
 	recving       []byte
 	sending       []byte
-	recentlySent  int64 // exponential moving average
 
 	maxPacketMsgPayloadSize int

From 177850a2c95408d967c9f7c71ef463cf4ed61df9 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Thu, 30 Sep 2021 15:19:10 -0400
Subject: [PATCH 058/174] statesync: remove deadlock on init fail (#7029)

When statesync is stopped during shutdown, it has the possibility of
deadlocking. A dump of goroutines reveals that this is related to the
peerUpdates channel not returning anything on its `Done()` channel when
`OnStop` is called. As this is occurring, `processPeerUpdate` is
attempting to acquire the reactor lock.
It appears that this lock can never be acquired. I looked for the
places where the lock may remain locked accidentally and cleaned them
up in hopes of eradicating the issue. Dumps of the relevant goroutines
may be found below. Note that the line numbers below are relative to
the code in the `v0.35.0-rc1` tag.

```
goroutine 36 [chan receive]:
github.com/tendermint/tendermint/internal/statesync.(*Reactor).OnStop(0xc00058f200)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:243 +0x117
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc00058f200, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/node.(*nodeImpl).OnStop(0xc0001ea240)
        github.com/tendermint/tendermint/node/node.go:769 +0x132
github.com/tendermint/tendermint/libs/service.(*BaseService).Stop(0xc0001ea240, 0x0, 0x0)
        github.com/tendermint/tendermint/libs/service/service.go:171 +0x323
github.com/tendermint/tendermint/cmd/tendermint/commands.NewRunNodeCmd.func1.1()
        github.com/tendermint/tendermint/cmd/tendermint/commands/run_node.go:143 +0x62
github.com/tendermint/tendermint/libs/os.TrapSignal.func1(0xc000629500, 0x7fdb52f96358, 0xc0002b5030, 0xc00000daa0)
        github.com/tendermint/tendermint/libs/os/os.go:26 +0x102
created by github.com/tendermint/tendermint/libs/os.TrapSignal
        github.com/tendermint/tendermint/libs/os/os.go:22 +0xe6

goroutine 188 [semacquire]:
sync.runtime_SemacquireMutex(0xc00026b1cc, 0x0, 0x1)
        runtime/sema.go:71 +0x47
sync.(*Mutex).lockSlow(0xc00026b1c8)
        sync/mutex.go:138 +0x105
sync.(*Mutex).Lock(...)
        sync/mutex.go:81
sync.(*RWMutex).Lock(0xc00026b1c8)
        sync/rwmutex.go:111 +0x90
github.com/tendermint/tendermint/internal/statesync.(*Reactor).processPeerUpdate(0xc00026b080, 0xc000650008, 0x28, 0x124de90, 0x4)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:849 +0x1a5
github.com/tendermint/tendermint/internal/statesync.(*Reactor).processPeerUpdates(0xc00026b080)
        github.com/tendermint/tendermint/internal/statesync/reactor.go:883 +0xab
created by github.com/tendermint/tendermint/internal/statesync.(*Reactor).OnStart
        github.com/tendermint/tendermint/internal/statesync/reactor.go:219 +0xcd
```
---
 internal/statesync/reactor.go     | 10 +++++++---
 internal/statesync/syncer.go      | 13 ++++++++++++-
 internal/statesync/syncer_test.go |  6 ++++--
 3 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go
index a20c8ca41..abfa973b5 100644
--- a/internal/statesync/reactor.go
+++ b/internal/statesync/reactor.go
@@ -280,6 +280,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) {
 	}
 
 	if err := r.initStateProvider(ctx, r.chainID, r.initialHeight); err != nil {
+		r.mtx.Unlock()
 		return sm.State{}, err
 	}
 
@@ -889,17 +890,20 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
 	}
 
 	r.mtx.Lock()
+	defer r.mtx.Unlock()
 	if r.syncer == nil {
-		r.mtx.Unlock()
 		return
 	}
-	defer r.mtx.Unlock()
 
 	switch peerUpdate.Status {
 	case p2p.PeerStatusUp:
 		newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher)
 		r.providers[peerUpdate.NodeID] = newProvider
-		r.syncer.AddPeer(peerUpdate.NodeID)
+		err := r.syncer.AddPeer(peerUpdate.NodeID)
+		if err != nil {
+			r.Logger.Error("error adding peer to syncer", "error", err)
+			return
+		}
 		if sp, ok := r.stateProvider.(*stateProviderP2P); ok {
 			// we do this in a separate routine to not block whilst waiting for the light client to finish
 			// whatever call it's currently executing
diff --git a/internal/statesync/syncer.go
b/internal/statesync/syncer.go
index 68bec6880..b4212961a 100644
--- a/internal/statesync/syncer.go
+++ b/internal/statesync/syncer.go
@@ -141,7 +141,17 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err
 
 // AddPeer adds a peer to the pool. For now we just keep it simple and send a
 // single request to discover snapshots, later we may want to do retries and stuff.
-func (s *syncer) AddPeer(peerID types.NodeID) {
+func (s *syncer) AddPeer(peerID types.NodeID) (err error) {
+	defer func() {
+		// TODO: remove panic recover once AddPeer can no longer accidentally send on
+		// closed channel.
+		// This recover was added to protect against the p2p message being sent
+		// to the snapshot channel after the snapshot channel was closed.
+		if r := recover(); r != nil {
+			err = fmt.Errorf("panic sending peer snapshot request: %v", r)
+		}
+	}()
+
 	s.logger.Debug("Requesting snapshots from peer", "peer", peerID)
 
 	msg := p2p.Envelope{
@@ -153,6 +163,7 @@ func (s *syncer) AddPeer(peerID types.NodeID) {
 	case <-s.closeCh:
 	case s.snapshotCh <- msg:
 	}
+	return err
 }
 
 // RemovePeer removes a peer from the pool.
diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go
index 14297c049..ad902a54c 100644
--- a/internal/statesync/syncer_test.go
+++ b/internal/statesync/syncer_test.go
@@ -77,12 +77,14 @@ func TestSyncer_SyncAny(t *testing.T) {
 	require.Error(t, err)
 
 	// Adding a couple of peers should trigger snapshot discovery messages
-	rts.syncer.AddPeer(peerAID)
+	err = rts.syncer.AddPeer(peerAID)
+	require.NoError(t, err)
 	e := <-rts.snapshotOutCh
 	require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message)
 	require.Equal(t, peerAID, e.To)
 
-	rts.syncer.AddPeer(peerBID)
+	err = rts.syncer.AddPeer(peerBID)
+	require.NoError(t, err)
 	e = <-rts.snapshotOutCh
 	require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message)
 	require.Equal(t, peerBID, e.To)

From 243c62cc68a987095789cae34700235583f24f2d Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Fri, 1 Oct 2021 16:33:12 -0400
Subject: [PATCH 059/174] statesync: improve rare p2p race condition (#7042)

This is intended to fix a test failure that occurs in the p2p state
provider. The issue presents as the state provider timing out waiting
for the consensus params response. The reason that this can occur is
because the statesync reactor has the possibility of attempting to
respond to the params request before the state provider is ready to
read it. This results in the reactor hitting the `default` case seen
here and then never sending on the channel. The stateprovider will then
block waiting for a response and never receive one because the reactor
opted not to send it.
---
 internal/statesync/reactor.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go
index abfa973b5..70b12b7fa 100644
--- a/internal/statesync/reactor.go
+++ b/internal/statesync/reactor.go
@@ -779,7 +779,8 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error {
 		if sp, ok := r.stateProvider.(*stateProviderP2P); ok {
 			select {
 			case sp.paramsRecvCh <- cp:
-			default:
+			case <-time.After(time.Second):
+				return errors.New("failed to send consensus params, stateprovider not ready for response")
 			}
 		} else {
 			r.Logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From)

From f361ce09b31fb1f62c75ea952aaafbb597175344 Mon Sep 17 00:00:00 2001
From: "M. J.
Fromberger" Date: Mon, 4 Oct 2021 08:40:50 -0700 Subject: [PATCH 060/174] Update Go toolchains to 1.17 in Actions workflows. (#7049) --- .github/workflows/coverage.yml | 6 +++--- .github/workflows/e2e-nightly-34x.yml | 2 +- .github/workflows/e2e-nightly-master.yml | 2 +- .github/workflows/e2e.yml | 2 +- .github/workflows/fuzz-nightly.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 6 +++--- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 3553f6356..4a3b89074 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -46,7 +46,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: @@ -69,7 +69,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: @@ -85,7 +85,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.16 + go-version: "1.17" - name: test & coverage report creation run: | cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 1b076d2ea..d43bff12f 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 with: diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 5f5ed9706..95cb859e8 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 5cc605ead..a0f18cd1b 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index c47dc4411..38ca6896d 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dd18e750b..567a607ca 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - name: Build uses: goreleaser/goreleaser-action@v2 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 14d531812..abb1e848e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: @@ -49,7 +49,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: 
actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: @@ -80,7 +80,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - uses: technote-space/get-diff-action@v5 with: From 6276fdcb5d23198a21dd8be9a067c931635c8b11 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 4 Oct 2021 13:04:15 -0400 Subject: [PATCH 061/174] ci: mergify support for 0.35 backports (#7050) --- .github/mergify.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index 6b08c877f..d49264597 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -16,3 +16,12 @@ pull_request_rules: backport: branches: - v0.34.x + - name: backport patches to v0.35.x branch + conditions: + - base=master + - label=S:backport-to-v0.35.x + actions: + backport: + branches: + - v0.35.x + From b30ec89ee98f72407d87062702ce137499633b08 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 4 Oct 2021 10:35:16 -0700 Subject: [PATCH 062/174] Add an e2e workflow for the v0.35.x backport branch. (#7048) --- .github/workflows/e2e-nightly-35x.yml | 74 +++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 .github/workflows/e2e-nightly-35x.yml diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml new file mode 100644 index 000000000..b78812e92 --- /dev/null +++ b/.github/workflows/e2e-nightly-35x.yml @@ -0,0 +1,74 @@ +# Runs randomly generated E2E testnets nightly on v0.35.x. + +# !! If you change something in this file, you probably want +# to update the e2e-nightly-master workflow as well! + +name: e2e-nightly-35x +on: + workflow_dispatch: # allow running workflow manually + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test-2: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + p2p: ['legacy', 'new', 'hybrid'] + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/checkout@v2.3.4 + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. 
+        run: make -j2 docker generator runner tests
+
+      - name: Generate testnets
+        working-directory: test/e2e
+        # When changing -g, also change the matrix groups above
+        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+
+      - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
+        working-directory: test/e2e
+        run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
+
+  e2e-nightly-fail-2:
+    needs: e2e-nightly-test-2
+    if: ${{ failure() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack on failure
+        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+        env:
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+          SLACK_CHANNEL: tendermint-internal
+          SLACK_USERNAME: Nightly E2E Tests
+          SLACK_ICON_EMOJI: ':skull:'
+          SLACK_COLOR: danger
+          SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x
+          SLACK_FOOTER: ''
+
+  e2e-nightly-success: # may turn this off once they seem to pass consistently
+    needs: e2e-nightly-test-2
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack on success
+        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+        env:
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+          SLACK_CHANNEL: tendermint-internal
+          SLACK_USERNAME: Nightly E2E Tests
+          SLACK_ICON_EMOJI: ':white_check_mark:'
+          SLACK_COLOR: good
+          SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x
+          SLACK_FOOTER: ''

From c201e3b54d7db677febe759b4e4121135c087871 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Mon, 4 Oct 2021 15:47:50 -0400
Subject: [PATCH 063/174] scripts: fix authors script to take a ref (#7051)

This script is referenced from the release documentation, so we should
make sure it's functional. This is helpful in generating the "Special
Thanks" section of the changelog.
---
 scripts/authors.sh | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/scripts/authors.sh b/scripts/authors.sh
index 7aafb0127..49251242e 100755
--- a/scripts/authors.sh
+++ b/scripts/authors.sh
@@ -1,16 +1,16 @@
 #! /bin/bash
 
-# Usage:
-# 	`./authors.sh`
-# 	Print a list of all authors who have committed to develop since master.
-#
-# 	`./authors.sh <email>`
-# 	Lookup the email address on Github and print the associated username
+set -euo pipefail
 
-author=$1
+ref=$1
 
-if [[ "$author" == "" ]]; then
-	git log master..develop | grep Author | sort | uniq
+if [[ ! -z "$ref" ]]; then
+	git log master..$ref | grep Author | sort | uniq
 else
-	curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login
+cat << EOF
+Usage:
+	./authors.sh <ref>
+	Print a list of all authors who have committed to the codebase since the supplied commit ref.
+EOF
 fi
+

From cb69ed8135a04949475ac73976fcca994febbcbd Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Mon, 4 Oct 2021 17:12:51 -0400
Subject: [PATCH 064/174] blocksync/v2: remove unsupported reactor (#7046)

This commit should be one of the first to land as part of the v0.36
cycle *after* cutting the 0.35 branch. The blocksync/v2 reactor was
originally implemented as an experiment to produce an implementation of
the blocksync protocol that would be easier to test and validate, but
it was never appropriately operationalized and this implementation was
never fully debugged. When the p2p layer was refactored as part of the
0.35 cycle, the v2 implementation was not refactored and it was left in
the codebase but not removed.
This commit just removes all references to it. --- config/config.go | 18 +- config/config_test.go | 7 - config/toml.go | 5 - internal/blocksync/doc.go | 11 +- .../blocksync/v2/internal/behavior/doc.go | 42 - .../v2/internal/behavior/peer_behaviour.go | 47 - .../v2/internal/behavior/reporter.go | 87 - .../v2/internal/behavior/reporter_test.go | 205 -- internal/blocksync/v2/io.go | 187 -- internal/blocksync/v2/metrics.go | 125 - internal/blocksync/v2/processor.go | 193 -- internal/blocksync/v2/processor_context.go | 112 - internal/blocksync/v2/processor_test.go | 305 --- internal/blocksync/v2/reactor.go | 643 ----- internal/blocksync/v2/reactor_test.go | 533 ---- internal/blocksync/v2/routine.go | 166 -- internal/blocksync/v2/routine_test.go | 163 -- internal/blocksync/v2/scheduler.go | 711 ------ internal/blocksync/v2/scheduler_test.go | 2253 ----------------- internal/blocksync/v2/types.go | 65 - node/node.go | 4 +- node/setup.go | 67 +- test/e2e/networks/ci.toml | 1 + test/e2e/runner/setup.go | 3 +- 24 files changed, 33 insertions(+), 5920 deletions(-) delete mode 100644 internal/blocksync/v2/internal/behavior/doc.go delete mode 100644 internal/blocksync/v2/internal/behavior/peer_behaviour.go delete mode 100644 internal/blocksync/v2/internal/behavior/reporter.go delete mode 100644 internal/blocksync/v2/internal/behavior/reporter_test.go delete mode 100644 internal/blocksync/v2/io.go delete mode 100644 internal/blocksync/v2/metrics.go delete mode 100644 internal/blocksync/v2/processor.go delete mode 100644 internal/blocksync/v2/processor_context.go delete mode 100644 internal/blocksync/v2/processor_test.go delete mode 100644 internal/blocksync/v2/reactor.go delete mode 100644 internal/blocksync/v2/reactor_test.go delete mode 100644 internal/blocksync/v2/routine.go delete mode 100644 internal/blocksync/v2/routine_test.go delete mode 100644 internal/blocksync/v2/scheduler.go delete mode 100644 internal/blocksync/v2/scheduler_test.go delete mode 100644 internal/blocksync/v2/types.go diff --git a/config/config.go b/config/config.go index dfc4836da..79ce56935 100644 --- a/config/config.go +++ b/config/config.go @@ -30,7 +30,6 @@ const ( ModeSeed = "seed" BlockSyncV0 = "v0" - BlockSyncV2 = "v2" MempoolV0 = "v0" MempoolV1 = "v1" @@ -1025,15 +1024,13 @@ func (cfg *StateSyncConfig) ValidateBasic() error { // allows them to catchup quickly by downloading blocks in parallel // and verifying their commits. type BlockSyncConfig struct { - Enable bool `mapstructure:"enable"` - Version string `mapstructure:"version"` + Enable bool `mapstructure:"enable"` } // DefaultBlockSyncConfig returns a default configuration for the block sync service func DefaultBlockSyncConfig() *BlockSyncConfig { return &BlockSyncConfig{ - Enable: true, - Version: BlockSyncV0, + Enable: true, } } @@ -1043,16 +1040,7 @@ func TestBlockSyncConfig() *BlockSyncConfig { } // ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { - switch cfg.Version { - case BlockSyncV0: - return nil - case BlockSyncV2: - return errors.New("blocksync version v2 is no longer supported. 
Please use v0") - default: - return fmt.Errorf("unknown blocksync version %s", cfg.Version) - } -} +func (cfg *BlockSyncConfig) ValidateBasic() error { return nil } //----------------------------------------------------------------------------- // ConsensusConfig diff --git a/config/config_test.go b/config/config_test.go index 075cedc6a..79d627910 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -128,13 +128,6 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { func TestBlockSyncConfigValidateBasic(t *testing.T) { cfg := TestBlockSyncConfig() assert.NoError(t, cfg.ValidateBasic()) - - // tamper with version - cfg.Version = "v2" - assert.Error(t, cfg.ValidateBasic()) - - cfg.Version = "invalid" - assert.Error(t, cfg.ValidateBasic()) } func TestConsensusConfig_ValidateBasic(t *testing.T) { diff --git a/config/toml.go b/config/toml.go index e4c86c9f4..6f07d6537 100644 --- a/config/toml.go +++ b/config/toml.go @@ -477,11 +477,6 @@ fetchers = "{{ .StateSync.Fetchers }}" # and verifying their commits enable = {{ .BlockSync.Enable }} -# Block Sync version to use: -# 1) "v0" (default) - the standard Block Sync implementation -# 2) "v2" - DEPRECATED, please use v0 -version = "{{ .BlockSync.Version }}" - ####################################################### ### Consensus Configuration Options ### ####################################################### diff --git a/internal/blocksync/doc.go b/internal/blocksync/doc.go index 3111130e4..5f84b1261 100644 --- a/internal/blocksync/doc.go +++ b/internal/blocksync/doc.go @@ -13,14 +13,9 @@ will no longer blocksync and thus no longer run the blocksync process. Note, the blocksync reactor Service gossips entire block and relevant data such that each receiving peer may construct the entire view of the blocksync state. -There are currently two versions of the blocksync reactor Service: - -- v0: The initial implementation that is battle-tested, but whose test coverage - is lacking and is not formally verifiable. -- v2: The latest implementation that has much higher test coverage and is formally - verified. However, the current implementation of v2 is not as battle-tested and - is known to have various bugs that could make it unreliable in production - environments. +There is currently only one version of the blocksync reactor Service +that is battle-tested, but whose test coverage is lacking and is not +formally verified. The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This channel is responsible for handling messages that both request blocks and respond diff --git a/internal/blocksync/v2/internal/behavior/doc.go b/internal/blocksync/v2/internal/behavior/doc.go deleted file mode 100644 index c4bd06cce..000000000 --- a/internal/blocksync/v2/internal/behavior/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Package Behavior provides a mechanism for reactors to report behavior of peers. - -Instead of a reactor calling the switch directly it will call the behavior module which will -handle the stoping and marking peer as good on behalf of the reactor. - -There are four different behaviors a reactor can report. - -1. bad message - -type badMessage struct { - explanation string -} - -This message will request the peer be stopped for an error - -2. message out of order - -type messageOutOfOrder struct { - explanation string -} - -This message will request the peer be stopped for an error - -3. consesnsus Vote - -type consensusVote struct { - explanation string -} - -This message will request the peer be marked as good - -4. 
block part - -type blockPart struct { - explanation string -} - -This message will request the peer be marked as good - -*/ -package behavior diff --git a/internal/blocksync/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go deleted file mode 100644 index 90948d888..000000000 --- a/internal/blocksync/v2/internal/behavior/peer_behaviour.go +++ /dev/null @@ -1,47 +0,0 @@ -package behavior - -import "github.com/tendermint/tendermint/types" - -// PeerBehavior is a struct describing a behavior a peer performed. -// `peerID` identifies the peer and reason characterizes the specific -// behavior performed by the peer. -type PeerBehavior struct { - peerID types.NodeID - reason interface{} -} - -type badMessage struct { - explanation string -} - -// BadMessage returns a badMessage PeerBehavior. -func BadMessage(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: badMessage{explanation}} -} - -type messageOutOfOrder struct { - explanation string -} - -// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior. -func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}} -} - -type consensusVote struct { - explanation string -} - -// ConsensusVote returns a consensusVote PeerBehavior. -func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}} -} - -type blockPart struct { - explanation string -} - -// BlockPart returns blockPart PeerBehavior. -func BlockPart(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: blockPart{explanation}} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go deleted file mode 100644 index c150a98d5..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter.go +++ /dev/null @@ -1,87 +0,0 @@ -package behavior - -import ( - "errors" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// Reporter provides an interface for reactors to report the behavior -// of peers synchronously to other components. -type Reporter interface { - Report(behavior PeerBehavior) error -} - -// SwitchReporter reports peer behavior to an internal Switch. -type SwitchReporter struct { - sw *p2p.Switch -} - -// NewSwitchReporter return a new SwitchReporter instance which wraps the Switch. -func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter { - return &SwitchReporter{ - sw: sw, - } -} - -// Report reports the behavior of a peer to the Switch. -func (spbr *SwitchReporter) Report(behavior PeerBehavior) error { - peer := spbr.sw.Peers().Get(behavior.peerID) - if peer == nil { - return errors.New("peer not found") - } - - switch reason := behavior.reason.(type) { - case consensusVote, blockPart: - spbr.sw.MarkPeerAsGood(peer) - case badMessage: - spbr.sw.StopPeerForError(peer, reason.explanation) - case messageOutOfOrder: - spbr.sw.StopPeerForError(peer, reason.explanation) - default: - return errors.New("unknown reason reported") - } - - return nil -} - -// MockReporter is a concrete implementation of the Reporter -// interface used in reactor tests to ensure reactors report the correct -// behavior in manufactured scenarios. 
-type MockReporter struct { - mtx tmsync.RWMutex - pb map[types.NodeID][]PeerBehavior -} - -// NewMockReporter returns a Reporter which records all reported -// behaviors in memory. -func NewMockReporter() *MockReporter { - return &MockReporter{ - pb: map[types.NodeID][]PeerBehavior{}, - } -} - -// Report stores the PeerBehavior produced by the peer identified by peerID. -func (mpbr *MockReporter) Report(behavior PeerBehavior) error { - mpbr.mtx.Lock() - defer mpbr.mtx.Unlock() - mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior) - - return nil -} - -// GetBehaviors returns all behaviors reported on the peer identified by peerID. -func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior { - mpbr.mtx.RLock() - defer mpbr.mtx.RUnlock() - if items, ok := mpbr.pb[peerID]; ok { - result := make([]PeerBehavior, len(items)) - copy(result, items) - - return result - } - - return []PeerBehavior{} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go deleted file mode 100644 index 861a63df0..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package behavior_test - -import ( - "sync" - "testing" - - bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/types" -) - -// TestMockReporter tests the MockReporter's ability to store reported -// peer behavior in memory indexed by the peerID. -func TestMockReporter(t *testing.T) { - var peerID types.NodeID = "MockPeer" - pr := bh.NewMockReporter() - - behaviors := pr.GetBehaviors(peerID) - if len(behaviors) != 0 { - t.Error("Expected to have no behaviors reported") - } - - badMessage := bh.BadMessage(peerID, "bad message") - if err := pr.Report(badMessage); err != nil { - t.Error(err) - } - behaviors = pr.GetBehaviors(peerID) - if len(behaviors) != 1 { - t.Error("Expected the peer have one reported behavior") - } - - if behaviors[0] != badMessage { - t.Error("Expected Bad Message to have been reported") - } -} - -type scriptItem struct { - peerID types.NodeID - behavior bh.PeerBehavior -} - -// equalBehaviors returns true if a and b contain the same PeerBehaviors with -// the same freequencies and otherwise false. -func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool { - aHistogram := map[bh.PeerBehavior]int{} - bHistogram := map[bh.PeerBehavior]int{} - - for _, behavior := range a { - aHistogram[behavior]++ - } - - for _, behavior := range b { - bHistogram[behavior]++ - } - - if len(aHistogram) != len(bHistogram) { - return false - } - - for _, behavior := range a { - if aHistogram[behavior] != bHistogram[behavior] { - return false - } - } - - for _, behavior := range b { - if bHistogram[behavior] != aHistogram[behavior] { - return false - } - } - - return true -} - -// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices -// of peer behaviors can be compared for the behaviors they contain and the -// freequencies that those behaviors occur. 
-func TestEqualPeerBehaviors(t *testing.T) {
-	var (
-		peerID        types.NodeID = "MockPeer"
-		consensusVote              = bh.ConsensusVote(peerID, "voted")
-		blockPart                  = bh.BlockPart(peerID, "blocked")
-		equals                     = []struct {
-			left  []bh.PeerBehavior
-			right []bh.PeerBehavior
-		}{
-			// Empty sets
-			{[]bh.PeerBehavior{}, []bh.PeerBehavior{}},
-			// Single behaviors
-			{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}},
-			// Equal Frequencies
-			{[]bh.PeerBehavior{consensusVote, consensusVote},
-				[]bh.PeerBehavior{consensusVote, consensusVote}},
-			// Equal frequencies different orders
-			{[]bh.PeerBehavior{consensusVote, blockPart},
-				[]bh.PeerBehavior{blockPart, consensusVote}},
-		}
-		unequals = []struct {
-			left  []bh.PeerBehavior
-			right []bh.PeerBehavior
-		}{
-			// Comparing empty sets to non-empty sets
-			{[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}},
-			// Different behaviors
-			{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}},
-			// Same behavior with different frequencies
-			{[]bh.PeerBehavior{consensusVote},
-				[]bh.PeerBehavior{consensusVote, consensusVote}},
-		}
-	)
-
-	for _, test := range equals {
-		if !equalBehaviors(test.left, test.right) {
-			t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
-		}
-	}
-
-	for _, test := range unequals {
-		if equalBehaviors(test.left, test.right) {
-			t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
-		}
-	}
-}
-
-// TestMockPeerBehaviorReporterConcurrency constructs a scenario in which
-// multiple goroutines use the same MockReporter instance.
-// This test reproduces the conditions under which MockReporter will
-// be used within a reactor's `Receive` method, to ensure thread safety.
-func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
-	var (
-		behaviorScript = []struct {
-			peerID    types.NodeID
-			behaviors []bh.PeerBehavior
-		}{
-			{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},
-			{"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
-			{
-				"3",
-				[]bh.PeerBehavior{bh.BlockPart("3", ""),
-					bh.ConsensusVote("3", ""),
-					bh.BlockPart("3", ""),
-					bh.ConsensusVote("3", "")}},
-			{
-				"4",
-				[]bh.PeerBehavior{bh.ConsensusVote("4", ""),
-					bh.ConsensusVote("4", ""),
-					bh.ConsensusVote("4", ""),
-					bh.ConsensusVote("4", "")}},
-			{
-				"5",
-				[]bh.PeerBehavior{bh.BlockPart("5", ""),
-					bh.ConsensusVote("5", ""),
-					bh.BlockPart("5", ""),
-					bh.ConsensusVote("5", "")}},
-		}
-	)
-
-	var receiveWg sync.WaitGroup
-	pr := bh.NewMockReporter()
-	scriptItems := make(chan scriptItem)
-	done := make(chan int)
-	numConsumers := 3
-	for i := 0; i < numConsumers; i++ {
-		receiveWg.Add(1)
-		go func() {
-			defer receiveWg.Done()
-			for {
-				select {
-				case pb := <-scriptItems:
-					if err := pr.Report(pb.behavior); err != nil {
-						t.Error(err)
-					}
-				case <-done:
-					return
-				}
-			}
-		}()
-	}
-
-	var sendingWg sync.WaitGroup
-	sendingWg.Add(1)
-	go func() {
-		defer sendingWg.Done()
-		for _, item := range behaviorScript {
-			for _, reason := range item.behaviors {
-				scriptItems <- scriptItem{item.peerID, reason}
-			}
-		}
-	}()
-
-	sendingWg.Wait()

-	for i := 0; i < numConsumers; i++ {
-		done <- 1
-	}
-
-	receiveWg.Wait()
-
-	for _, items := range behaviorScript {
-		reported := pr.GetBehaviors(items.peerID)
-		if !equalBehaviors(reported, items.behaviors) {
-			t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
-				items.peerID, items.behaviors, reported)
-		}
-	}
-}
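One design point worth noting in MockReporter above: GetBehaviors hands back a copy of the recorded slice, so callers can inspect or mutate the result without racing against concurrent Report calls. A short sketch of a test relying on that, assuming the bh alias used above (the test itself is hypothetical, not part of this patch):

	func TestMockReporterCopySemantics(t *testing.T) {
		pr := bh.NewMockReporter()
		if err := pr.Report(bh.BadMessage("P1", "bad message")); err != nil {
			t.Fatal(err)
		}

		// Mutating the returned slice must not affect the reporter's record.
		got := pr.GetBehaviors("P1")
		got[0] = bh.ConsensusVote("P1", "mutated locally")

		if pr.GetBehaviors("P1")[0] != bh.BadMessage("P1", "bad message") {
			t.Error("expected the recorded behavior to be unchanged")
		}
	}

diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go
deleted file mode 100644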
index b272db3ae..000000000 --- a/internal/blocksync/v2/io.go +++ /dev/null @@ -1,187 +0,0 @@ -package v2 - -import ( - "errors" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/state" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" -) - -var ( - errPeerQueueFull = errors.New("peer queue full") -) - -type iIO interface { - sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error - sendBlockNotFound(height int64, peer p2p.Peer) error - sendStatusResponse(base, height int64, peer p2p.Peer) error - - sendStatusRequest(peer p2p.Peer) error - broadcastStatusRequest() error - - trySwitchToConsensus(state state.State, skipWAL bool) bool -} - -type switchIO struct { - sw *p2p.Switch -} - -func newSwitchIo(sw *p2p.Switch) *switchIO { - return &switchIO{ - sw: sw, - } -} - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and block sync to - // the consensus machine - SwitchToConsensus(state state.State, skipWAL bool) -} - -func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - return errPeerQueueFull - } - return nil -} - -func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{ - Height: height, - Base: base, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { - if block == nil { - panic("trying to send nil block") - } - - bpb, err := block.ToProto() - if err != nil { - return err - } - - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{ - Block: bpb, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { - conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, skipWAL) - } - return ok -} - -func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - 
msgBytes, err := proto.Marshal(msgProto)
-	if err != nil {
-		return err
-	}
-
-	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-		return errPeerQueueFull
-	}
-
-	return nil
-}
-
-func (sio *switchIO) broadcastStatusRequest() error {
-	msgProto := &bcproto.Message{
-		Sum: &bcproto.Message_StatusRequest{
-			StatusRequest: &bcproto.StatusRequest{},
-		},
-	}
-
-	msgBytes, err := proto.Marshal(msgProto)
-	if err != nil {
-		return err
-	}
-
-	// XXX: maybe we should use an io specific peer list here
-	sio.sw.Broadcast(BlockchainChannel, msgBytes)
-
-	return nil
-}
diff --git a/internal/blocksync/v2/metrics.go b/internal/blocksync/v2/metrics.go
deleted file mode 100644
index c68ec6447..000000000
--- a/internal/blocksync/v2/metrics.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package v2
-
-import (
-	"github.com/go-kit/kit/metrics"
-	"github.com/go-kit/kit/metrics/discard"
-	"github.com/go-kit/kit/metrics/prometheus"
-	stdprometheus "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
-	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
-	// package.
-	MetricsSubsystem = "blockchain"
-)
-
-// Metrics contains metrics exposed by this package.
-type Metrics struct {
-	// events_in
-	EventsIn metrics.Counter
-	// events_handled
-	EventsHandled metrics.Counter
-	// events_out
-	EventsOut metrics.Counter
-	// errors_in
-	ErrorsIn metrics.Counter
-	// errors_handled
-	ErrorsHandled metrics.Counter
-	// errors_out
-	ErrorsOut metrics.Counter
-	// events_shed
-	EventsShed metrics.Counter
-	// events_sent
-	EventsSent metrics.Counter
-	// errors_sent
-	ErrorsSent metrics.Counter
-	// errors_shed
-	ErrorsShed metrics.Counter
-}
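PrometheusMetrics below follows the go-kit label convention: labelsAndValues alternates label names and values, the names are registered with Prometheus at construction, and a concrete value can be re-bound per call site with With. A minimal sketch of the intended usage (the namespace "tendermint" and the placeholder value "demo" are illustrative, not taken from this patch):

	func exampleMetricsUsage() {
		// Register a "routine" label; the initial value is a placeholder
		// that call sites re-bind below.
		m := PrometheusMetrics("tendermint", "routine", "demo")

		// Exposed as tendermint_blockchain_events_in{routine="scheduler"}.
		m.EventsIn.With("routine", "scheduler").Add(1)
	}

-// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines.
-// Can we burn in the routine name here?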
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_in", - Help: "Events read from the channel.", - }, labels).With(labelsAndValues...), - EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_handled", - Help: "Events handled", - }, labels).With(labelsAndValues...), - EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_out", - Help: "Events output from routine.", - }, labels).With(labelsAndValues...), - ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_in", - Help: "Errors read from the channel.", - }, labels).With(labelsAndValues...), - ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_handled", - Help: "Errors handled.", - }, labels).With(labelsAndValues...), - ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_out", - Help: "Errors output from routine.", - }, labels).With(labelsAndValues...), - ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_sent", - Help: "Errors sent to routine.", - }, labels).With(labelsAndValues...), - ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_shed", - Help: "Errors dropped from sending.", - }, labels).With(labelsAndValues...), - EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_sent", - Help: "Events sent to routine.", - }, labels).With(labelsAndValues...), - EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_shed", - Help: "Events dropped from sending.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
-func NopMetrics() *Metrics { - return &Metrics{ - EventsIn: discard.NewCounter(), - EventsHandled: discard.NewCounter(), - EventsOut: discard.NewCounter(), - ErrorsIn: discard.NewCounter(), - ErrorsHandled: discard.NewCounter(), - ErrorsOut: discard.NewCounter(), - EventsShed: discard.NewCounter(), - EventsSent: discard.NewCounter(), - ErrorsSent: discard.NewCounter(), - ErrorsShed: discard.NewCounter(), - } -} diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go deleted file mode 100644 index d1850eb1c..000000000 --- a/internal/blocksync/v2/processor.go +++ /dev/null @@ -1,193 +0,0 @@ -package v2 - -import ( - "fmt" - - tmstate "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -// Events generated by the processor: -// block execution failure, event will indicate the peer(s) that caused the error -type pcBlockVerificationFailure struct { - priorityNormal - height int64 - firstPeerID types.NodeID - secondPeerID types.NodeID -} - -func (e pcBlockVerificationFailure) String() string { - return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", - e.height, e.firstPeerID, e.secondPeerID) -} - -// successful block execution -type pcBlockProcessed struct { - priorityNormal - height int64 - peerID types.NodeID -} - -func (e pcBlockProcessed) String() string { - return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) -} - -// processor has finished -type pcFinished struct { - priorityNormal - blocksSynced int - tmState tmstate.State -} - -func (p pcFinished) Error() string { - return "finished" -} - -type queueItem struct { - block *types.Block - peerID types.NodeID -} - -type blockQueue map[int64]queueItem - -type pcState struct { - // blocks waiting to be processed - queue blockQueue - - // draining indicates that the next rProcessBlock event with a queue miss constitutes completion - draining bool - - // the number of blocks successfully synced by the processor - blocksSynced int - - // the processorContext which contains the processor dependencies - context processorContext -} - -func (state *pcState) String() string { - return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d", - state.height(), len(state.queue), state.draining, state.blocksSynced) -} - -// newPcState returns a pcState initialized with the last verified block enqueued -func newPcState(context processorContext) *pcState { - return &pcState{ - queue: blockQueue{}, - draining: false, - blocksSynced: 0, - context: context, - } -} - -// nextTwo returns the next two unverified blocks -func (state *pcState) nextTwo() (queueItem, queueItem, error) { - if first, ok := state.queue[state.height()+1]; ok { - if second, ok := state.queue[state.height()+2]; ok { - return first, second, nil - } - } - return queueItem{}, queueItem{}, fmt.Errorf("not found") -} - -// synced returns true when at most the last verified block remains in the queue -func (state *pcState) synced() bool { - return len(state.queue) <= 1 -} - -func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) { - if item, ok := state.queue[height]; ok { - panic(fmt.Sprintf( - "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", - height, block.Hash(), peerID, item.block.Hash(), item.peerID)) - } - - state.queue[height] = queueItem{block: block, peerID: peerID} -} - -func (state *pcState) height() int64 { - return state.context.tmState().LastBlockHeight -} - -// purgePeer moves 
all unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID types.NodeID) { - // what if height is less than state.height? - for height, item := range state.queue { - if item.peerID == peerID { - delete(state.queue, height) - } - } -} - -// handle processes FSM events -func (state *pcState) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - state.context.setState(event.state) - return noOp, nil - - case scFinishedEv: - if state.synced() { - return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil - } - state.draining = true - return noOp, nil - - case scPeerError: - state.purgePeer(event.peerID) - return noOp, nil - - case scBlockReceived: - if event.block == nil { - return noOp, nil - } - - // enqueue block if height is higher than state height, else ignore it - if event.block.Height > state.height() { - state.enqueue(event.peerID, event.block, event.block.Height) - } - return noOp, nil - - case rProcessBlock: - tmstate := state.context.tmState() - firstItem, secondItem, err := state.nextTwo() - if err != nil { - if state.draining { - return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil - } - return noOp, nil - } - - var ( - first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} - ) - - // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmstate.ChainID, firstID, first.Height, second.LastCommit) - if err != nil { - state.purgePeer(firstItem.peerID) - if firstItem.peerID != secondItem.peerID { - state.purgePeer(secondItem.peerID) - } - return pcBlockVerificationFailure{ - height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, - nil - } - - state.context.saveBlock(first, firstParts, second.LastCommit) - - if err := state.context.applyBlock(firstID, first); err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - - state.context.recordConsMetrics(first) - - delete(state.queue, first.Height) - state.blocksSynced++ - - return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } - - return noOp, nil -} diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go deleted file mode 100644 index 86b380f98..000000000 --- a/internal/blocksync/v2/processor_context.go +++ /dev/null @@ -1,112 +0,0 @@ -package v2 - -import ( - "fmt" - - "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - tmState() state.State - setState(state.State) - recordConsMetrics(block *types.Block) -} - -type pContext struct { - store blockStore - applier blockApplier - state state.State - metrics *consensus.Metrics -} - -func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *consensus.Metrics) *pContext { - return &pContext{ - store: st, - applier: ex, - state: s, - metrics: m, - } -} - -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := 
pc.applier.ApplyBlock(pc.state, blockID, block) - pc.state = newState - return err -} - -func (pc pContext) tmState() state.State { - return pc.state -} - -func (pc *pContext) setState(state state.State) { - pc.state = state -} - -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) -} - -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - pc.store.SaveBlock(block, blockParts, seenCommit) -} - -func (pc *pContext) recordConsMetrics(block *types.Block) { - pc.metrics.RecordConsMetrics(block) -} - -type mockPContext struct { - applicationBL []int64 - verificationBL []int64 - state state.State -} - -func newMockProcessorContext( - state state.State, - verificationBlackList []int64, - applicationBlackList []int64) *mockPContext { - return &mockPContext{ - applicationBL: applicationBlackList, - verificationBL: verificationBlackList, - state: state, - } -} - -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { - for _, h := range mpc.applicationBL { - if h == block.Height { - return fmt.Errorf("generic application error") - } - } - mpc.state.LastBlockHeight = block.Height - return nil -} - -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - for _, h := range mpc.verificationBL { - if h == height { - return fmt.Errorf("generic verification error") - } - } - return nil -} - -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - -} - -func (mpc *mockPContext) setState(state state.State) { - mpc.state = state -} - -func (mpc *mockPContext) tmState() state.State { - return mpc.state -} - -func (mpc *mockPContext) recordConsMetrics(block *types.Block) { - -} diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go deleted file mode 100644 index 7c12b3610..000000000 --- a/internal/blocksync/v2/processor_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tmstate "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. -type pcBlock struct { - pid string - height int64 -} - -// params is a test structure used to create processor state. -type params struct { - height int64 - items []pcBlock - blocksSynced int - verBL []int64 - appBL []int64 - draining bool -} - -// makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// makeState takes test parameters and creates a specific processor state. 
-func makeState(p *params) *pcState { - var ( - tmState = tmstate.State{LastBlockHeight: p.height} - context = newMockProcessorContext(tmState, p.verBL, p.appBL) - ) - state := newPcState(context) - - for _, item := range p.items { - state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height) - } - - state.blocksSynced = p.blocksSynced - state.draining = p.draining - return state -} - -func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived { - return scBlockReceived{ - peerID: peerID, - block: makePcBlock(height), - } -} - -type pcFsmMakeStateValues struct { - currentState *params - event Event - wantState *params - wantNextEvent Event - wantErr error - wantPanic bool -} - -type testFields struct { - name string - steps []pcFsmMakeStateValues -} - -func executeProcessorTests(t *testing.T, tests []testFields) { - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var state *pcState - for _, step := range tt.steps { - defer func() { - r := recover() - if (r != nil) != step.wantPanic { - t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic) - } - }() - - // First step must always initialize the currentState as state. - if step.currentState != nil { - state = makeState(step.currentState) - } - if state == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := state.handle(step.event) - t.Log(state) - assert.Equal(t, step.wantErr, err) - assert.Equal(t, makeState(step.wantState), state) - assert.Equal(t, step.wantNextEvent, nextEvent) - // Next step may use the wantedState as their currentState. - state = makeState(step.wantState) - } - }) - } -} - -func TestRProcessPeerError(t *testing.T) { - tests := []testFields{ - { - name: "error for existing peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P2"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, - wantNextEvent: noOp, - }, - }, - }, - { - name: "error for unknown peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P3"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestPcBlockResponse(t *testing.T) { - tests := []testFields{ - { - name: "add one block", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 1), - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp, - }, - }, - }, - - { - name: "add two blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 3), - wantState: ¶ms{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp, - }, - { // use previous wantState as currentState, - event: mBlockResponse("P1", 4), - wantState: ¶ms{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockSuccess(t *testing.T) { - tests := []testFields{ - { - name: "noop - no blocks over current height", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: rProcessBlock{}, - wantState: ¶ms{}, wantNextEvent: noOp, - }, - }, - }, - { - name: "noop - high new blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp, - }, - }, - }, - { - name: "blocks H+1 and H+2 
present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present after draining", - steps: []pcFsmMakeStateValues{ - { // some contiguous blocks - on stop check draining is set - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, - event: scFinishedEv{}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true}, - wantNextEvent: noOp, - }, - { - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - { // finish when H+1 or/and H+2 are missing - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 1}, blocksSynced: 1}, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockFailures(t *testing.T) { - tests := []testFields{ - { - name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, - verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}}, - event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestScFinishedEv(t *testing.T) { - tests := []testFields{ - { - name: "no blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "maxHeight+1 block present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - 
name: "more blocks present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true}, - wantNextEvent: noOp, - wantErr: nil, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go deleted file mode 100644 index f8dcbcf64..000000000 --- a/internal/blocksync/v2/reactor.go +++ /dev/null @@ -1,643 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/internal/blocksync" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/internal/consensus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" -) - -const ( - // chBufferSize is the buffer size of all event channels. - chBufferSize int = 1000 -) - -type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) - Base() int64 - Height() int64 -} - -// BlockchainReactor handles block sync protocol. -type BlockchainReactor struct { - p2p.BaseReactor - - blockSync *sync.AtomicBool // enable block sync on start when it's been Set - stateSynced bool // set to true when SwitchToBlockSync is called by state sync - scheduler *Routine - processor *Routine - logger log.Logger - - mtx tmsync.RWMutex - maxPeerHeight int64 - syncHeight int64 - events chan Event // non-nil during a block sync - - reporter behavior.Reporter - io iIO - store blockStore - - syncStartTime time.Time - syncStartHeight int64 - lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks -} - -type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) -} - -// XXX: unify naming in this package around tmState -func newReactor(state state.State, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, blockSync bool, metrics *consensus.Metrics) *BlockchainReactor { - initHeight := state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = state.InitialHeight - } - scheduler := newScheduler(initHeight, time.Now()) - pContext := newProcessorContext(store, blockApplier, state, metrics) - // TODO: Fix naming to just newProcesssor - // newPcState requires a processorContext - processor := newPcState(pContext) - - return &BlockchainReactor{ - scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), - processor: newRoutine("processor", processor.handle, chBufferSize), - store: store, - reporter: reporter, - logger: log.NewNopLogger(), - blockSync: sync.NewBool(blockSync), - syncStartHeight: initHeight, - syncStartTime: time.Time{}, - lastSyncRate: 0, - } -} - -// NewBlockchainReactor creates a new reactor instance. 
-func NewBlockchainReactor( - state state.State, - blockApplier blockApplier, - store blockStore, - blockSync bool, - metrics *consensus.Metrics) *BlockchainReactor { - reporter := behavior.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, blockSync, metrics) -} - -// SetSwitch implements Reactor interface. -func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - r.Switch = sw - if sw != nil { - r.io = newSwitchIo(sw) - } else { - r.io = nil - } -} - -func (r *BlockchainReactor) setMaxPeerHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - if height > r.maxPeerHeight { - r.maxPeerHeight = height - } -} - -func (r *BlockchainReactor) setSyncHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.syncHeight = height -} - -// SyncHeight returns the height to which the BlockchainReactor has synced. -func (r *BlockchainReactor) SyncHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.syncHeight -} - -// SetLogger sets the logger of the reactor. -func (r *BlockchainReactor) SetLogger(logger log.Logger) { - r.logger = logger - r.scheduler.setLogger(logger) - r.processor.setLogger(logger) -} - -// Start implements cmn.Service interface -func (r *BlockchainReactor) Start() error { - r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch) - if r.blockSync.IsSet() { - err := r.startSync(nil) - if err != nil { - return fmt.Errorf("failed to start block sync: %w", err) - } - } - return nil -} - -// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil, -// the scheduler and processor is updated with this state on startup. -func (r *BlockchainReactor) startSync(state *state.State) error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - return errors.New("block sync already in progress") - } - r.events = make(chan Event, chBufferSize) - go r.scheduler.start() - go r.processor.start() - if state != nil { - <-r.scheduler.ready() - <-r.processor.ready() - r.scheduler.send(bcResetState{state: *state}) - r.processor.send(bcResetState{state: *state}) - } - go r.demux(r.events) - return nil -} - -// endSync ends a block sync -func (r *BlockchainReactor) endSync() { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - close(r.events) - } - r.events = nil - r.scheduler.stop() - r.processor.stop() -} - -// SwitchToBlockSync is called by the state sync reactor when switching to block sync. 
-func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error { - r.stateSynced = true - state = state.Copy() - - err := r.startSync(&state) - if err == nil { - r.syncStartTime = time.Now() - } - - return err -} - -// reactor generated ticker events: -// ticker for cleaning peers -type rTryPrunePeer struct { - priorityHigh - time time.Time -} - -func (e rTryPrunePeer) String() string { - return fmt.Sprintf("rTryPrunePeer{%v}", e.time) -} - -// ticker event for scheduling block requests -type rTrySchedule struct { - priorityHigh - time time.Time -} - -func (e rTrySchedule) String() string { - return fmt.Sprintf("rTrySchedule{%v}", e.time) -} - -// ticker for block processing -type rProcessBlock struct { - priorityNormal -} - -func (e rProcessBlock) String() string { - return "rProcessBlock" -} - -// reactor generated events based on blockchain related messages from peers: -// blockResponse message received from a peer -type bcBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - size int64 - block *types.Block -} - -func (resp bcBlockResponse) String() string { - return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", - resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) -} - -// blockNoResponse message received from a peer -type bcNoBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - height int64 -} - -func (resp bcNoBlockResponse) String() string { - return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", - resp.peerID, resp.height, resp.time) -} - -// statusResponse message received from a peer -type bcStatusResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - base int64 - height int64 -} - -func (resp bcStatusResponse) String() string { - return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", - resp.peerID, resp.height, resp.base, resp.time) -} - -// new peer is connected -type bcAddNewPeer struct { - priorityNormal - peerID types.NodeID -} - -func (resp bcAddNewPeer) String() string { - return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) -} - -// existing peer is removed -type bcRemovePeer struct { - priorityHigh - peerID types.NodeID - reason interface{} -} - -func (resp bcRemovePeer) String() string { - return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) -} - -// resets the scheduler and processor state, e.g. following a switch from state syncing -type bcResetState struct { - priorityHigh - state state.State -} - -func (e bcResetState) String() string { - return fmt.Sprintf("bcResetState{%v}", e.state) -} - -// Takes the channel as a parameter to avoid race conditions on r.events. 
-func (r *BlockchainReactor) demux(events <-chan Event) { - var lastHundred = time.Now() - - var ( - processBlockFreq = 20 * time.Millisecond - doProcessBlockCh = make(chan struct{}, 1) - doProcessBlockTk = time.NewTicker(processBlockFreq) - ) - defer doProcessBlockTk.Stop() - - var ( - prunePeerFreq = 1 * time.Second - doPrunePeerCh = make(chan struct{}, 1) - doPrunePeerTk = time.NewTicker(prunePeerFreq) - ) - defer doPrunePeerTk.Stop() - - var ( - scheduleFreq = 20 * time.Millisecond - doScheduleCh = make(chan struct{}, 1) - doScheduleTk = time.NewTicker(scheduleFreq) - ) - defer doScheduleTk.Stop() - - var ( - statusFreq = 10 * time.Second - doStatusCh = make(chan struct{}, 1) - doStatusTk = time.NewTicker(statusFreq) - ) - defer doStatusTk.Stop() - doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers - - // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. - var scSchedulerFailErr error - - // XXX: Extract timers to make testing atemporal - for { - select { - // Pacers: send at most per frequency but don't saturate - case <-doProcessBlockTk.C: - select { - case doProcessBlockCh <- struct{}{}: - default: - } - case <-doPrunePeerTk.C: - select { - case doPrunePeerCh <- struct{}{}: - default: - } - case <-doScheduleTk.C: - select { - case doScheduleCh <- struct{}{}: - default: - } - case <-doStatusTk.C: - select { - case doStatusCh <- struct{}{}: - default: - } - - // Tickers: perform tasks periodically - case <-doScheduleCh: - r.scheduler.send(rTrySchedule{time: time.Now()}) - case <-doPrunePeerCh: - r.scheduler.send(rTryPrunePeer{time: time.Now()}) - case <-doProcessBlockCh: - r.processor.send(rProcessBlock{}) - case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } - - // Events from peers. Closing the channel signals event loop termination. - case event, ok := <-events: - if !ok { - r.logger.Info("Stopping event processing") - return - } - switch event := event.(type) { - case bcStatusResponse: - r.setMaxPeerHeight(event.height) - r.scheduler.send(event) - case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: - r.scheduler.send(event) - default: - r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from scheduler - case event := <-r.scheduler.next(): - switch event := event.(type) { - case scBlockReceived: - r.processor.send(event) - case scPeerError: - r.processor.send(event) - if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil { - r.logger.Error("Error reporting peer", "err", err) - } - case scBlockRequest: - peer := r.Switch.Peers().Get(event.peerID) - if peer == nil { - r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) - continue - } - if err := r.io.sendBlockRequest(peer, event.height); err != nil { - r.logger.Error("Error sending block request", "err", err) - } - case scFinishedEv: - r.processor.send(event) - r.scheduler.stop() - case scSchedulerFail: - if scSchedulerFailErr != event.reason { - r.logger.Error("Scheduler failure", "err", event.reason.Error()) - scSchedulerFailErr = event.reason - } - case scPeersPruned: - // Remove peers from the processor. 
- for _, peerID := range event.peers { - r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) - } - r.logger.Debug("Pruned peers", "count", len(event.peers)) - case noOpEvent: - default: - r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from processor - case event := <-r.processor.next(): - switch event := event.(type) { - case pcBlockProcessed: - r.setSyncHeight(event.height) - if (r.syncHeight-r.syncStartHeight)%100 == 0 { - newSyncRate := 100 / time.Since(lastHundred).Seconds() - if r.lastSyncRate == 0 { - r.lastSyncRate = newSyncRate - } else { - r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate - } - r.logger.Info("block sync Rate", "height", r.syncHeight, - "max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate) - lastHundred = time.Now() - } - r.scheduler.send(event) - case pcBlockVerificationFailure: - r.scheduler.send(event) - case pcFinished: - r.logger.Info("block sync complete, switching to consensus") - if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { - r.logger.Error("Failed to switch to consensus reactor") - } - r.endSync() - r.blockSync.UnSet() - return - case noOpEvent: - default: - r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) - } - - // Terminal event from scheduler - case err := <-r.scheduler.final(): - switch err { - case nil: - r.logger.Info("Scheduler stopped") - default: - r.logger.Error("Scheduler aborted with error", "err", err) - } - - // Terminal event from processor - case err := <-r.processor.final(): - switch err { - case nil: - r.logger.Info("Processor stopped") - default: - r.logger.Error("Processor aborted with error", "err", err) - } - } - } -} - -// Stop implements cmn.Service interface. -func (r *BlockchainReactor) Stop() error { - r.logger.Info("reactor stopping") - r.endSync() - r.logger.Info("reactor stopped") - return nil -} - -// Receive implements Reactor by handling different message types. -// XXX: do not call any methods that can block or incur heavy processing. 
-// https://github.com/tendermint/tendermint/issues/2888 -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - logger := r.logger.With("src", src.ID(), "chID", chID) - - msgProto := new(bcproto.Message) - - if err := proto.Unmarshal(msgBytes, msgProto); err != nil { - logger.Error("error decoding message", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - if err := msgProto.Validate(); err != nil { - logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - r.logger.Debug("received", "msg", msgProto) - - switch msg := msgProto.Sum.(type) { - case *bcproto.Message_StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { - logger.Error("Could not send status message to src peer") - } - - case *bcproto.Message_BlockRequest: - block := r.store.LoadBlock(msg.BlockRequest.Height) - if block != nil { - if err := r.io.sendBlockToPeer(block, src); err != nil { - logger.Error("Could not send block message to src peer", "err", err) - } - } else { - logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height) - if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil { - logger.Error("Couldn't send block not found msg", "err", err) - } - } - - case *bcproto.Message_StatusResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcStatusResponse{ - peerID: src.ID(), - base: msg.StatusResponse.Base, - height: msg.StatusResponse.Height, - } - } - r.mtx.RUnlock() - - case *bcproto.Message_BlockResponse: - bi, err := types.BlockFromProto(msg.BlockResponse.Block) - if err != nil { - logger.Error("error transitioning block from protobuf", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - r.mtx.RLock() - if r.events != nil { - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: bi, - size: int64(len(msgBytes)), - time: time.Now(), - } - } - r.mtx.RUnlock() - - case *bcproto.Message_NoBlockResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcNoBlockResponse{ - peerID: src.ID(), - height: msg.NoBlockResponse.Height, - time: time.Now(), - } - } - r.mtx.RUnlock() - } -} - -// AddPeer implements Reactor interface -func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) - if err != nil { - r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err) - } - - err = r.io.sendStatusRequest(peer) - if err != nil { - r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcAddNewPeer{peerID: peer.ID()} - } -} - -// RemovePeer implements Reactor interface. 
-func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcRemovePeer{ - peerID: peer.ID(), - reason: reason, - } - } -} - -// GetChannels implements Reactor -func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 5, - SendQueueCapacity: 2000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: blocksync.MaxMsgSize, - }, - } -} - -func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.maxPeerHeight -} - -func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration { - if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { - return time.Duration(0) - } - return time.Since(r.syncStartTime) -} - -func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration { - if !r.blockSync.IsSet() { - return time.Duration(0) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - - targetSyncs := r.maxPeerHeight - r.syncStartHeight - currentSyncs := r.syncHeight - r.syncStartHeight + 1 - if currentSyncs < 0 || r.lastSyncRate < 0.001 { - return time.Duration(0) - } - - remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate - - return time.Duration(int64(remain * float64(time.Second))) -} diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go deleted file mode 100644 index 579a46c19..000000000 --- a/internal/blocksync/v2/reactor_test.go +++ /dev/null @@ -1,533 +0,0 @@ -package v2 - -import ( - "fmt" - "net" - "os" - "sync" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - abciclient "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/internal/proxy" - sm "github.com/tendermint/tendermint/internal/state" - sf "github.com/tendermint/tendermint/internal/state/test/factory" - tmstore "github.com/tendermint/tendermint/internal/store" - "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" -) - -type mockPeer struct { - service.Service - id types.NodeID -} - -func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() types.NodeID { return mp.id } -func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } -func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } - -func (mp mockPeer) IsOutbound() bool { return true } -func (mp mockPeer) IsPersistent() bool { return true } -func (mp mockPeer) CloseConn() error { return nil } - -func (mp mockPeer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: "", - ListenAddr: "", - } -} -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } - -func (mp mockPeer) Send(byte, []byte) bool { return 
true } -func (mp mockPeer) TrySend(byte, []byte) bool { return true } - -func (mp mockPeer) Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return struct{}{} } - -//nolint:unused -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -//nolint:unused -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -//nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - return ml.blocks[height] -} - -//nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} - -type mockBlockApplier struct { -} - -// XXX: Add whitelist/blacklist? -func (mba *mockBlockApplier) ApplyBlock( - state sm.State, blockID types.BlockID, block *types.Block, -) (sm.State, error) { - state.LastBlockHeight++ - return state, nil -} - -type mockSwitchIo struct { - mtx sync.Mutex - switchedToConsensus bool - numStatusResponse int - numBlockResponse int - numNoBlockResponse int - numStatusRequest int -} - -var _ iIO = (*mockSwitchIo)(nil) - -func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { - return nil -} - -func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numNoBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.switchedToConsensus = true - return true -} - -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil -} - -func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusRequest++ - return nil -} - -type testReactorParams struct { - logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator - startHeight int64 - mockA bool -} - -func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { - store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight) - reporter := behavior.NewMockReporter() - - var appl blockApplier - - if p.mockA { - appl = &mockBlockApplier{} - } else { - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - require.NoError(t, err) - db := dbm.NewMemDB() - stateStore := sm.NewStore(db) - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - appl = sm.NewBlockExecutor( - stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - } - - r := newReactor(state, store, reporter, appl, true, consensus.NopMetrics()) - logger := log.TestingLogger() - r.SetLogger(logger.With("module", "blockchain")) - - return r -} - -// This test is left here and not deleted to retain the termination cases for -// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). 
-// func TestReactorTerminationScenarios(t *testing.T) { - -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) - -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } - -// type testEvent struct { -// evType string -// peer string -// height int64 -// } - -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: 
"BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } - -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behavior.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) - -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: p2p.ID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } - -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// reactor.Stop() -// }) -// } -// } - -func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) - - cfg := config.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) - - params := testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - startHeight: 20, - mockA: true, - } - - type testEvent struct { - peer string - event interface{} - } - - tests := []struct { - name string - params testReactorParams - msgs []testEvent - }{ - { - name: "status request", - params: params, - msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - reactor := newTestReactor(t, params) - mockSwitch := &mockSwitchIo{switchedToConsensus: false} - reactor.io = mockSwitch - err := reactor.Start() - require.NoError(t, err) - - for i := 0; i < len(tt.msgs); i++ { - step := tt.msgs[i] - switch ev := step.event.(type) { - case bcproto.StatusRequest: - old := mockSwitch.numStatusResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: - if ev.Height > params.startHeight { - old := mockSwitch.numNoBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) - } else { - old := 
mockSwitch.numBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numBlockResponse) - } - } - } - err = reactor.Stop() - require.NoError(t, err) - }) - } -} - -func TestReactorSetSwitchNil(t *testing.T) { - cfg := config.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) - - reactor := newTestReactor(t, testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - }) - reactor.SetSwitch(nil) - - assert.Nil(t, reactor.Switch) - assert.Nil(t, reactor.io) -} - -type testApp struct { - abci.BaseApplication -} - -func newReactorStore( - t *testing.T, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) { - t.Helper() - - require.Len(t, privVals, 1) - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - stateDB := dbm.NewMemDB() - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisState(genDoc) - require.NoError(t, err) - - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - - // add blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote, err := factory.MakeVote( - privVals[0], - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - time.Now(), - ) - require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) - } - - thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) - require.NoError(t, err) - - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } - return blockStore, state, blockExec -} diff --git a/internal/blocksync/v2/routine.go b/internal/blocksync/v2/routine.go deleted file mode 100644 index e4ca52add..000000000 --- a/internal/blocksync/v2/routine.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync/atomic" - - "github.com/Workiva/go-datastructures/queue" - - "github.com/tendermint/tendermint/libs/log" -) - -type handleFunc = func(event Event) (Event, error) - -const historySize = 25 - -// Routine is a structure that models a finite state machine as serialized -// stream of events processed by a handle function. This Routine structure -// handles the concurrency and messaging guarantees. Events are sent via -// `send` are handled by the `handle` function to produce an iterator -// `next()`. 
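-// For example (a sketch only — `handler` stands for any handleFunc that
-// eventually returns a terminal error, and eventA is the test event type
-// defined in routine_test.go below):
-//
-//	rt := newRoutine("example", handler, 10)
-//	go rt.start()
-//	<-rt.ready()        // wait until the routine is accepting events
-//	rt.send(eventA{})   // returns false if the routine is stopped or full
-//	err := <-rt.final() // blocks until the handler returns an error
-//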
Calling `stop()` on a routine will conclude processing of all -// sent events and produce `final()` event representing the terminal state. -type Routine struct { - name string - handle handleFunc - queue *queue.PriorityQueue - history []Event - out chan Event - fin chan error - rdy chan struct{} - running *uint32 - logger log.Logger - metrics *Metrics -} - -func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { - return &Routine{ - name: name, - handle: handleFunc, - queue: queue.NewPriorityQueue(bufferSize, true), - history: make([]Event, 0, historySize), - out: make(chan Event, bufferSize), - rdy: make(chan struct{}, 1), - fin: make(chan error, 1), - running: new(uint32), - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } -} - -func (rt *Routine) setLogger(logger log.Logger) { - rt.logger = logger -} - -// nolint:unused -func (rt *Routine) setMetrics(metrics *Metrics) { - rt.metrics = metrics -} - -func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) - running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) - if !running { - panic(fmt.Sprintf("%s is already running", rt.name)) - } - close(rt.rdy) - defer func() { - if r := recover(); r != nil { - var ( - b strings.Builder - j int - ) - for i := len(rt.history) - 1; i >= 0; i-- { - fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) - j++ - } - panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) - } - stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) - if !stopped { - panic(fmt.Sprintf("%s is failed to stop", rt.name)) - } - }() - - for { - events, err := rt.queue.Get(1) - if err == queue.ErrDisposed { - rt.terminate(nil) - return - } else if err != nil { - rt.terminate(err) - return - } - oEvent, err := rt.handle(events[0].(Event)) - rt.metrics.EventsHandled.With("routine", rt.name).Add(1) - if err != nil { - rt.terminate(err) - return - } - rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) - - // Skip rTrySchedule and rProcessBlock events as they clutter the history - // due to their frequency. - switch events[0].(type) { - case rTrySchedule: - case rProcessBlock: - default: - rt.history = append(rt.history, events[0].(Event)) - if len(rt.history) > historySize { - rt.history = rt.history[1:] - } - } - - rt.out <- oEvent - } -} - -// XXX: look into returning OpError in the net package -func (rt *Routine) send(event Event) bool { - rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event)) - if !rt.isRunning() { - return false - } - err := rt.queue.Put(event) - if err != nil { - rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) - return false - } - - rt.metrics.EventsSent.With("routine", rt.name).Add(1) - return true -} - -func (rt *Routine) isRunning() bool { - return atomic.LoadUint32(rt.running) == 1 -} - -func (rt *Routine) next() chan Event { - return rt.out -} - -func (rt *Routine) ready() chan struct{} { - return rt.rdy -} - -func (rt *Routine) stop() { - if !rt.isRunning() { // XXX: this should check rt.queue.Disposed() - return - } - - rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) - rt.queue.Dispose() // this should block until all queue items are free? 
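-	// Note: per go-datastructures' queue package, Dispose does not block until
-	// queued items are handled; it releases blocked Get/Put callers, and
-	// subsequent calls return queue.ErrDisposed — which start() treats as a
-	// clean shutdown (terminate(nil)) and send() reports as a failed send.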
-} - -func (rt *Routine) final() chan error { - return rt.fin -} - -// XXX: Maybe get rid of this -func (rt *Routine) terminate(reason error) { - // We don't close the rt.out channel here, to avoid spinning on the closed channel - // in the event loop. - rt.fin <- reason -} diff --git a/internal/blocksync/v2/routine_test.go b/internal/blocksync/v2/routine_test.go deleted file mode 100644 index 8f92bee3e..000000000 --- a/internal/blocksync/v2/routine_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package v2 - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type eventA struct { - priorityNormal -} - -var errDone = fmt.Errorf("done") - -func simpleHandler(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - return noOp, errDone - } - return noOp, nil -} - -func TestRoutineFinal(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.isRunning(), - "expected an initialized routine to not be running") - go routine.start() - <-routine.ready() - assert.True(t, routine.isRunning(), - "expected an started routine") - - assert.True(t, routine.send(eventA{}), - "expected sending to a ready routine to succeed") - - assert.Equal(t, errDone, <-routine.final(), - "expected the final event to be done") - - assert.False(t, routine.isRunning(), - "expected an completed routine to no longer be running") -} - -func TestRoutineStop(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.send(eventA{}), - "expected sending to an unstarted routine to fail") - - go routine.start() - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a running routine to succeed") - - routine.stop() - - assert.False(t, routine.send(eventA{}), - "expected sending to a stopped routine to fail") -} - -type finalCount struct { - count int -} - -func (f finalCount) Error() string { - return "end" -} - -func genStatefulHandler(maxCount int) handleFunc { - counter := 0 - return func(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - counter++ - if counter >= maxCount { - return noOp, finalCount{counter} - } - - return eventA{}, nil - } - return noOp, nil - } -} - -func feedback(r *Routine) { - for event := range r.next() { - r.send(event) - } -} - -func TestStatefulRoutine(t *testing.T) { - var ( - count = 10 - handler = genStatefulHandler(count) - bufferSize = 20 - routine = newRoutine("statefulRoutine", handler, bufferSize) - ) - - go routine.start() - go feedback(routine) - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a started routine to succeed") - - final := <-routine.final() - if fnl, ok := final.(finalCount); ok { - assert.Equal(t, count, fnl.count, - "expected the routine to count to 10") - } else { - t.Fail() - } -} - -type lowPriorityEvent struct { - priorityLow -} - -type highPriorityEvent struct { - priorityHigh -} - -func handleWithPriority(event Event) (Event, error) { - switch event.(type) { - case lowPriorityEvent: - return noOp, nil - case highPriorityEvent: - return noOp, errDone - } - return noOp, nil -} - -func TestPriority(t *testing.T) { - var ( - bufferSize = 20 - routine = newRoutine("priorityRoutine", handleWithPriority, bufferSize) - ) - - go routine.start() - <-routine.ready() - go func() { - for { - routine.send(lowPriorityEvent{}) - time.Sleep(1 * time.Millisecond) - } - }() - time.Sleep(10 * time.Millisecond) 
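-	// The 10ms sleep lets the low-priority producer above queue a burst of
-	// events; nothing drains routine.next() in this test, so the
-	// high-priority send below is exercised while the routine is busy.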
- - assert.True(t, routine.isRunning(), - "expected an started routine") - assert.True(t, routine.send(highPriorityEvent{}), - "expected send to succeed even when saturated") - - assert.Equal(t, errDone, <-routine.final()) - assert.False(t, routine.isRunning(), - "expected an started routine") -} diff --git a/internal/blocksync/v2/scheduler.go b/internal/blocksync/v2/scheduler.go deleted file mode 100644 index b731d96a4..000000000 --- a/internal/blocksync/v2/scheduler.go +++ /dev/null @@ -1,711 +0,0 @@ -package v2 - -import ( - "bytes" - "errors" - "fmt" - "math" - "sort" - "time" - - "github.com/tendermint/tendermint/types" -) - -// Events generated by the scheduler: -// all blocks have been processed -type scFinishedEv struct { - priorityNormal - reason string -} - -func (e scFinishedEv) String() string { - return fmt.Sprintf("scFinishedEv{%v}", e.reason) -} - -// send a blockRequest message -type scBlockRequest struct { - priorityNormal - peerID types.NodeID - height int64 -} - -func (e scBlockRequest) String() string { - return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) -} - -// a block has been received and validated by the scheduler -type scBlockReceived struct { - priorityNormal - peerID types.NodeID - block *types.Block -} - -func (e scBlockReceived) String() string { - return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) -} - -// scheduler detected a peer error -type scPeerError struct { - priorityHigh - peerID types.NodeID - reason error -} - -func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) -} - -// scheduler removed a set of peers (timed out or slow peer) -type scPeersPruned struct { - priorityHigh - peers []types.NodeID -} - -func (e scPeersPruned) String() string { - return fmt.Sprintf("scPeersPruned{%v}", e.peers) -} - -// XXX: make this fatal? 
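-// (It is not fatal today: handle() returns scSchedulerFail as an ordinary
-// event with a nil error, and a Routine only terminates when its handler
-// returns a non-nil error.)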
-// scheduler encountered a fatal error -type scSchedulerFail struct { - priorityHigh - reason error -} - -func (e scSchedulerFail) String() string { - return fmt.Sprintf("scSchedulerFail{%v}", e.reason) -} - -type blockState int - -const ( - blockStateUnknown blockState = iota + 1 // no known peer has this block - blockStateNew // indicates that a peer has reported having this block - blockStatePending // indicates that this block has been requested from a peer - blockStateReceived // indicates that this block has been received by a peer - blockStateProcessed // indicates that this block has been applied -) - -func (e blockState) String() string { - switch e { - case blockStateUnknown: - return "Unknown" - case blockStateNew: - return "New" - case blockStatePending: - return "Pending" - case blockStateReceived: - return "Received" - case blockStateProcessed: - return "Processed" - default: - return fmt.Sprintf("invalid blockState: %d", e) - } -} - -type peerState int - -const ( - peerStateNew = iota + 1 - peerStateReady - peerStateRemoved -) - -func (e peerState) String() string { - switch e { - case peerStateNew: - return "New" - case peerStateReady: - return "Ready" - case peerStateRemoved: - return "Removed" - default: - panic(fmt.Sprintf("unknown peerState: %d", e)) - } -} - -type scPeer struct { - peerID types.NodeID - - // initialized as New when peer is added, updated to Ready when statusUpdate is received, - // updated to Removed when peer is removed - state peerState - - base int64 // updated when statusResponse is received - height int64 // updated when statusResponse is received - lastTouched time.Time - lastRate int64 // last receive rate in bytes -} - -func (p scPeer) String() string { - return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) -} - -func newScPeer(peerID types.NodeID) *scPeer { - return &scPeer{ - peerID: peerID, - state: peerStateNew, - base: -1, - height: -1, - lastTouched: time.Time{}, - } -} - -// The scheduler keep track of the state of each block and each peer. The -// scheduler will attempt to schedule new block requests with `trySchedule` -// events and remove slow peers with `tryPrune` events. -type scheduler struct { - initHeight int64 - - // next block that needs to be processed. All blocks with smaller height are - // in Processed state. - height int64 - - // lastAdvance tracks the last time a block execution happened. - // syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing. - // This covers the cases where there are no peers or all peers have a lower height. - lastAdvance time.Time - syncTimeout time.Duration - - // a map of peerID to scheduler specific peer struct `scPeer` used to keep - // track of peer specific state - peers map[types.NodeID]*scPeer - peerTimeout time.Duration // maximum response time from a peer otherwise prune - minRecvRate int64 // minimum receive rate from peer otherwise prune - - // the maximum number of blocks that should be New, Received or Pending at any point - // in time. This is used to enforce a limit on the blockStates map. - targetPending int - // a list of blocks to be scheduled (New), Pending or Received. Its length should be - // smaller than targetPending. 
- blockStates map[int64]blockState - - // a map of heights to the peer we are waiting a response from - pendingBlocks map[int64]types.NodeID - - // the time at which a block was put in blockStatePending - pendingTime map[int64]time.Time - - // a map of heights to the peers that put the block in blockStateReceived - receivedBlocks map[int64]types.NodeID -} - -func (sc scheduler) String() string { - return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v", - sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks) -} - -func newScheduler(initHeight int64, startTime time.Time) *scheduler { - sc := scheduler{ - initHeight: initHeight, - lastAdvance: startTime, - syncTimeout: 60 * time.Second, - height: initHeight, - blockStates: make(map[int64]blockState), - peers: make(map[types.NodeID]*scPeer), - pendingBlocks: make(map[int64]types.NodeID), - pendingTime: make(map[int64]time.Time), - receivedBlocks: make(map[int64]types.NodeID), - targetPending: 10, // TODO - pass as param - peerTimeout: 15 * time.Second, // TODO - pass as param - minRecvRate: 0, // int64(7680), TODO - pass as param - } - - return &sc -} - -func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer { - if _, ok := sc.peers[peerID]; !ok { - sc.peers[peerID] = newScPeer(peerID) - } - return sc.peers[peerID] -} - -func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state) - } - - peer.lastTouched = time - - return nil -} - -func (sc *scheduler) removePeer(peerID types.NodeID) { - peer, ok := sc.peers[peerID] - if !ok { - return - } - if peer.state == peerStateRemoved { - return - } - - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.pendingTime, height) - delete(sc.pendingBlocks, height) - } - } - - for height, rcvPeerID := range sc.receivedBlocks { - if rcvPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.receivedBlocks, height) - } - } - - // remove the blocks from blockStates if the peer removal causes the max peer height to be lower. - peer.state = peerStateRemoved - maxPeerHeight := int64(0) - for _, otherPeer := range sc.peers { - if otherPeer.state != peerStateReady { - continue - } - if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight { - maxPeerHeight = otherPeer.height - } - } - for h := range sc.blockStates { - if h > maxPeerHeight { - delete(sc.blockStates, h) - } - } -} - -// check if the blockPool is running low and add new blocks in New state to be requested. -// This function is called when there is an increase in the maximum peer height or when -// blocks are processed. -func (sc *scheduler) addNewBlocks() { - if len(sc.blockStates) >= sc.targetPending { - return - } - - for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ { - if i > sc.maxHeight() { - break - } - if sc.getStateAtHeight(i) == blockStateUnknown { - sc.setStateAtHeight(i, blockStateNew) - } - } -} - -func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error { - peer := sc.ensurePeer(peerID) - - if peer.state == peerStateRemoved { - return nil // noop - } - - if height < peer.height { - sc.removePeer(peerID) - return fmt.Errorf("cannot move peer height lower. 
from %d to %d", peer.height, height) - } - - if base > height { - sc.removePeer(peerID) - return fmt.Errorf("cannot set peer base higher than its height") - } - - peer.base = base - peer.height = height - peer.state = peerStateReady - - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) getStateAtHeight(height int64) blockState { - if height < sc.height { - return blockStateProcessed - } else if state, ok := sc.blockStates[height]; ok { - return state - } else { - return blockStateUnknown - } -} - -func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID { - peers := make([]types.NodeID, 0) - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if peer.base <= height && peer.height >= height { - peers = append(peers, peer.peerID) - } - } - return peers -} - -func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID { - prunable := make([]types.NodeID, 0) - for peerID, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate { - prunable = append(prunable, peerID) - } - } - // Tests for handleTryPrunePeer() may fail without sort due to range non-determinism - sort.Sort(PeerByID(prunable)) - return prunable -} - -func (sc *scheduler) setStateAtHeight(height int64, state blockState) { - sc.blockStates[height] = state -} - -// CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error { - peer := sc.peers[peerID] - - if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { - return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) - } - - pendingTime, ok := sc.pendingTime[height] - if !ok || now.Sub(pendingTime) <= 0 { - return fmt.Errorf("clock error: block %d received at %s but requested at %s", - height, pendingTime, now) - } - - peer.lastRate = size / now.Sub(pendingTime).Nanoseconds() - - sc.setStateAtHeight(height, blockStateReceived) - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - - sc.receivedBlocks[height] = peerID - - return nil -} - -func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error { - state := sc.getStateAtHeight(height) - if state != blockStateNew { - return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state) - } - - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("cannot find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state) - } - - if height > peer.height { - return fmt.Errorf("cannot request height %d from peer %s that is at height %d", - height, peerID, peer.height) - } - - if height < peer.base { - return fmt.Errorf("cannot request height %d for peer %s with base %d", - height, peerID, peer.base) - } - - sc.setStateAtHeight(height, blockStatePending) - sc.pendingBlocks[height] = peerID - sc.pendingTime[height] = time - - return nil -} - -func (sc *scheduler) markProcessed(height int64) error { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, so - // when pcBlockProcessed event is received, the block had been requested - // again => don't check the block state. 
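-	// For example: the peer that served block H is pruned after the processor
-	// verified H but before this event arrived, putting H back in New (or
-	// Pending, if re-requested); we still advance sc.height past H.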
- sc.lastAdvance = time.Now() - sc.height = height + 1 - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - delete(sc.receivedBlocks, height) - delete(sc.blockStates, height) - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) allBlocksProcessed() bool { - if len(sc.peers) == 0 { - return false - } - return sc.height >= sc.maxHeight() -} - -// returns max peer height or the last processed block, i.e. sc.height -func (sc *scheduler) maxHeight() int64 { - max := sc.height - 1 - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if max < peer.height { - max = peer.height - } - } - return max -} - -// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks -func (sc *scheduler) nextHeightToSchedule() int64 { - var min int64 = math.MaxInt64 - for height, state := range sc.blockStates { - if state == blockStateNew && height < min { - min = height - } - } - if min == math.MaxInt64 { - min = -1 - } - return min -} - -func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 { - var heights []int64 - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - heights = append(heights, height) - } - } - return heights -} - -func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) { - peers := sc.getPeersWithHeight(height) - if len(peers) == 0 { - return "", fmt.Errorf("cannot find peer for height %d", height) - } - - // create a map from number of pending requests to a list - // of peers having that number of pending requests. - pendingFrom := make(map[int][]types.NodeID) - for _, peerID := range peers { - numPending := len(sc.pendingFrom(peerID)) - pendingFrom[numPending] = append(pendingFrom[numPending], peerID) - } - - // find the set of peers with minimum number of pending requests. - var minPending int64 = math.MaxInt64 - for mp := range pendingFrom { - if int64(mp) < minPending { - minPending = int64(mp) - } - } - - sort.Sort(PeerByID(pendingFrom[int(minPending)])) - return pendingFrom[int(minPending)][0], nil -} - -// PeerByID is a list of peers sorted by peerID. -type PeerByID []types.NodeID - -func (peers PeerByID) Len() int { - return len(peers) -} -func (peers PeerByID) Less(i, j int) bool { - return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1 -} - -func (peers PeerByID) Swap(i, j int) { - peers[i], peers[j] = peers[j], peers[i] -} - -// Handlers - -// This handler gets the block, performs some validation and then passes it on to the processor. -func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { - err := sc.touchPeer(event.peerID, event.time) - if err != nil { - // peer does not exist OR not ready - return noOp, nil - } - - err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) - if err != nil { - sc.removePeer(event.peerID) - return scPeerError{peerID: event.peerID, reason: err}, nil - } - - return scBlockReceived{peerID: event.peerID, block: event.block}, nil -} - -func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) { - // No such peer or peer was removed. - peer, ok := sc.peers[event.peerID] - if !ok || peer.state == peerStateRemoved { - return noOp, nil - } - - // The peer may have been just removed due to errors, low speed or timeouts. 
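-	// (removePeer re-marks this peer's Pending and Received blocks as New and
-	// drops any block heights above the remaining peers' maximum height.)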
- sc.removePeer(event.peerID) - - return scPeerError{peerID: event.peerID, - reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", - event.peerID, peer.base, peer.height, event.height)}, nil -} - -func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { - if event.height != sc.height { - panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height)) - } - - err := sc.markProcessed(event.height) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "processed all blocks"}, nil - } - - return noOp, nil -} - -// Handles an error from the processor. The processor had already cleaned the blocks from -// the peers included in this event. Just attempt to remove the peers. -func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) { - // The peers may have been just removed due to errors, low speed or timeouts. - sc.removePeer(event.firstPeerID) - if event.firstPeerID != event.secondPeerID { - sc.removePeer(event.secondPeerID) - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "error on last block"}, nil - } - - return noOp, nil -} - -func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) { - sc.ensurePeer(event.peerID) - return noOp, nil -} - -func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) { - sc.removePeer(event.peerID) - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "removed peer"}, nil - } - - // Return scPeerError so the peer (and all associated blocks) is removed from - // the processor. - return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil -} - -func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { - // Check behavior of peer responsible to deliver block at sc.height. - timeHeightAsked, ok := sc.pendingTime[sc.height] - if ok && time.Since(timeHeightAsked) > sc.peerTimeout { - // A request was sent to a peer for block at sc.height but a response was not received - // from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer - // will be timed out even if it sends blocks at higher heights but prevents progress by - // not sending the block at current height. - sc.removePeer(sc.pendingBlocks[sc.height]) - } - - prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time) - if len(prunablePeers) == 0 { - return noOp, nil - } - for _, peerID := range prunablePeers { - sc.removePeer(peerID) - } - - // If all blocks are processed we should finish. 
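-	// Pruning may have removed the last ready peers above sc.height, in which
-	// case maxHeight() falls back to sc.height-1 and the sync is complete.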
- if sc.allBlocksProcessed() { - return scFinishedEv{reason: "after try prune"}, nil - } - - return scPeersPruned{peers: prunablePeers}, nil -} - -func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { - initHeight := event.state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = event.state.InitialHeight - } - sc.initHeight = initHeight - sc.height = initHeight - sc.lastAdvance = time.Now() - sc.addNewBlocks() - return noOp, nil -} - -func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { - if time.Since(sc.lastAdvance) > sc.syncTimeout { - return scFinishedEv{reason: "timeout, no advance"}, nil - } - - nextHeight := sc.nextHeightToSchedule() - if nextHeight == -1 { - return noOp, nil - } - - bestPeerID, err := sc.selectPeer(nextHeight) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil { - return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate - } - return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil - -} - -func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerRange(event.peerID, event.base, event.height) - if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil - } - return noOp, nil -} - -func (sc *scheduler) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - nextEvent, err := sc.handleResetState(event) - return nextEvent, err - case bcStatusResponse: - nextEvent, err := sc.handleStatusResponse(event) - return nextEvent, err - case bcBlockResponse: - nextEvent, err := sc.handleBlockResponse(event) - return nextEvent, err - case bcNoBlockResponse: - nextEvent, err := sc.handleNoBlockResponse(event) - return nextEvent, err - case rTrySchedule: - nextEvent, err := sc.handleTrySchedule(event) - return nextEvent, err - case bcAddNewPeer: - nextEvent, err := sc.handleAddNewPeer(event) - return nextEvent, err - case bcRemovePeer: - nextEvent, err := sc.handleRemovePeer(event) - return nextEvent, err - case rTryPrunePeer: - nextEvent, err := sc.handleTryPrunePeer(event) - return nextEvent, err - case pcBlockProcessed: - nextEvent, err := sc.handleBlockProcessed(event) - return nextEvent, err - case pcBlockVerificationFailure: - nextEvent, err := sc.handleBlockProcessError(event) - return nextEvent, err - default: - return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil - } -} diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go deleted file mode 100644 index d2c4aab03..000000000 --- a/internal/blocksync/v2/scheduler_test.go +++ /dev/null @@ -1,2253 +0,0 @@ -package v2 - -import ( - "fmt" - "math" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -type scTestParams struct { - peers map[string]*scPeer - initHeight int64 - height int64 - allB []int64 - pending map[int64]types.NodeID - pendingTime map[int64]time.Time - received map[int64]types.NodeID - peerTimeout time.Duration - minRecvRate int64 - targetPending int - startTime time.Time - syncTimeout time.Duration -} - -func verifyScheduler(sc *scheduler) { - missing := 0 - if sc.maxHeight() >= sc.height { - missing = int(math.Min(float64(sc.targetPending), float64(sc.maxHeight()-sc.height+1))) - } - if len(sc.blockStates) != missing { 
- panic(fmt.Sprintf("scheduler block length %d different than target %d", len(sc.blockStates), missing)) - } -} - -func newTestScheduler(params scTestParams) *scheduler { - peers := make(map[types.NodeID]*scPeer) - var maxHeight int64 - - initHeight := params.initHeight - if initHeight == 0 { - initHeight = 1 - } - sc := newScheduler(initHeight, params.startTime) - if params.height != 0 { - sc.height = params.height - } - - for id, peer := range params.peers { - peer.peerID = types.NodeID(id) - peers[types.NodeID(id)] = peer - if maxHeight < peer.height { - maxHeight = peer.height - } - } - for _, h := range params.allB { - sc.blockStates[h] = blockStateNew - } - for h, pid := range params.pending { - sc.blockStates[h] = blockStatePending - sc.pendingBlocks[h] = pid - } - for h, tm := range params.pendingTime { - sc.pendingTime[h] = tm - } - for h, pid := range params.received { - sc.blockStates[h] = blockStateReceived - sc.receivedBlocks[h] = pid - } - - sc.peers = peers - sc.peerTimeout = params.peerTimeout - if params.syncTimeout == 0 { - sc.syncTimeout = 10 * time.Second - } else { - sc.syncTimeout = params.syncTimeout - } - - if params.targetPending == 0 { - sc.targetPending = 10 - } else { - sc.targetPending = params.targetPending - } - - sc.minRecvRate = params.minRecvRate - - verifyScheduler(sc) - - return sc -} - -func TestScInit(t *testing.T) { - var ( - initHeight int64 = 5 - sc = newScheduler(initHeight, time.Now()) - ) - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) -} - -func TestScMaxHeights(t *testing.T) { - - tests := []struct { - name string - sc scheduler - wantMax int64 - }{ - { - name: "no peers", - sc: scheduler{height: 11}, - wantMax: 10, - }, - { - name: "one ready peer", - sc: scheduler{ - height: 3, - peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}}, - }, - wantMax: 6, - }, - { - name: "ready and removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 4, - }, - { - name: "removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateRemoved}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 0, - }, - { - name: "new peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}, - }, - wantMax: 0, - }, - { - name: "mixed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 20, state: peerStateRemoved}, - "P4": {height: 22, state: peerStateReady}, - }, - }, - wantMax: 22, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // maxHeight() should not mutate the scheduler - wantSc := tt.sc - - resMax := tt.sc.maxHeight() - assert.Equal(t, tt.wantMax, resMax) - assert.Equal(t, wantSc, tt.sc) - }) - } -} - -func TestScEnsurePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - }{ - { - name: "add first peer", - fields: scTestParams{}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: 
map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add duplicate peer is fine", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "add duplicate peer with existing peer in Ready state is noop", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.ensurePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScTouchPeer(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - time time.Time - } - - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt to touch non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - wantErr: true, - }, - { - name: "attempt to touch peer in state New", - fields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - wantErr: true, - }, - { - name: "attempt to touch peer in state Removed", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - wantErr: true, - }, - { - name: "touch peer in state Ready", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, lastTouched: now}}}, - args: args{peerID: "P1", time: now.Add(3 * time.Second)}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {state: peerStateReady, lastTouched: now.Add(3 * time.Second)}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.touchPeer(tt.args.peerID, tt.args.time); (err != nil) != tt.wantErr { - t.Errorf("touchPeer() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScPrunablePeers(t *testing.T) { - now := time.Now() - - type args struct { - threshold time.Duration - time time.Time - minSpeed int64 - } - - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: scTestParams{peers: 
map[string]*scPeer{}}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{}, - }, - { - name: "mixed peers", - fields: scTestParams{peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90}, - }}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{"P4", "P5", "P6"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // peersSlowerThan should not mutate the scheduler - wantSc := sc - res := sc.prunablePeers(tt.args.threshold, tt.args.minSpeed, tt.args.time) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScRemovePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "remove non existing peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "remove single New peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}}}, - }, - { - name: "remove one of two New peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}, "P2": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}, "P2": {height: -1}}}, - }, - { - name: "remove one Ready peer, all peers removed", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateRemoved}}, - }, - }, - { - name: "attempt to remove already removed peer", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}}, - }, - { - name: "remove Ready peer with blocks requested", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: 
map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 3: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer from multiple peers set, with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateReady}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateRemoved}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{3: "P2"}, - received: map[int64]types.NodeID{4: "P2", 5: "P2"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.removePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScSetPeerRange(t *testing.T) { - - type args struct { - peerID types.NodeID - base int64 - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 2, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "increase height of removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - }, - { - name: "decrease height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 2}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}}, - wantErr: true, - }, - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - 
allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with huge height 10**10 ", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: -1, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P2", height: 10000000000}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with base > height should error", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", base: 6, height: 5}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "add peer with base == height is fine", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P1", base: 6, height: 6}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height) - if (err != nil) != tt.wantErr { - t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScGetPeersWithHeight(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: []types.NodeID{}, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: []types.NodeID{}, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: []types.NodeID{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: []types.NodeID{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer at 
base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{}, - }, - { - name: "multiple mixed peers", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 5, state: peerStateReady}, - "P4": {height: 20, state: peerStateRemoved}, - "P5": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{height: 8}, - wantResult: []types.NodeID{"P2", "P5"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // getPeersWithHeight should not mutate the scheduler - wantSc := sc - res := sc.getPeersWithHeight(tt.args.height) - sort.Sort(PeerByID(res)) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkPending(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - height int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt mark pending an unknown block above height", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "attempt mark pending an unknown block below base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - wantErr: true, - }, - { - name: "attempt mark pending from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "mark pending from Removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "mark pending from New peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 2, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending from short peer", - fields: 
scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - }, - args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markPending(tt.args.peerID, tt.args.height, tt.args.tm); (err != nil) != tt.wantErr { - t.Errorf("markPending() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkReceived(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - height int64 - size int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "received from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "received from removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "received from unsolicited peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - wantErr: true, - }, - { - name: "received but blockRequest not sent", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, - }, - wantErr: true, - }, - { - name: "received with bad timestamp", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": 
{height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - wantErr: true, - }, - { - name: "received all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - received: map[int64]types.NodeID{2: "P1"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markReceived( - tt.args.peerID, - tt.args.height, - tt.args.size, - now.Add(time.Second)); (err != nil) != tt.wantErr { - t.Errorf("markReceived() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkProcessed(t *testing.T) { - now := time.Now() - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "processed an unreceived block", - fields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - targetPending: 1, - }, - args: args{height: 2}, - wantFields: scTestParams{ - height: 3, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{3}, - targetPending: 1, - }, - }, - { - name: "mark processed success", - fields: scTestParams{ - height: 1, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]types.NodeID{1: "P1"}}, - args: args{height: 1}, - wantFields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - oldBlockState := sc.getStateAtHeight(tt.args.height) - if err := sc.markProcessed(tt.args.height); (err != nil) != tt.wantErr { - t.Errorf("markProcessed() wantErr %v, error = %v", tt.wantErr, err) - } - if tt.wantErr { - assert.Equal(t, oldBlockState, sc.getStateAtHeight(tt.args.height)) - } else { - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(tt.args.height)) - } - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScResetState(t *testing.T) { - tests := []struct { - name string - fields scTestParams - state state.State - wantFields scTestParams - }{ - { - name: "updates 
height and initHeight", - fields: scTestParams{ - height: 0, - initHeight: 0, - }, - state: state.State{LastBlockHeight: 7}, - wantFields: scTestParams{ - height: 8, - initHeight: 8, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - e, err := sc.handleResetState(bcResetState{state: tt.state}) - require.NoError(t, err) - assert.Equal(t, e, noOp) - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScAllBlocksProcessed(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantResult bool - }{ - { - name: "no blocks, no peers", - fields: scTestParams{}, - wantResult: false, - }, - { - name: "only New blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantResult: false, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantResult: false, - }, - { - name: "only Received blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantResult: false, - }, - { - name: "only Processed blocks plus highest is received", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}}, - allB: []int64{4}, - received: map[int64]types.NodeID{4: "P1"}, - }, - wantResult: true, - }, - { - name: "mixed block states", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{2: now, 4: now}, - }, - wantResult: false, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // allBlocksProcessed() should not mutate the scheduler - wantSc := sc - res := sc.allBlocksProcessed() - assert.Equal(t, tt.wantResult, res) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScNextHeightToSchedule(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantHeight int64 - }{ - { - name: "no blocks", - fields: scTestParams{initHeight: 11, height: 11}, - wantHeight: -1, - }, - { - name: "only New blocks", - fields: scTestParams{ - initHeight: 3, - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{3, 4, 5, 6}, - }, - wantHeight: 3, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantHeight: -1, - }, - { - name: "only Received blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantHeight: -1, - }, - { - name: "only Processed blocks", - fields: scTestParams{ - 
initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantHeight: 1, - }, - { - name: "mixed block states", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - }, - wantHeight: 1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // nextHeightToSchedule() should not mutate the scheduler - wantSc := sc - - resMin := sc.nextHeightToSchedule() - assert.Equal(t, tt.wantHeight, resMin) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScSelectPeer(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult types.NodeID - wantError bool - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready equal peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 3}, - wantResult: "", - wantError: true, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 8, state: peerStateReady}, - "P2": {height: 9, state: peerStateReady}}, - allB: []int64{4, 5, 6, 7, 8, 9}, - pending: map[int64]types.NodeID{ - 4: "P1", 6: "P1", - 5: "P2", - }, - }, - args: args{height: 4}, - wantResult: "P2", - }, - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P2": {height: 20, state: peerStateReady}, - "P1": {height: 15, state: peerStateReady}, - "P3": {height: 15, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: "P2", - }, - }, - args: args{height: 7}, - wantResult: "P1", - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // selectPeer should not mutate the scheduler - wantSc := sc - res, err := sc.selectPeer(tt.args.height) - assert.Equal(t, tt.wantResult, res) - 
assert.Equal(t, tt.wantError, err != nil) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -// makeScBlock makes an empty block. -func makeScBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// used in place of assert.Equal(t, want, actual) to avoid failures due to -// scheduler.lastAdvanced timestamp inequalities. -func checkSameScheduler(t *testing.T, want *scheduler, actual *scheduler) { - assert.Equal(t, want.initHeight, actual.initHeight) - assert.Equal(t, want.height, actual.height) - assert.Equal(t, want.peers, actual.peers) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.pendingBlocks, actual.pendingBlocks) - assert.Equal(t, want.pendingTime, actual.pendingTime) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.receivedBlocks, actual.receivedBlocks) - assert.Equal(t, want.blockStates, actual.blockStates) -} - -// checkScResults checks scheduler handler test results -func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, event Event) { - if (err != nil) != wantErr { - t.Errorf("error = %v, wantErr %v", err, wantErr) - return - } - if !assert.IsType(t, wantEvent, event) { - t.Log(fmt.Sprintf("Wrong type received, got: %v", event)) - } - switch wantEvent := wantEvent.(type) { - case scPeerError: - assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID) - assert.Equal(t, wantEvent.reason != nil, event.(scPeerError).reason != nil) - case scBlockReceived: - assert.Equal(t, wantEvent.peerID, event.(scBlockReceived).peerID) - assert.Equal(t, wantEvent.block, event.(scBlockReceived).block) - case scSchedulerFail: - assert.Equal(t, wantEvent.reason != nil, event.(scSchedulerFail).reason != nil) - } -} - -func TestScHandleBlockResponse(t *testing.T) { - now := time.Now() - block6FromP1 := bcBlockResponse{ - time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), - size: 100, - block: makeScBlock(6), - } - - type args struct { - event bcBlockResponse - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "block from wrong peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block with bad timestamp", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now.Add(time.Second)}, - }, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "good block, accept", - fields: scTestParams{ - peers: 
map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleNoBlockResponse(t *testing.T) { - now := time.Now() - noBlock6FromP1 := bcNoBlockResponse{ - time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), - height: 6, - } - - tests := []struct { - name string - fields scTestParams - wantEvent Event - wantFields scTestParams - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{}, - }, - { - name: "noBlock from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "for block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "noBlock from peer we don't have", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: noOpEvent{}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - }, - { - name: "noBlock from existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleNoBlockResponse(noBlock6FromP1) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScHandleBlockProcessed(t *testing.T) { - now := time.Now() - processed6FromP1 := pcBlockProcessed{ - peerID: types.NodeID("P1"), - height: 6, - } - - type args struct { - event pcBlockProcessed - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{height: 6}, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block we don't have", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 
7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block ok, we processed all blocks", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: scFinishedEv{}, - }, - { - name: "processed block ok, we still have blocks to process", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{6: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessed(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleBlockVerificationFailure(t *testing.T) { - now := time.Now() - - type args struct { - event pcBlockVerificationFailure - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block we don't have, single peer is still removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block we don't have, one of two peers are removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, all blocks are processed after removal", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: map[string]*scPeer{ - "P1": {height: 8, state: 
peerStateReady}, - "P2": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}, - }, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessError(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleAddNewPeer(t *testing.T) { - addP1 := bcAddNewPeer{ - peerID: types.NodeID("P1"), - } - type args struct { - event bcAddNewPeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "add P1 to empty scheduler", - fields: scTestParams{}, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add duplicate peer", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add P1 to non empty scheduler", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleAddNewPeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTryPrunePeer(t *testing.T) { - now := time.Now() - - pruneEv := rTryPrunePeer{ - time: now.Add(time.Second + time.Millisecond), - } - type args struct { - event rTryPrunePeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{}, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "no prunable peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}}, - peerTimeout: time.Second, - }, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "mixed peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{1, 2, 3, 
4, 5, 6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}}, - }, - { - name: "mixed peers, finish after pruning", - fields: scTestParams{ - minRecvRate: 100, - height: 6, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scFinishedEv{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTryPrunePeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTrySchedule(t *testing.T) { - now := time.Now() - tryEv := rTrySchedule{ - time: now.Add(time.Second + time.Millisecond), - } - - type args struct { - event rTrySchedule - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only new peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only Removed peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - startTime: now, - height: 6, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P2", height: 4}, - }, - - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P2": {height: 8, state: peerStateReady}, - "P1": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: 
"P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 7}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTrySchedule(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleStatusResponse(t *testing.T) { - now := time.Now() - statusRespP1Ev := bcStatusResponse{ - time: now.Add(time.Second + time.Millisecond), - peerID: "P1", - height: 6, - } - - type args struct { - event bcStatusResponse - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "increase height of removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "decrease height of single peer", - fields: scTestParams{ - height: 5, - peers: map[string]*scPeer{"P1": {height: 10, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8, 9, 10}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleStatusResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandle(t *testing.T) { - now := time.Now() - - type unknownEv struct { - priorityNormal - } - - block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3) - - t0 := time.Now() - tick := make([]time.Time, 100) - for i := range tick { - tick[i] = t0.Add(time.Duration(i) * time.Millisecond) - } - - type args struct { - event Event - } - type scStep struct { - currentSc *scTestParams - args args - wantEvent Event - wantErr bool - wantSc *scTestParams - } - tests := []struct { - name string - steps []scStep - }{ - { - name: "unknown event", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{}, - args: args{event: unknownEv{}}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, - wantSc: &scTestParams{}, - }, - }, - }, - { - name: "single peer, sync 3 blocks", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{startTime: now, peers: map[string]*scPeer{}, height: 1}, - args: args{event: bcAddNewPeer{peerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, - }, - { // set height of P1 - args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, 
state: peerStateReady}}, - allB: []int64{1, 2, 3}, - height: 1, - }, - }, - { // schedule block 1 - args: args{event: rTrySchedule{time: tick[1]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1]}, - height: 1, - }, - }, - { // schedule block 2 - args: args{event: rTrySchedule{time: tick[2]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]}, - height: 1, - }, - }, - { // schedule block 3 - args: args{event: rTrySchedule{time: tick[3]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]}, - height: 1, - }, - }, - { // block response 1 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}}, - wantEvent: scBlockReceived{peerID: "P1", block: block1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]}, - received: map[int64]types.NodeID{1: "P1"}, - height: 1, - }, - }, - { // block response 2 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}}, - wantEvent: scBlockReceived{peerID: "P1", block: block2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{3: "P1"}, - pendingTime: map[int64]time.Time{3: tick[3]}, - received: map[int64]types.NodeID{1: "P1", 2: "P1"}, - height: 1, - }, - }, - { // block response 3 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}}, - wantEvent: scBlockReceived{peerID: "P1", block: block3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - }, - { // processed block 1 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{2, 3}, - received: map[int64]types.NodeID{2: "P1", 3: "P1"}, - height: 2, - }, - }, - { // processed block 2 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}}, - wantEvent: scFinishedEv{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{3}, - received: map[int64]types.NodeID{3: "P1"}, - height: 3, - }, - }, - }, - }, - { - name: "block verification failure", - steps: []scStep{ - { // failure processing 
block 1 - currentSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{}, - height: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var sc *scheduler - for i, step := range tt.steps { - // First step must always initialize the currentState as state. - if step.currentSc != nil { - sc = newTestScheduler(*step.currentSc) - } - if sc == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := sc.handle(step.args.event) - wantSc := newTestScheduler(*step.wantSc) - - t.Logf("step %d(%v): %s", i, step.args.event, sc) - checkSameScheduler(t, wantSc, sc) - - checkScResults(t, step.wantErr, err, step.wantEvent, nextEvent) - - // Next step may use the wantedState as their currentState. - sc = newTestScheduler(*step.wantSc) - } - }) - } -} diff --git a/internal/blocksync/v2/types.go b/internal/blocksync/v2/types.go deleted file mode 100644 index 7a73728e4..000000000 --- a/internal/blocksync/v2/types.go +++ /dev/null @@ -1,65 +0,0 @@ -package v2 - -import ( - "github.com/Workiva/go-datastructures/queue" -) - -// Event is the type that can be added to the priority queue. -type Event queue.Item - -type priority interface { - Compare(other queue.Item) int - Priority() int -} - -type priorityLow struct{} -type priorityNormal struct{} -type priorityHigh struct{} - -func (p priorityLow) Priority() int { - return 1 -} - -func (p priorityNormal) Priority() int { - return 2 -} - -func (p priorityHigh) Priority() int { - return 3 -} - -func (p priorityLow) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityNormal) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityHigh) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -type noOpEvent struct { - priorityLow -} - -var noOp = noOpEvent{} diff --git a/node/node.go b/node/node.go index fc43aace4..820e0630a 100644 --- a/node/node.go +++ b/node/node.go @@ -640,7 +640,7 @@ func (n *nodeImpl) OnStart() error { } if n.config.Mode != config.ModeSeed { - if n.config.BlockSync.Version == config.BlockSyncV0 { + if n.config.BlockSync.Enable { if err := n.bcReactor.Start(); err != nil { return err } @@ -762,7 +762,7 @@ func (n *nodeImpl) OnStop() { if n.config.Mode != config.ModeSeed { // now stop the reactors - if n.config.BlockSync.Version == config.BlockSyncV0 { + if n.config.BlockSync.Enable { // Stop the real blockchain reactor separately since the switch uses the shim. 
if err := n.bcReactor.Stop(); err != nil { n.Logger.Error("failed to stop the blockchain reactor", "err", err) diff --git a/node/setup.go b/node/setup.go index 3f305125d..16aa715c9 100644 --- a/node/setup.go +++ b/node/setup.go @@ -3,7 +3,6 @@ package node import ( "bytes" "context" - "errors" "fmt" "math" "net" @@ -16,7 +15,6 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" - bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2" "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" @@ -295,40 +293,31 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") - switch cfg.BlockSync.Version { - case config.BlockSyncV0: - reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) + reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) + var ( + channels map[p2p.ChannelID]*p2p.Channel + peerUpdates *p2p.PeerUpdates + ) - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - reactor, err := bcv0.NewReactor( - logger, state.Copy(), blockExec, blockStore, csReactor, - channels[bcv0.BlockSyncChannel], peerUpdates, blockSync, - metrics, - ) - if err != nil { - return nil, nil, err - } - - return reactorShim, reactor, nil - - case config.BlockSyncV2: - return nil, nil, errors.New("block sync version v2 is no longer supported. 
Please use v0") - - default: - return nil, nil, fmt.Errorf("unknown block sync version %s", cfg.BlockSync.Version) + if cfg.P2P.UseLegacy { + channels = getChannelsFromShim(reactorShim) + peerUpdates = reactorShim.PeerUpdates + } else { + channels = makeChannelsFromShims(router, bcv0.ChannelShims) + peerUpdates = peerManager.Subscribe() } + + reactor, err := bcv0.NewReactor( + logger, state.Copy(), blockExec, blockStore, csReactor, + channels[bcv0.BlockSyncChannel], peerUpdates, blockSync, + metrics, + ) + if err != nil { + return nil, nil, err + } + + return reactorShim, reactor, nil } func createConsensusReactor( @@ -676,17 +665,7 @@ func makeNodeInfo( txIndexerStatus = "on" } - var bcChannel byte - switch cfg.BlockSync.Version { - case config.BlockSyncV0: - bcChannel = byte(bcv0.BlockSyncChannel) - - case config.BlockSyncV2: - bcChannel = bcv2.BlockchainChannel - - default: - return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", cfg.BlockSync.Version) - } + bcChannel := byte(bcv0.BlockSyncChannel) nodeInfo := types.NodeInfo{ ProtocolVersion: types.ProtocolVersion{ diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 7e07febd5..6eb32e56f 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -35,6 +35,7 @@ perturb = ["restart"] perturb = ["disconnect"] seeds = ["seed01"] snapshot_interval = 5 +block_sync = "v0" [node.validator02] abci_protocol = "tcp" diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 3af7a9944..5a8407ab2 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -296,10 +296,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mempool.Version = node.Mempool } + cfg.BlockSync.Enable = true if node.BlockSync == "" { cfg.BlockSync.Enable = false - } else { - cfg.BlockSync.Version = node.BlockSync } switch node.StateSync { From f5b9c210ca1b2cf1c2303038b14a2588604a4af3 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Mon, 4 Oct 2021 18:37:18 -0400 Subject: [PATCH 065/174] consensus: wait until peerUpdates channel is closed to close remaining peers (#7058) The race occurred as a result of a goroutine launched by `processPeerUpdate` racing with the `OnStop` method. The `processPeerUpdates` goroutine deletes from the map as `OnStop` is reading from it. This change updates the `OnStop` method to wait for the peer updates channel to be done before closing the peers. It also copies the map contents to a new map so that it will not conflict with the view of the map that the goroutine created in `processPeerUpdate` sees. --- internal/consensus/reactor.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index ca3f5d353..22bd7576e 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -230,17 +230,15 @@ func (r *Reactor) OnStop() { } r.mtx.Lock() - peers := r.peers + // Close and wait for each of the peers to shutdown. + // This is safe to perform with the lock since none of the peers require the + // lock to complete any of the methods that the waitgroup is waiting on. 
+ for _, state := range r.peers { + state.closer.Close() + state.broadcastWG.Wait() + } r.mtx.Unlock() - // wait for all spawned peer goroutines to gracefully exit - for _, ps := range peers { - ps.closer.Close() - } - for _, ps := range peers { - ps.broadcastWG.Wait() - } - // Close the StateChannel goroutine separately since it uses its own channel // to signal closure. close(r.stateCloseCh) From 03ad7d6f20d9fd8fe83d31db168f433480552e94 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 5 Oct 2021 09:40:32 -0400 Subject: [PATCH 066/174] p2p: delete legacy stack initial pass (#7035) A few notes: - this is not all the deletion that we can do, but this is the most "simple" case: it leaves in shims, and there's some trivial additional cleanup to the transport that can happen but that requires writing more code, and I wanted this to be easy to review above all else. - This should land *after* we cut the branch for 0.35, but I'm anticipating that to happen soon, and I wanted to run this through CI. --- CHANGELOG_PENDING.md | 7 + config/config.go | 6 - config/toml.go | 3 - docs/nodes/configuration.md | 3 - internal/p2p/base_reactor.go | 74 - internal/p2p/conn/secret_connection_test.go | 2 +- internal/p2p/mock/reactor.go | 23 - internal/p2p/peer.go | 371 ---- internal/p2p/peer_set.go | 149 -- internal/p2p/peer_set_test.go | 190 --- internal/p2p/peer_test.go | 239 --- internal/p2p/pex/addrbook.go | 948 ----------- internal/p2p/pex/addrbook_test.go | 777 --------- internal/p2p/pex/bench_test.go | 24 - internal/p2p/pex/errors.go | 11 - internal/p2p/pex/file.go | 83 - internal/p2p/pex/known_address.go | 141 -- internal/p2p/pex/params.go | 55 - internal/p2p/pex/pex_reactor.go | 862 ---------- internal/p2p/pex/pex_reactor_test.go | 680 -------- internal/p2p/pex/reactor.go | 16 + internal/p2p/router.go | 4 +- internal/p2p/shim.go | 303 +--- internal/p2p/shim_test.go | 207 --- internal/p2p/switch.go | 1064 ------------ internal/p2p/switch_test.go | 932 ---------- internal/p2p/test_util.go | 256 --- internal/p2p/transport.go | 9 - internal/rpc/core/consensus.go | 60 +- internal/rpc/core/env.go | 9 - internal/rpc/core/net.go | 115 +- internal/rpc/core/net_test.go | 89 - internal/rpc/core/routes.go | 2 - internal/rpc/core/status.go | 1 + node/node.go | 166 +- node/setup.go | 194 +-- rpc/client/local/local.go | 14 - rpc/client/mock/client.go | 14 - test/e2e/generator/generate.go | 91 +- test/e2e/generator/generate_test.go | 72 +- test/e2e/generator/main.go | 16 - test/e2e/networks/ci.toml | 1 - test/e2e/pkg/manifest.go | 3 - test/e2e/pkg/testnet.go | 2 - test/e2e/runner/setup.go | 2 - test/fuzz/p2p/addrbook/fuzz.go | 35 - test/fuzz/p2p/addrbook/fuzz_test.go | 33 - test/fuzz/p2p/addrbook/init-corpus/main.go | 59 - test/fuzz/p2p/addrbook/testdata/cases/empty | 0 test/fuzz/p2p/pex/fuzz_test.go | 33 - test/fuzz/p2p/pex/init-corpus/main.go | 84 - test/fuzz/p2p/pex/reactor_receive.go | 95 -- test/fuzz/p2p/pex/testdata/addrbook1 | 1705 ------------------- test/fuzz/p2p/pex/testdata/cases/empty | 0 54 files changed, 127 insertions(+), 10207 deletions(-) delete mode 100644 internal/p2p/base_reactor.go delete mode 100644 internal/p2p/mock/reactor.go delete mode 100644 internal/p2p/peer.go delete mode 100644 internal/p2p/peer_set.go delete mode 100644 internal/p2p/peer_set_test.go delete mode 100644 internal/p2p/peer_test.go delete mode 100644 internal/p2p/pex/addrbook.go delete mode 100644 internal/p2p/pex/addrbook_test.go delete mode 100644 internal/p2p/pex/bench_test.go delete mode 100644 
internal/p2p/pex/file.go
 delete mode 100644 internal/p2p/pex/known_address.go
 delete mode 100644 internal/p2p/pex/params.go
 delete mode 100644 internal/p2p/pex/pex_reactor.go
 delete mode 100644 internal/p2p/pex/pex_reactor_test.go
 delete mode 100644 internal/p2p/shim_test.go
 delete mode 100644 internal/p2p/switch.go
 delete mode 100644 internal/p2p/switch_test.go
 delete mode 100644 internal/rpc/core/net_test.go
 delete mode 100644 test/fuzz/p2p/addrbook/fuzz.go
 delete mode 100644 test/fuzz/p2p/addrbook/fuzz_test.go
 delete mode 100644 test/fuzz/p2p/addrbook/init-corpus/main.go
 delete mode 100644 test/fuzz/p2p/addrbook/testdata/cases/empty
 delete mode 100644 test/fuzz/p2p/pex/fuzz_test.go
 delete mode 100644 test/fuzz/p2p/pex/init-corpus/main.go
 delete mode 100644 test/fuzz/p2p/pex/reactor_receive.go
 delete mode 100644 test/fuzz/p2p/pex/testdata/addrbook1
 delete mode 100644 test/fuzz/p2p/pex/testdata/cases/empty

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 56efd763b..0a4c90f40 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -16,8 +16,15 @@ Special thanks to external contributors on this release:
 
 - P2P Protocol
 
+  - [p2p] \#7035 Remove legacy P2P routing implementation and
+    associated configuration (@tychoish)
+
 - Go API
 
+  - [blocksync] \#7046 Remove v2 implementation of the blocksync
+    service and reactor, which was disabled in the previous release
+    (@tychoish)
+
 - Blockchain Protocol
 
 ### FEATURES
diff --git a/config/config.go b/config/config.go
index 79ce56935..a393e6edc 100644
--- a/config/config.go
+++ b/config/config.go
@@ -709,11 +709,6 @@ type P2PConfig struct { //nolint: maligned
 	// Force dial to fail
 	TestDialFail bool `mapstructure:"test-dial-fail"`
 
-	// UseLegacy enables the "legacy" P2P implementation and
-	// disables the newer default implementation. This flag will
-	// be removed in a future release.
-	UseLegacy bool `mapstructure:"use-legacy"`
-
 	// Makes it possible to configure which queue backend the p2p
 	// layer uses. Options are: "fifo", "priority" and "wdrr",
 	// with the default being "priority".
@@ -748,7 +743,6 @@ func DefaultP2PConfig() *P2PConfig {
 		DialTimeout: 3 * time.Second,
 		TestDialFail: false,
 		QueueType: "priority",
-		UseLegacy: false,
 	}
 }
 
diff --git a/config/toml.go b/config/toml.go
index 6f07d6537..ee4c30004 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -265,9 +265,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
 #######################################################
 [p2p]
 
-# Enable the legacy p2p layer.
-use-legacy = {{ .P2P.UseLegacy }}
-
 # Select the p2p internal queue
 queue-type = "{{ .P2P.QueueType }}"
 
diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md
index 5695c1a28..0c11df6f7 100644
--- a/docs/nodes/configuration.md
+++ b/docs/nodes/configuration.md
@@ -221,9 +221,6 @@ pprof-laddr = ""
 #######################################################
 [p2p]
 
-# Enable the legacy p2p layer.
-use-legacy = false
-
 # Select the p2p internal queue
 queue-type = "priority"
 
diff --git a/internal/p2p/base_reactor.go b/internal/p2p/base_reactor.go
deleted file mode 100644
index 09925caf8..000000000
--- a/internal/p2p/base_reactor.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package p2p
-
-import (
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/service"
-)
-
-// Reactor is responsible for handling incoming messages on one or more
-// Channel. Switch calls GetChannels when reactor is added to it. When a new
-// peer joins our node, InitPeer and AddPeer are called. 
RemovePeer is called -// when the peer is stopped. Receive is called when a message is received on a -// channel associated with this reactor. -// -// Peer#Send or Peer#TrySend should be used to send the message to a peer. -type Reactor interface { - service.Service // Start, Stop - - // SetSwitch allows setting a switch. - SetSwitch(*Switch) - - // GetChannels returns the list of MConnection.ChannelDescriptor. Make sure - // that each ID is unique across all the reactors added to the switch. - GetChannels() []*conn.ChannelDescriptor - - // InitPeer is called by the switch before the peer is started. Use it to - // initialize data for the peer (e.g. peer state). - // - // NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start - // the peer. Do not store any data associated with the peer in the reactor - // itself unless you don't want to have a state, which is never cleaned up. - InitPeer(peer Peer) Peer - - // AddPeer is called by the switch after the peer is added and successfully - // started. Use it to start goroutines communicating with the peer. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. - // - // XXX: do not call any methods that can block or incur heavy processing. - // https://github.com/tendermint/tendermint/issues/2888 - Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - service.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *service.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 62587c0da..84384011b 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -195,7 +195,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read, write string = "", writes[0] + var read, write = "", writes[0] var readCount = 0 for _, readChunk := range reads { read += readChunk diff --git a/internal/p2p/mock/reactor.go b/internal/p2p/mock/reactor.go deleted file mode 100644 index d634a8032..000000000 --- a/internal/p2p/mock/reactor.go +++ /dev/null @@ -1,23 +0,0 @@ -package mock - -import ( - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" -) - -type Reactor struct { - p2p.BaseReactor -} - -func NewReactor() *Reactor { - r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) - r.SetLogger(log.TestingLogger()) - return r -} - -func (r *Reactor) GetChannels() 
[]*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go deleted file mode 100644 index 709a1294a..000000000 --- a/internal/p2p/peer.go +++ /dev/null @@ -1,371 +0,0 @@ -package p2p - -import ( - "fmt" - "io" - "net" - "runtime/debug" - "time" - - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -//go:generate ../../scripts/mockery_generate.sh Peer - -const metricsTickerDuration = 10 * time.Second - -// Peer is an interface representing a peer connected on a reactor. -type Peer interface { - service.Service - FlushStop() - - ID() types.NodeID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection - RemoteAddr() net.Addr // remote address of the connection - - IsOutbound() bool // did we dial the peer - IsPersistent() bool // do we redial this peer when we disconnect - - CloseConn() error // close original connection - - NodeInfo() types.NodeInfo // peer's info - Status() tmconn.ConnectionStatus - SocketAddr() *NetAddress // actual address of the socket - - Send(byte, []byte) bool - TrySend(byte, []byte) bool - - Set(string, interface{}) - Get(string) interface{} -} - -//---------------------------------------------------------- - -// peerConn contains the raw connection and its config. -type peerConn struct { - outbound bool - persistent bool - conn Connection - ip net.IP // cached RemoteIP() -} - -func newPeerConn(outbound, persistent bool, conn Connection) peerConn { - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - } -} - -// Return the IP from the connection RemoteAddr -func (pc peerConn) RemoteIP() net.IP { - if pc.ip == nil { - pc.ip = pc.conn.RemoteEndpoint().IP - } - return pc.ip -} - -// peer implements Peer. -// -// Before using a peer, you will need to perform a handshake on connection. -type peer struct { - service.BaseService - - // raw peerConn and the multiplex connection - peerConn - - // peer's node info and the channel it knows about - // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo types.NodeInfo - channels []byte - reactors map[byte]Reactor - onPeerError func(Peer, interface{}) - - // User data - Data *cmap.CMap - - metrics *Metrics - metricsTicker *time.Ticker -} - -type PeerOption func(*peer) - -func newPeer( - nodeInfo types.NodeInfo, - pc peerConn, - reactorsByCh map[byte]Reactor, - onPeerError func(Peer, interface{}), - options ...PeerOption, -) *peer { - p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - reactors: reactorsByCh, - onPeerError: onPeerError, - Data: cmap.NewCMap(), - metricsTicker: time.NewTicker(metricsTickerDuration), - metrics: NopMetrics(), - } - - p.BaseService = *service.NewBaseService(nil, "Peer", p) - for _, option := range options { - option(p) - } - - return p -} - -// onError calls the peer error callback. -func (p *peer) onError(err interface{}) { - p.onPeerError(p, err) -} - -// String representation. 
-func (p *peer) String() string {
-	if p.outbound {
-		return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID())
-	}
-
-	return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID())
-}
-
-//---------------------------------------------------
-// Implements service.Service
-
-// SetLogger implements BaseService.
-func (p *peer) SetLogger(l log.Logger) {
-	p.Logger = l
-}
-
-// OnStart implements BaseService.
-func (p *peer) OnStart() error {
-	if err := p.BaseService.OnStart(); err != nil {
-		return err
-	}
-
-	go p.processMessages()
-	go p.metricsReporter()
-
-	return nil
-}
-
-// processMessages processes messages received from the connection.
-func (p *peer) processMessages() {
-	defer func() {
-		if r := recover(); r != nil {
-			p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack()))
-			p.onError(fmt.Errorf("panic during peer message processing: %v", r))
-		}
-	}()
-
-	for {
-		chID, msg, err := p.conn.ReceiveMessage()
-		if err != nil {
-			p.onError(err)
-			return
-		}
-		reactor, ok := p.reactors[byte(chID)]
-		if !ok {
-			p.onError(fmt.Errorf("unknown channel %v", chID))
-			return
-		}
-		reactor.Receive(byte(chID), p, msg)
-	}
-}
-
-// FlushStop mimics OnStop but additionally ensures that all successful
-// .Send() calls will get flushed before closing the connection.
-// NOTE: it is not safe to call this method more than once.
-func (p *peer) FlushStop() {
-	p.metricsTicker.Stop()
-	p.BaseService.OnStop()
-	if err := p.conn.FlushClose(); err != nil {
-		p.Logger.Debug("error while stopping peer", "err", err)
-	}
-}
-
-// OnStop implements BaseService.
-func (p *peer) OnStop() {
-	p.metricsTicker.Stop()
-	p.BaseService.OnStop()
-	if err := p.conn.Close(); err != nil {
-		p.Logger.Debug("error while stopping peer", "err", err)
-	}
-}
-
-//---------------------------------------------------
-// Implements Peer
-
-// ID returns the peer's ID - the hex encoded hash of its pubkey.
-func (p *peer) ID() types.NodeID {
-	return p.nodeInfo.ID()
-}
-
-// IsOutbound returns true if the connection is outbound, false otherwise.
-func (p *peer) IsOutbound() bool {
-	return p.peerConn.outbound
-}
-
-// IsPersistent returns true if the peer is persistent, false otherwise.
-func (p *peer) IsPersistent() bool {
-	return p.peerConn.persistent
-}
-
-// NodeInfo returns a copy of the peer's NodeInfo.
-func (p *peer) NodeInfo() types.NodeInfo {
-	return p.nodeInfo
-}
-
-// SocketAddr returns the address of the socket.
-// For outbound peers, it's the address dialed (after DNS resolution).
-// For inbound peers, it's the address returned by the underlying connection
-// (not what's reported in the peer's NodeInfo).
-func (p *peer) SocketAddr() *NetAddress {
-	endpoint := p.peerConn.conn.RemoteEndpoint()
-	return &NetAddress{
-		ID:   p.ID(),
-		IP:   endpoint.IP,
-		Port: endpoint.Port,
-	}
-}
-
-// Status returns the peer's ConnectionStatus.
-func (p *peer) Status() tmconn.ConnectionStatus {
-	return p.conn.Status()
-}
-
-// Send sends msg bytes to the channel identified by chID. Returns false if the
-// send queue is still full after the timeout specified by MConnection.
-func (p *peer) Send(chID byte, msgBytes []byte) bool {
-	if !p.IsRunning() {
-		// see Switch#Broadcast, where we fetch the list of peers and loop over
-		// them - while we're looping, one peer may be removed and stopped.
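-		// In that case, report the message as not sent rather than erroring.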
- return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.SendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// TrySend msg bytes to the channel identified by chID byte. Immediately returns -// false if the send queue is full. -func (p *peer) TrySend(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// Get the data for a given key. -func (p *peer) Get(key string) interface{} { - return p.Data.Get(key) -} - -// Set sets the data for the given key. -func (p *peer) Set(key string, data interface{}) { - p.Data.Set(key, data) -} - -// hasChannel returns true if the peer reported -// knowing about the given chID. -func (p *peer) hasChannel(chID byte) bool { - for _, ch := range p.channels { - if ch == chID { - return true - } - } - // NOTE: probably will want to remove this - // but could be helpful while the feature is new - p.Logger.Debug( - "Unknown channel for peer", - "channel", - chID, - "channels", - p.channels, - ) - return false -} - -// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all. -func (p *peer) CloseConn() error { - return p.peerConn.conn.Close() -} - -//--------------------------------------------------- -// methods only used for testing -// TODO: can we remove these? - -// CloseConn closes the underlying connection -func (pc *peerConn) CloseConn() { - pc.conn.Close() -} - -// RemoteAddr returns peer's remote network address. -func (p *peer) RemoteAddr() net.Addr { - endpoint := p.conn.RemoteEndpoint() - return &net.TCPAddr{ - IP: endpoint.IP, - Port: int(endpoint.Port), - } -} - -//--------------------------------------------------- - -func PeerMetrics(metrics *Metrics) PeerOption { - return func(p *peer) { - p.metrics = metrics - } -} - -func (p *peer) metricsReporter() { - for { - select { - case <-p.metricsTicker.C: - status := p.conn.Status() - var sendQueueSize float64 - for _, chStatus := range status.Channels { - sendQueueSize += float64(chStatus.SendQueueSize) - } - - p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) - case <-p.Quit(): - return - } - } -} diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go deleted file mode 100644 index 8d4ad4939..000000000 --- a/internal/p2p/peer_set.go +++ /dev/null @@ -1,149 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key types.NodeID) bool - HasIP(ip net.IP) bool - Get(key types.NodeID) Peer - List() []Peer - Size() int -} - -//----------------------------------------------------------------------------- - -// PeerSet is a special structure for keeping a table of peers. 
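-// It pairs a lookup map keyed by node ID with a slice of peers, so lookups
-// and ordered iteration are both cheap.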
-// Iteration over the peers is super fast and thread-safe.
-type PeerSet struct {
-	mtx    tmsync.Mutex
-	lookup map[types.NodeID]*peerSetItem
-	list   []Peer
-}
-
-type peerSetItem struct {
-	peer  Peer
-	index int
-}
-
-// NewPeerSet creates a new PeerSet with an initial list capacity of 256 items.
-func NewPeerSet() *PeerSet {
-	return &PeerSet{
-		lookup: make(map[types.NodeID]*peerSetItem),
-		list:   make([]Peer, 0, 256),
-	}
-}
-
-// Add adds the peer to the PeerSet.
-// It returns an error carrying the reason if the peer is already present.
-func (ps *PeerSet) Add(peer Peer) error {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	if ps.lookup[peer.ID()] != nil {
-		return ErrSwitchDuplicatePeerID{peer.ID()}
-	}
-
-	index := len(ps.list)
-	// Appending is safe even with other goroutines
-	// iterating over the ps.list slice.
-	ps.list = append(ps.list, peer)
-	ps.lookup[peer.ID()] = &peerSetItem{peer, index}
-	return nil
-}
-
-// Has returns true if the set contains the peer referred to by this
-// peerKey, otherwise false.
-func (ps *PeerSet) Has(peerKey types.NodeID) bool {
-	ps.mtx.Lock()
-	_, ok := ps.lookup[peerKey]
-	ps.mtx.Unlock()
-	return ok
-}
-
-// HasIP returns true if the set contains the peer referred to by this IP
-// address, otherwise false.
-func (ps *PeerSet) HasIP(peerIP net.IP) bool {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	return ps.hasIP(peerIP)
-}
-
-// hasIP does not acquire a lock so it can be used in public methods which
-// already lock.
-func (ps *PeerSet) hasIP(peerIP net.IP) bool {
-	for _, item := range ps.lookup {
-		if item.peer.RemoteIP().Equal(peerIP) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Get looks up a peer by the provided peerKey. Returns nil if peer is not
-// found.
-func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	item, ok := ps.lookup[peerKey]
-	if ok {
-		return item.peer
-	}
-	return nil
-}
-
-// Remove discards the peer by its key, if the peer was previously memoized.
-// Returns true if the peer was removed, and false if it was not found
-// in the set.
-func (ps *PeerSet) Remove(peer Peer) bool {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	item := ps.lookup[peer.ID()]
-	if item == nil {
-		return false
-	}
-
-	index := item.index
-	// Create a new copy of the list but with one less item.
-	// (we must copy because we'll be mutating the list).
-	newList := make([]Peer, len(ps.list)-1)
-	copy(newList, ps.list)
-	// If it's the last peer, that's an easy special case.
-	if index == len(ps.list)-1 {
-		ps.list = newList
-		delete(ps.lookup, peer.ID())
-		return true
-	}
-
-	// Replace the popped item with the last item in the old list.
-	lastPeer := ps.list[len(ps.list)-1]
-	lastPeerKey := lastPeer.ID()
-	lastPeerItem := ps.lookup[lastPeerKey]
-	newList[index] = lastPeer
-	lastPeerItem.index = index
-	ps.list = newList
-	delete(ps.lookup, peer.ID())
-	return true
-}
-
-// Size returns the number of unique items in the peerSet.
-func (ps *PeerSet) Size() int {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	return len(ps.list)
-}
-
-// List returns the threadsafe list of peers.
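-// The underlying slice is returned without copying, so callers must treat
-// it as read-only.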
-func (ps *PeerSet) List() []Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.list -} diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go deleted file mode 100644 index 3e2397d2d..000000000 --- a/internal/p2p/peer_set_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package p2p - -import ( - "net" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// mockPeer for testing the PeerSet -type mockPeer struct { - service.BaseService - ip net.IP - id types.NodeID -} - -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() types.NodeID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } - -// Returns a mock peer -func newMockPeer(ip net.IP) *mockPeer { - if ip == nil { - ip = net.IP{127, 0, 0, 1} - } - nodeKey := types.GenNodeKey() - return &mockPeer{ - ip: ip, - id: nodeKey.ID, - } -} - -func TestPeerSetAddRemoveOne(t *testing.T) { - t.Parallel() - - peerSet := NewPeerSet() - - var peerList []Peer - for i := 0; i < 5; i++ { - p := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(p); err != nil { - t.Error(err) - } - peerList = append(peerList, p) - } - - n := len(peerList) - // 1. Test removing from the front - for i, peerAtFront := range peerList { - removed := peerSet.Remove(peerAtFront) - assert.True(t, removed) - wantSize := n - i - 1 - for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) - assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) - // Test the route of removing the now non-existent element - removed := peerSet.Remove(peerAtFront) - assert.False(t, removed) - } - } - - // 2. 
Next we are testing removing the peer at the end - // a) Replenish the peerSet - for _, peer := range peerList { - if err := peerSet.Add(peer); err != nil { - t.Error(err) - } - } - - // b) In reverse, remove each element - for i := n - 1; i >= 0; i-- { - peerAtEnd := peerList[i] - removed := peerSet.Remove(peerAtEnd) - assert.True(t, removed) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) - assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) - } -} - -func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - - peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { - peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(peer); err != nil { - t.Errorf("failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - removed := peerSet.Remove(peer) - assert.True(t, removed) - if peerSet.Has(peer.ID()) { - t.Errorf("failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("failed to remove peer and decrement size") - } - } -} - -func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - peer := newMockPeer(nil) - - n := 20 - errsChan := make(chan error) - // Add the same asynchronously to test the - // concurrent guarantees of our APIs, and - // our expectation in the end is that only - // one addition succeeded, but the rest are - // instances of ErrSwitchDuplicatePeer. - for i := 0; i < n; i++ { - go func() { - errsChan <- peerSet.Add(peer) - }() - } - - // Now collect and tally the results - errsTally := make(map[string]int) - for i := 0; i < n; i++ { - err := <-errsChan - - switch err.(type) { - case ErrSwitchDuplicatePeerID: - errsTally["duplicateID"]++ - default: - errsTally["other"]++ - } - } - - // Our next procedure is to ensure that only one addition - // succeeded and that the rest are each ErrSwitchDuplicatePeer. - wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] - assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") - - wantNilErrCount, gotNilErrCount := 1, errsTally["other"] - assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") -} - -func TestPeerSetGet(t *testing.T) { - t.Parallel() - - var ( - peerSet = NewPeerSet() - peer = newMockPeer(nil) - ) - - assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") - - if err := peerSet.Add(peer); err != nil { - t.Fatalf("Failed to add new peer: %v", err) - } - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - // Add them asynchronously to test the - // concurrent guarantees of our APIs. 
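-		// (each goroutine below performs a concurrent Get against the same peer)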
- wg.Add(1) - go func(i int) { - defer wg.Done() - have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) - }(i) - } - wg.Wait() -} diff --git a/internal/p2p/peer_test.go b/internal/p2p/peer_test.go deleted file mode 100644 index dfe7bc798..000000000 --- a/internal/p2p/peer_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - golog "log" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" -) - -func TestPeerBasic(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.IsRunning()) - assert.True(p.IsOutbound()) - assert.False(p.IsPersistent()) - p.persistent = true - assert.True(p.IsPersistent()) - assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String()) - assert.Equal(rp.ID(), p.ID()) -} - -func TestPeerSend(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - config := cfg - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.Send(testCh, []byte("Asylum"))) -} - -func createOutboundPeerAndPerformHandshake( - addr *NetAddress, - config *config.P2PConfig, - mConfig tmconn.MConnConfig, -) (*peer, error) { - chDescs := []*tmconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - pk := ed25519.GenPrivKey() - ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer") - transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{}) - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pc, err := testOutboundPeerConn(transport, addr, config, false, pk) - if err != nil { - return nil, err - } - peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk) - if err != nil { - return nil, err - } - - p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {}) - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p, nil -} - -func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func testOutboundPeerConn( - transport *MConnTransport, - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - var pc peerConn - conn, err := testDial(addr, config) - if err != nil { - return pc, 
fmt.Errorf("error creating peer: %w", err) - } - - pc, err = testPeerConn(transport, conn, true, persistent) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return pc, fmt.Errorf("%v: %w", cerr.Error(), err) - } - return pc, err - } - - return pc, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - Network string - addr *NetAddress - channels bytes.HexBytes - listenAddr string - listener net.Listener -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept() -} - -func (rp *remotePeer) Stop() { - rp.listener.Close() -} - -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conn, err := addr.DialTimeout(1 * time.Second) - if err != nil { - return nil, err - } - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - return nil, err - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - return nil, err - } - return conn, err -} - -func (rp *remotePeer) accept() { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conns := []net.Conn{} - - for { - conn, err := rp.listener.Accept() - if err != nil { - golog.Printf("Failed to accept conn: %+v", err) - for _, conn := range conns { - _ = conn.Close() - } - return - } - - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - golog.Printf("Failed to create a peer: %+v", err) - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - golog.Printf("Failed to handshake a peer: %+v", err) - } - - conns = append(conns, conn) - } -} - -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } - if rp.Network != "" { - ni.Network = rp.Network - } - return ni -} diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go deleted file mode 100644 index 6c5f78663..000000000 --- a/internal/p2p/pex/addrbook.go +++ /dev/null @@ -1,948 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. 
-// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "encoding/binary" - "fmt" - "hash" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/minio/highwayhash" - "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. -// TODO: break this up? -type AddrBook interface { - service.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - AddPrivateIDs([]string) - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? - NeedMoreAddrs() bool - // Is Address Book Empty? Answer should not depend on being in your own - // address book, or private peers - Empty() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(types.NodeID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list - // Add bad peers back to addrBook - ReinstateBadPeers() - - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - Size() int - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. -type addrBook struct { - service.BaseService - - // accessed concurrently - mtx tmsync.Mutex - ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - // immutable after creation - filePath string - key string // random prefix for bucket placement - routabilityStrict bool - hasher hash.Hash64 - - wg sync.WaitGroup -} - -func mustNewHasher() hash.Hash64 { - key := crypto.CRandBytes(highwayhash.Size) - hasher, err := highwayhash.New64(key) - if err != nil { - panic(err) - } - return hasher -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { - am := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *service.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. 
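-// Each bucket is an independent map from address string to knownAddress.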
-// When modifying this, don't forget to update loadFromFile()
-func (a *addrBook) init() {
-	a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
-	// New addr buckets
-	a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
-	for i := range a.bucketsNew {
-		a.bucketsNew[i] = make(map[string]*knownAddress)
-	}
-	// Old addr buckets
-	a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
-	for i := range a.bucketsOld {
-		a.bucketsOld[i] = make(map[string]*knownAddress)
-	}
-	a.hasher = mustNewHasher()
-}
-
-// OnStart implements Service.
-func (a *addrBook) OnStart() error {
-	if err := a.BaseService.OnStart(); err != nil {
-		return err
-	}
-	a.loadFromFile(a.filePath)
-
-	// wg.Add to ensure that any invocation of .Wait()
-	// later on will wait for saveRoutine to terminate.
-	a.wg.Add(1)
-	go a.saveRoutine()
-
-	return nil
-}
-
-// OnStop implements Service.
-func (a *addrBook) OnStop() {
-	a.BaseService.OnStop()
-}
-
-func (a *addrBook) Wait() {
-	a.wg.Wait()
-}
-
-func (a *addrBook) FilePath() string {
-	return a.filePath
-}
-
-//-------------------------------------------------------
-
-// AddOurAddress adds one of our addresses.
-func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	a.Logger.Info("Add our address to book", "addr", addr)
-	a.ourAddrs[addr.String()] = struct{}{}
-}
-
-// OurAddress returns true if it is our address.
-func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	_, ok := a.ourAddrs[addr.String()]
-	return ok
-}
-
-func (a *addrBook) AddPrivateIDs(ids []string) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	for _, id := range ids {
-		a.privateIDs[types.NodeID(id)] = struct{}{}
-	}
-}
-
-// AddAddress implements AddrBook.
-// Adds the address to a "new" bucket; if it's already in one, it is only re-added probabilistically.
-// Returns an error if the addr is non-routable. Does not add self.
-// NOTE: addr must not be nil
-func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	return a.addAddress(addr, src)
-}
-
-// RemoveAddress implements AddrBook - removes the address from the book.
-func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	a.removeAddress(addr)
-}
-
-// IsGood returns true if the peer was ever marked as good and hasn't
-// done anything wrong since then.
-func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	return a.addrLookup[addr.ID].isOld()
-}
-
-// IsBanned returns true if the peer is currently banned.
-func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	_, ok := a.badPeers[addr.ID]
-	a.mtx.Unlock()
-
-	return ok
-}
-
-// HasAddress returns true if the address is in the book.
-func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	ka := a.addrLookup[addr.ID]
-	return ka != nil
-}
-
-// NeedMoreAddrs implements AddrBook - returns true if there are not enough addresses in the book.
-func (a *addrBook) NeedMoreAddrs() bool {
-	return a.Size() < needAddressThreshold
-}
-
-// Empty implements AddrBook - returns true if there are no addresses in the address book.
-// Does not count the peer appearing in its own address book, or private peers.
-func (a *addrBook) Empty() bool {
-	return a.Size() == 0
-}
-
-// PickAddress implements AddrBook. It picks an address to connect to.
-// The address is picked randomly from an old or new bucket according -// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range) -// and determines how biased we are to pick an address from a new bucket. -// PickAddress returns nil if the AddrBook is empty or if we try to pick -// from an empty bucket. -// nolint:gosec // G404: Use of weak random number generator -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) - - // pick a random peer from a random bucket - var bucket map[string]*knownAddress - pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation - if (pickFromOldBucket && a.nOld == 0) || - (!pickFromOldBucket && a.nNew == 0) { - return nil - } - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))] - } else { - bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))] - } - } - // pick a random index and loop over the map to return that index - randIndex := mrand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - return nil -} - -// MarkGood implements AddrBook - it marks the peer as good and -// moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[id] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } - } -} - -// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. -func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Kicks address out from book, places -// the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.addBadPeer(addr, banTime) { - a.removeAddress(addr) - } -} - -// ReinstateBadPeers removes bad peers from ban list and places them into a new -// bucket. -func (a *addrBook) ReinstateBadPeers() { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, ka := range a.badPeers { - if ka.isBanned() { - continue - } - - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } - - if err := a.addToNewBucket(ka, bucket); err != nil { - a.Logger.Error("Error adding peer to new bucket", "err", err) - } - delete(a.badPeers, ka.ID()) - - a.Logger.Info("Reinstated address", "addr", ka.Addr) - } -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. 
-// Must never return a nil address.
-func (a *addrBook) GetSelection() []*p2p.NetAddress {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	bookSize := a.size()
-	if bookSize <= 0 {
-		if bookSize < 0 {
-			panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
-		}
-		return nil
-	}
-
-	numAddresses := tmmath.MaxInt(
-		tmmath.MinInt(minGetSelection, bookSize),
-		bookSize*getSelectionPercent/100)
-	numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)
-
-	// XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk,
-	// could we just select a random numAddresses of indexes?
-	allAddr := make([]*p2p.NetAddress, bookSize)
-	i := 0
-	for _, ka := range a.addrLookup {
-		allAddr[i] = ka.Addr
-		i++
-	}
-
-	// Fisher-Yates shuffle the array. We only need to do the first
-	// `numAddresses' since we are throwing away the rest.
-	for i := 0; i < numAddresses; i++ {
-		// pick a number between current index and the end
-		// nolint:gosec // G404: Use of weak random number generator
-		j := mrand.Intn(len(allAddr)-i) + i
-		allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
-	}
-
-	// slice off the limit we are willing to share.
-	return allAddr[:numAddresses]
-}
-
-func percentageOfNum(p, n int) int {
-	return int(math.Round((float64(p) / float64(100)) * float64(n)))
-}
-
-// GetSelectionWithBias implements AddrBook.
-// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
-// Must never return a nil address.
-//
-// Each address is picked randomly from an old or new bucket according to the
-// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
-// that range) and determines how biased we are to pick an address from a new
-// bucket.
-func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	bookSize := a.size()
-	if bookSize <= 0 {
-		if bookSize < 0 {
-			panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
-		}
-		return nil
-	}
-
-	if biasTowardsNewAddrs > 100 {
-		biasTowardsNewAddrs = 100
-	}
-	if biasTowardsNewAddrs < 0 {
-		biasTowardsNewAddrs = 0
-	}
-
-	numAddresses := tmmath.MaxInt(
-		tmmath.MinInt(minGetSelection, bookSize),
-		bookSize*getSelectionPercent/100)
-	numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)
-
-	// Number of new addresses that, if possible, should be at the beginning of the selection.
-	// If there are not enough old addrs, new addrs will be chosen instead.
-	numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld)
-	selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd)
-	selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...)
-	return selection
-}
-
-//------------------------------------------------
-
-// Size returns the number of addresses in the book.
-func (a *addrBook) Size() int {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	return a.size()
-}
-
-func (a *addrBook) size() int {
-	return a.nNew + a.nOld
-}
-
-//----------------------------------------------------------
-
-// Save persists the address book to disk.
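-// It is safe to call concurrently, since saveToFile is thread safe.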
-func (a *addrBook) Save() {
-	a.saveToFile(a.filePath) // thread safe
-}
-
-func (a *addrBook) saveRoutine() {
-	defer a.wg.Done()
-
-	saveFileTicker := time.NewTicker(dumpAddressInterval)
-out:
-	for {
-		select {
-		case <-saveFileTicker.C:
-			a.saveToFile(a.filePath)
-		case <-a.Quit():
-			break out
-		}
-	}
-	saveFileTicker.Stop()
-	a.saveToFile(a.filePath)
-}
-
-//----------------------------------------------------------
-
-func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
-	switch bucketType {
-	case bucketTypeNew:
-		return a.bucketsNew[bucketIdx]
-	case bucketTypeOld:
-		return a.bucketsOld[bucketIdx]
-	default:
-		panic("Invalid bucket type")
-	}
-}
-
-// Adds ka to a new bucket. A full bucket makes room by expiring its stalest
-// entry, so this only returns an error if ka already lives in an old bucket.
-func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error {
-	// Consistency check to ensure we don't add an already known address
-	if ka.isOld() {
-		return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx}
-	}
-
-	addrStr := ka.Addr.String()
-	bucket := a.getBucket(bucketTypeNew, bucketIdx)
-
-	// Already exists?
-	if _, ok := bucket[addrStr]; ok {
-		return nil
-	}
-
-	// Enforce max addresses.
-	if len(bucket) > newBucketSize {
-		a.Logger.Info("new bucket is full, expiring new")
-		a.expireNew(bucketIdx)
-	}
-
-	// Add to bucket.
-	bucket[addrStr] = ka
-	// increment nNew if the peer doesn't already exist in a bucket
-	if ka.addBucketRef(bucketIdx) == 1 {
-		a.nNew++
-	}
-
-	// Add it to addrLookup
-	a.addrLookup[ka.ID()] = ka
-	return nil
-}
-
-// Adds ka to an old bucket. Returns false if ka is ineligible or the bucket is full.
-func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
-	// Sanity check
-	if ka.isNew() {
-		a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka))
-		return false
-	}
-	if len(ka.Buckets) != 0 {
-		a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka))
-		return false
-	}
-
-	addrStr := ka.Addr.String()
-	bucket := a.getBucket(bucketTypeOld, bucketIdx)
-
-	// Already exists?
-	if _, ok := bucket[addrStr]; ok {
-		return true
-	}
-
-	// Enforce max addresses.
-	if len(bucket) > oldBucketSize {
-		return false
-	}
-
-	// Add to bucket.
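-	// (addBucketRef reports how many buckets now reference ka; a result of 1
-	// means this is its first bucket, so the old-address counter is bumped)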
-	bucket[addrStr] = ka
-	if ka.addBucketRef(bucketIdx) == 1 {
-		a.nOld++
-	}
-
-	// Ensure in addrLookup
-	a.addrLookup[ka.ID()] = ka
-
-	return true
-}
-
-func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
-	if ka.BucketType != bucketType {
-		a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka))
-		return
-	}
-	bucket := a.getBucket(bucketType, bucketIdx)
-	delete(bucket, ka.Addr.String())
-	if ka.removeBucketRef(bucketIdx) == 0 {
-		if bucketType == bucketTypeNew {
-			a.nNew--
-		} else {
-			a.nOld--
-		}
-		delete(a.addrLookup, ka.ID())
-	}
-}
-
-func (a *addrBook) removeFromAllBuckets(ka *knownAddress) {
-	for _, bucketIdx := range ka.Buckets {
-		bucket := a.getBucket(ka.BucketType, bucketIdx)
-		delete(bucket, ka.Addr.String())
-	}
-	ka.Buckets = nil
-	if ka.BucketType == bucketTypeNew {
-		a.nNew--
-	} else {
-		a.nOld--
-	}
-	delete(a.addrLookup, ka.ID())
-}
-
-//----------------------------------------------------------
-
-func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
-	bucket := a.getBucket(bucketType, bucketIdx)
-	var oldest *knownAddress
-	for _, ka := range bucket {
-		if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
-			oldest = ka
-		}
-	}
-	return oldest
-}
-
-// adds the address to a "new" bucket. If it's already in one,
-// it is only re-added probabilistically.
-func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
-	if addr == nil || src == nil {
-		return ErrAddrBookNilAddr{addr, src}
-	}
-
-	if err := addr.Valid(); err != nil {
-		return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err}
-	}
-
-	if _, ok := a.badPeers[addr.ID]; ok {
-		return ErrAddressBanned{addr}
-	}
-
-	if _, ok := a.privateIDs[addr.ID]; ok {
-		return ErrAddrBookPrivate{addr}
-	}
-
-	if _, ok := a.privateIDs[src.ID]; ok {
-		return ErrAddrBookPrivateSrc{src}
-	}
-
-	// TODO: we should track ourAddrs by ID and by IP:PORT and refuse both.
-	if _, ok := a.ourAddrs[addr.String()]; ok {
-		return ErrAddrBookSelf{addr}
-	}
-
-	if a.routabilityStrict && !addr.Routable() {
-		return ErrAddrBookNonRoutable{addr}
-	}
-
-	ka := a.addrLookup[addr.ID]
-	if ka != nil {
-		// If it's already old and the address IDs are the same, ignore it.
-		// This avoids issues with a node on the network attempting to change
-		// the IP of a known node ID, which could enable an eclipse attack on the node.
-		if ka.isOld() && ka.Addr.ID == addr.ID {
-			return nil
-		}
-		// Already in max new buckets.
-		if len(ka.Buckets) == maxNewBucketsPerAddress {
-			return nil
-		}
-		// The more entries we have, the less likely we are to add more.
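-		// Concretely, the add below succeeds with probability 1/(2*len(ka.Buckets)).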
- factor := int32(2 * len(ka.Buckets)) - // nolint:gosec // G404: Use of weak random number generator - if mrand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } - return a.addToNewBucket(ka, bucket) -} - -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { - var buckets []map[string]*knownAddress - switch bucketType { - case bucketTypeNew: - buckets = a.bucketsNew - case bucketTypeOld: - buckets = a.bucketsOld - default: - panic("unexpected bucketType") - } - total := 0 - for _, bucket := range buckets { - total += len(bucket) - } - addresses := make([]*knownAddress, 0, total) - for _, bucket := range buckets { - for _, ka := range bucket { - addresses = append(addresses, ka) - } - } - selection := make([]*p2p.NetAddress, 0, num) - chosenSet := make(map[string]bool, num) - rand := tmrand.NewRand() - rand.Shuffle(total, func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - for _, addr := range addresses { - if chosenSet[addr.Addr.String()] { - continue - } - chosenSet[addr.Addr.String()] = true - selection = append(selection, addr.Addr) - if len(selection) >= num { - return selection - } - } - return selection -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) error { - // Sanity check - if ka.isOld() { - a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil - } - if len(ka.Buckets) == 0 { - a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } - if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { - a.Logger.Error("Error adding peer to old bucket", "err", err) - } - - // Finally, add our ka to old bucket again. 
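-		// (this is expected to succeed now that the oldest entry has been evicted)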
- added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } - return nil -} - -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) -} - -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { - // check it exists in addrbook - ka := a.addrLookup[addr.ID] - // check address is not already there - if ka == nil { - return false - } - - if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { - // add to bad peer list - ka.ban(banTime) - a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) - } - return true -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil -} - -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. 
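-// The group key feeds into bucket placement, limiting how many buckets the
-// addresses of a single network group can occupy.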
-func (a *addrBook) groupKey(na *p2p.NetAddress) string {
-	return groupKeyFor(na, a.routabilityStrict)
-}
-
-func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string {
-	if routabilityStrict && na.Local() {
-		return "local"
-	}
-	if routabilityStrict && !na.Routable() {
-		return "unroutable"
-	}
-
-	if ipv4 := na.IP.To4(); ipv4 != nil {
-		return na.IP.Mask(net.CIDRMask(16, 32)).String()
-	}
-
-	if na.RFC6145() || na.RFC6052() {
-		// last four bytes are the ip address
-		ip := na.IP[12:16]
-		return ip.Mask(net.CIDRMask(16, 32)).String()
-	}
-
-	if na.RFC3964() {
-		ip := na.IP[2:6]
-		return ip.Mask(net.CIDRMask(16, 32)).String()
-	}
-
-	if na.RFC4380() {
-		// teredo tunnels have the last 4 bytes as the v4 address XOR
-		// 0xff.
-		ip := net.IP(make([]byte, 4))
-		for i, byte := range na.IP[12:16] {
-			ip[i] = byte ^ 0xff
-		}
-		return ip.Mask(net.CIDRMask(16, 32)).String()
-	}
-
-	if na.OnionCatTor() {
-		// group is keyed off the first 4 bits of the actual onion key.
-		return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
-	}
-
-	// OK, so now we know ourselves to be an IPv6 address.
-	// bitcoind uses /32 for everything, except for Hurricane Electric's
-	// (he.net) IP range, which it uses /36 for.
-	bits := 32
-	heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)}
-	if heNet.Contains(na.IP) {
-		bits = 36
-	}
-	ipv6Mask := net.CIDRMask(bits, 128)
-	return na.IP.Mask(ipv6Mask).String()
-}
-
-func (a *addrBook) hash(b []byte) ([]byte, error) {
-	a.hasher.Reset()
-	a.hasher.Write(b)
-	return a.hasher.Sum(nil), nil
-}
diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go
deleted file mode 100644
index 3d21314ad..000000000
--- a/internal/p2p/pex/addrbook_test.go
+++ /dev/null
@@ -1,777 +0,0 @@
-package pex
-
-import (
-	"encoding/hex"
-	"fmt"
-	"io/ioutil"
-	"math"
-	mrand "math/rand"
-	"net"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/libs/log"
-	tmmath "github.com/tendermint/tendermint/libs/math"
-	tmrand "github.com/tendermint/tendermint/libs/rand"
-	"github.com/tendermint/tendermint/types"
-)
-
-// FIXME These tests should not rely on .(*addrBook) assertions
-
-func TestAddrBookPickAddress(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-
-	// 0 addresses
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	assert.Zero(t, book.Size())
-
-	addr := book.PickAddress(50)
-	assert.Nil(t, addr, "expected no address")
-
-	randAddrs := randNetAddressPairs(t, 1)
-	addrSrc := randAddrs[0]
-	err := book.AddAddress(addrSrc.addr, addrSrc.src)
-	require.NoError(t, err)
-
-	// pick an address when we only have new address
-	addr = book.PickAddress(0)
-	assert.NotNil(t, addr, "expected an address")
-	addr = book.PickAddress(50)
-	assert.NotNil(t, addr, "expected an address")
-	addr = book.PickAddress(100)
-	assert.NotNil(t, addr, "expected an address")
-
-	// pick an address when we only have old address
-	book.MarkGood(addrSrc.addr.ID)
-	addr = book.PickAddress(0)
-	assert.NotNil(t, addr, "expected an address")
-	addr = book.PickAddress(50)
-	assert.NotNil(t, addr, "expected an address")
-
-	// in this case, nNew==0 but we biased 100% to new, so we return nil
-	addr = book.PickAddress(100)
-	assert.Nil(t, addr, "did not expect an address")
-}
-
-func TestAddrBookSaveLoad(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-
-	// 0 addresses
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	book.Save()
-
-	book = NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	err := book.Start()
-	require.NoError(t, err)
-
-	assert.True(t, book.Empty())
-
-	// 100 addresses
-	randAddrs := randNetAddressPairs(t, 100)
-
-	for _, addrSrc := range randAddrs {
-		err := book.AddAddress(addrSrc.addr, addrSrc.src)
-		require.NoError(t, err)
-	}
-
-	assert.Equal(t, 100, book.Size())
-	book.Save()
-
-	book = NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	err = book.Start()
-	require.NoError(t, err)
-
-	assert.Equal(t, 100, book.Size())
-}
-
-func TestAddrBookLookup(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-	randAddrs := randNetAddressPairs(t, 100)
-
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	for _, addrSrc := range randAddrs {
-		addr := addrSrc.addr
-		src := addrSrc.src
-		err := book.AddAddress(addr, src)
-		require.NoError(t, err)
-
-		ka := book.HasAddress(addr)
-		assert.True(t, ka, "Expected to find KnownAddress %v but it wasn't there.", addr)
-	}
-}
-
-func TestAddrBookPromoteToOld(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-	randAddrs := randNetAddressPairs(t, 100)
-
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	for _, addrSrc := range randAddrs {
-		err := book.AddAddress(addrSrc.addr, addrSrc.src)
-		require.NoError(t, err)
-	}
-
-	// Attempt all addresses.
-	for _, addrSrc := range randAddrs {
-		book.MarkAttempt(addrSrc.addr)
-	}
-
-	// Promote half of them
-	for i, addrSrc := range randAddrs {
-		if i%2 == 0 {
-			book.MarkGood(addrSrc.addr.ID)
-		}
-	}
-
-	// TODO: do more testing :)
-
-	selection := book.GetSelection()
-	t.Logf("selection: %v", selection)
-
-	if len(selection) > book.Size() {
-		t.Errorf("selection cannot be bigger than the book")
-	}
-
-	selection = book.GetSelectionWithBias(30)
-	t.Logf("selection: %v", selection)
-
-	if len(selection) > book.Size() {
-		t.Errorf("selection with bias cannot be bigger than the book")
-	}
-
-	assert.Equal(t, book.Size(), 100, "expecting book size to be 100")
-}
-
-func TestAddrBookHandlesDuplicates(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-	book := NewAddrBook(fname, true)
-
-	book.SetLogger(log.TestingLogger())
-
-	randAddrs := randNetAddressPairs(t, 100)
-
-	differentSrc := randIPv4Address(t)
-	for _, addrSrc := range randAddrs {
-		err := book.AddAddress(addrSrc.addr, addrSrc.src)
-		require.NoError(t, err)
-		err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
-		require.NoError(t, err)
-		err = book.AddAddress(addrSrc.addr, differentSrc) // different src
-		require.NoError(t, err)
-	}
-
-	assert.Equal(t, 100, book.Size())
-}
-
-type netAddressPair struct {
-	addr *p2p.NetAddress
-	src  *p2p.NetAddress
-}
-
-func randNetAddressPairs(t *testing.T, n int) []netAddressPair {
-	randAddrs := make([]netAddressPair, n)
-	for i := 0; i < n; i++ {
-		randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)}
-	}
-	return randAddrs
-}
-
-func randIPv4Address(t *testing.T) *p2p.NetAddress {
-	for {
-		ip := fmt.Sprintf("%v.%v.%v.%v",
-			mrand.Intn(254)+1,
-			mrand.Intn(255),
-			mrand.Intn(255),
-			mrand.Intn(255),
-		)
-		port := mrand.Intn(65535-1) + 1
-		id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength)))
-		idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port))
-		addr, err := types.NewNetAddressString(idAddr)
-		assert.Nil(t, err, "error generating rand network address")
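-		// keep looping until we generate a routable address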
-		if addr.Routable() {
-			return addr
-		}
-	}
-}
-
-func TestAddrBookRemoveAddress(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-
-	addr := randIPv4Address(t)
-	err := book.AddAddress(addr, addr)
-	require.NoError(t, err)
-	assert.Equal(t, 1, book.Size())
-
-	book.RemoveAddress(addr)
-	assert.Equal(t, 0, book.Size())
-
-	nonExistingAddr := randIPv4Address(t)
-	book.RemoveAddress(nonExistingAddr)
-	assert.Equal(t, 0, book.Size())
-}
-
-func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) {
-	// create a book with 10 addresses, 1 good/old and 9 new
-	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9)
-	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
-	assert.NotNil(t, addrs)
-	assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book)
-}
-
-func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) {
-	// create a book with 10 addresses, 9 good/old and 1 new
-	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1)
-	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
-	assert.NotNil(t, addrs)
-	assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book)
-}
-
-func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) {
-	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0)
-	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
-	assert.Nil(t, addrs)
-}
-
-func TestAddrBookGetSelection(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-
-	// 1) empty book
-	assert.Empty(t, book.GetSelection())
-
-	// 2) add one address
-	addr := randIPv4Address(t)
-	err := book.AddAddress(addr, addr)
-	require.NoError(t, err)
-
-	assert.Equal(t, 1, len(book.GetSelection()))
-	assert.Equal(t, addr, book.GetSelection()[0])
-
-	// 3) add a bunch of addresses
-	randAddrs := randNetAddressPairs(t, 100)
-	for _, addrSrc := range randAddrs {
-		err := book.AddAddress(addrSrc.addr, addrSrc.src)
-		require.NoError(t, err)
-	}
-
-	// check there are no duplicates
-	addrs := make(map[string]*p2p.NetAddress)
-	selection := book.GetSelection()
-	for _, addr := range selection {
-		if dup, ok := addrs[addr.String()]; ok {
-			t.Fatalf("selection %v contains duplicates %v", selection, dup)
-		}
-		addrs[addr.String()] = addr
-	}
-
-	if len(selection) > book.Size() {
-		t.Errorf("selection %v cannot be bigger than the book", selection)
-	}
-}
-
-func TestAddrBookGetSelectionWithBias(t *testing.T) {
-	const biasTowardsNewAddrs = 30
-
-	fname := createTempFileName(t, "addrbook_test")
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-
-	// 1) empty book
-	selection := book.GetSelectionWithBias(biasTowardsNewAddrs)
-	assert.Empty(t, selection)
-
-	// 2) add one address
-	addr := randIPv4Address(t)
-	err := book.AddAddress(addr, addr)
-	require.NoError(t, err)
-
-	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
-	assert.Equal(t, 1, len(selection))
-	assert.Equal(t, addr, selection[0])
-
-	// 3) add a bunch of addresses
-	randAddrs := randNetAddressPairs(t, 100)
-	for _, addrSrc := range randAddrs {
-		err := book.AddAddress(addrSrc.addr, addrSrc.src)
-		require.NoError(t, err)
-	}
-
-	// check there are no duplicates
-	addrs := make(map[string]*p2p.NetAddress)
-	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
-	for _, addr := range selection {
-		if dup, ok := addrs[addr.String()]; ok {
-			t.Fatalf("selection %v contains duplicates %v", selection, dup)
-		}
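-		// remember the address so later duplicates are caught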
addrs[addr.String()] = addr
- }
-
- if len(selection) > book.Size() {
- t.Fatalf("selection %v could not be bigger than the book", selection)
- }
-
- // 4) mark 80% of the addresses as good
- randAddrsLen := len(randAddrs)
- for i, addrSrc := range randAddrs {
- if int((float64(i)/float64(randAddrsLen))*100) >= 20 {
- book.MarkGood(addrSrc.addr.ID)
- }
- }
-
- selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
-
- // check that ~70% of addresses returned are good
- good := 0
- for _, addr := range selection {
- if book.IsGood(addr) {
- good++
- }
- }
-
- got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs
-
- // compute some slack to protect against small differences due to rounding:
- slack := int(math.Round(float64(100) / float64(len(selection))))
- if got > expected+slack {
- t.Fatalf(
- "got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
- got,
- expected,
- good,
- len(selection),
- )
- }
- if got < expected-slack {
- t.Fatalf(
- "got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
- got,
- expected,
- good,
- len(selection),
- )
- }
-}
-
-func TestAddrBookHasAddress(t *testing.T) {
- fname := createTempFileName(t, "addrbook_test")
- book := NewAddrBook(fname, true)
- book.SetLogger(log.TestingLogger())
- addr := randIPv4Address(t)
- err := book.AddAddress(addr, addr)
- require.NoError(t, err)
-
- assert.True(t, book.HasAddress(addr))
-
- book.RemoveAddress(addr)
-
- assert.False(t, book.HasAddress(addr))
-}
-
-func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) {
- t.Helper()
- addrs := make([]*p2p.NetAddress, numAddrs)
- for i := 0; i < numAddrs; i++ {
- addrs[i] = randIPv4Address(t)
- }
-
- private := make([]string, numAddrs)
- for i, addr := range addrs {
- private[i] = string(addr.ID)
- }
- return addrs, private
-}
-
-func TestBanBadPeers(t *testing.T) {
- fname := createTempFileName(t, "addrbook_test")
- book := NewAddrBook(fname, true)
- book.SetLogger(log.TestingLogger())
-
- addr := randIPv4Address(t)
- _ = book.AddAddress(addr, addr)
-
- book.MarkBad(addr, 1*time.Second)
- // addr should not be reachable
- assert.False(t, book.HasAddress(addr))
- assert.True(t, book.IsBanned(addr))
-
- err := book.AddAddress(addr, addr)
- // book should not add an address from the blacklist
- assert.Error(t, err)
-
- time.Sleep(1 * time.Second)
- book.ReinstateBadPeers()
- // address should be reinstated in the new bucket
- assert.EqualValues(t, 1, book.Size())
- assert.True(t, book.HasAddress(addr))
- assert.False(t, book.IsGood(addr))
-}
-
-func TestAddrBookEmpty(t *testing.T) {
- fname := createTempFileName(t, "addrbook_test")
- book := NewAddrBook(fname, true)
- book.SetLogger(log.TestingLogger())
- // Check that the empty book is empty
- require.True(t, book.Empty())
- // Check that a book containing only our own address is still empty
- book.AddOurAddress(randIPv4Address(t))
- require.True(t, book.Empty())
- // Check that a book containing only private addrs is still empty
- _, privateIds := testCreatePrivateAddrs(t, 5)
- book.AddPrivateIDs(privateIds)
- require.True(t, book.Empty())
-
- // Check that a book with an address is not empty
- err := book.AddAddress(randIPv4Address(t), randIPv4Address(t))
- require.NoError(t, err)
- require.False(t, book.Empty())
-}
-
-func TestPrivatePeers(t *testing.T) {
- fname := createTempFileName(t, "addrbook_test")
- book := NewAddrBook(fname, true)
- book.SetLogger(log.TestingLogger())
-
- addrs, private := testCreatePrivateAddrs(t, 10)
-
book.AddPrivateIDs(private) - - // private addrs must not be added - for _, addr := range addrs { - err := book.AddAddress(addr, addr) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivate) - assert.True(t, ok) - } - } - - // addrs coming from private peers must not be added - err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivateSrc) - assert.True(t, ok) - } -} - -func testAddrBookAddressSelection(t *testing.T, bookSize int) { - // generate all combinations of old (m) and new addresses - for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { - nBookNew := bookSize - nBookOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) - - // create book and get selection - book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) - nAddrs := len(addrs) - assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr) - - // check there's no nil addresses - for _, addr := range addrs { - if addr == nil { - t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs) - } - } - - // XXX: shadowing - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - - // Given: - // n - num new addrs, m - num old addrs - // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, max(k,r-m)), aka expNew - // j=min(m, r-i), aka expOld - // - // We expect this layout: - // indices: 0...i-1 i...i+j-1 - // addresses: N0..Ni-1 O0..Oj-1 - // - // There is at least one partition and at most three. - var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld)) - expOld = tmmath.MinInt(nOld, nAddrs-expNew) - ) - - // Verify that the number of old and new addresses are as expected - if nNew != expNew { - t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) - } - if nOld != expOld { - t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) - } - - // Verify that the order of addresses is as expected - // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) - - // Build a list with the expected lengths of partitions and another with the expected types, e.g.: - // expSeqLens = [10, 22], expSeqTypes = [1, 2] - // means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses. 
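// A worked example of the formulas above (values chosen for illustration):
// take a book with m=7 old and n=3 new addresses, all of which end up in
// the selection (r=10; minGetSelection = 32 exceeds the book size). With
// biasToSelectNewPeers = 30:
//   k      = 30% of 10                  = 3
//   expNew = min(n=3, max(k=3, r-m=3))  = 3
//   expOld = min(m=7, r-expNew=7)       = 7
// so we expect a selection of 3 new addresses followed by 7 old ones.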
- var expSeqLens []int
- var expSeqTypes []int
-
- switch {
- case expOld == 0: // all new addresses
- expSeqLens = []int{nAddrs}
- expSeqTypes = []int{1}
- case expNew == 0: // all old addresses
- expSeqLens = []int{nAddrs}
- expSeqTypes = []int{2}
- case nAddrs-expNew-expOld == 0: // new addresses, old addresses
- expSeqLens = []int{expNew, expOld}
- expSeqTypes = []int{1, 2}
- }
-
- assert.Equal(t, expSeqLens, seqLens,
- "%s - expected sequence lengths of old/new %v, got %v",
- dbgStr, expSeqLens, seqLens)
- assert.Equal(t, expSeqTypes, seqTypes,
- "%s - expected sequence types (1-new, 2-old) was %v, got %v",
- dbgStr, expSeqTypes, seqTypes)
- }
-}
-
-func TestMultipleAddrBookAddressSelection(t *testing.T) {
- // test books with smaller size, < N
- const N = 32
- for bookSize := 1; bookSize < N; bookSize++ {
- testAddrBookAddressSelection(t, bookSize)
- }
-
- // Test for two books with sizes from the following ranges
- ranges := [...][]int{{33, 100}, {100, 175}}
- bookSizes := make([]int, 0, len(ranges))
- for _, r := range ranges {
- bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0])
- }
- t.Logf("Testing address selection for the following book sizes %v\n", bookSizes)
- for _, bookSize := range bookSizes {
- testAddrBookAddressSelection(t, bookSize)
- }
-}
-
-func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
- fname := createTempFileName(t, "addrbook_test")
-
- // This test adds a peer to the address book and marks it good.
- // It then attempts to override the peer's IP by adding a peer with the same ID
- // but a different IP. We distinguish the IPs as "RealIP" and "OverrideAttemptIP".
- peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5"
- peerRealIP := "1.1.1.1:26656"
- peerOverrideAttemptIP := "2.2.2.2:26656"
- SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656"
-
- // There is a chance that AddAddress will ignore the new peer it's given.
- // So we repeat trying to override the peer several times,
- // to ensure we aren't in a case that got probabilistically ignored.
- numOverrideAttempts := 10
-
- peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP)
- require.Nil(t, err)
-
- peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
- require.Nil(t, err)
-
- src, err := types.NewNetAddressString(SrcAddr)
- require.Nil(t, err)
-
- book := NewAddrBook(fname, true)
- book.SetLogger(log.TestingLogger())
- err = book.AddAddress(peerRealAddr, src)
- require.Nil(t, err)
- book.MarkAttempt(peerRealAddr)
- book.MarkGood(peerRealAddr.ID)
-
- // Double check that adding a peer again doesn't error
- err = book.AddAddress(peerRealAddr, src)
- require.Nil(t, err)
-
- // Try changing the IP while keeping the same node ID. (change 1.1.1.1 to 2.2.2.2)
- // This should just be ignored, and not error.
- for i := 0; i < numOverrideAttempts; i++ {
- err = book.AddAddress(peerOverrideAttemptAddr, src)
- require.Nil(t, err)
- }
- // Now check that the IP was not overridden.
- // This is done by sampling several peers from the addr book
- // and ensuring they all have the correct IP.
- // In the expected functionality, the book should only have one peer, hence this will pass.
- for i := 0; i < numOverrideAttempts; i++ {
- selection := book.GetSelection()
- for _, addr := range selection {
- require.Equal(t, addr.IP, peerRealAddr.IP)
- }
- }
-}
-
-func TestAddrBookGroupKey(t *testing.T) {
- // non-strict routability
- testCases := []struct {
- name string
- ip string
- expKey string
- }{
- // IPv4 normal.
- {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, - {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, - {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, - - // IPv6/IPv4 translations. - {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, - {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, - {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, - {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, - - // Tor. - {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, - {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, - {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, - - // IPv6 normal. - {"ipv6 normal", "2602:100::1", "2602:100::"}, - {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, - {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, - {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) - assert.Equal(t, tc.expKey, key, "#%d", i) - } - - // strict routability - testCases = []struct { - name string - ip string - expKey string - }{ - // Local addresses. - {"ipv4 localhost", "127.0.0.1", "local"}, - {"ipv6 localhost", "::1", "local"}, - {"ipv4 zero", "0.0.0.0", "local"}, - {"ipv4 first octet zero", "0.1.2.3", "local"}, - - // Unroutable addresses. - {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, - {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, - {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, - {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, - {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, - {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, - {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, - {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, - {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) - assert.Equal(t, tc.expKey, key, "#%d", i) - } -} - -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - assert.Equal(t, m, nOld, "old addresses") - assert.Equal(t, n, nNew, "new addresses") -} - -func createTempFileName(t *testing.T, prefix string) string { - t.Helper() - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - - fname := f.Name() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = os.Remove(fname) }) - - return fname -} - -func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - t.Helper() - fname = createTempFileName(t, "addrbook_test") - - book = NewAddrBook(fname, true).(*addrBook) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - randAddrs := randNetAddressPairs(t, nOld) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - book.MarkGood(addr.addr.ID) - } - - randAddrs = randNetAddressPairs(t, nNew) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - } - - return -} - -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { - for _, addr := range addrs { - if book.IsGood(addr) { - nOld++ - } else { - nNew++ - 
} - } - return -} - -// Analyze the layout of the selection specified by 'addrs' -// Returns: -// - seqLens - the lengths of the sequences of addresses of same type -// - seqTypes - the types of sequences in selection -func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) { - // address types are: 0 - nil, 1 - new, 2 - old - var ( - prevType = 0 - currentSeqLen = 0 - ) - - for _, addr := range addrs { - addrType := 0 - if book.IsGood(addr) { - addrType = 2 - } else { - addrType = 1 - } - if addrType != prevType && prevType != 0 { - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - currentSeqLen = 0 - } - currentSeqLen++ - prevType = addrType - } - - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - - return -} diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go deleted file mode 100644 index 37019f60a..000000000 --- a/internal/p2p/pex/bench_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package pex - -import ( - "testing" - - "github.com/tendermint/tendermint/types" -) - -func BenchmarkAddrBook_hash(b *testing.B) { - book := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: "", - routabilityStrict: true, - } - book.init() - msg := []byte(`foobar`) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = book.hash(msg) - } -} diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go index 275e71bf9..4d41cce07 100644 --- a/internal/p2p/pex/errors.go +++ b/internal/p2p/pex/errors.go @@ -15,17 +15,6 @@ func (err ErrAddrBookNonRoutable) Error() string { return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) } -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress - BucketID int -} - -func (err errAddrBookOldAddressNewBucket) Error() string { - return fmt.Sprintf("failed consistency check!"+ - " Cannot add pre-existing address %v into new bucket %v", - err.Addr, err.BucketID) -} - type ErrAddrBookSelf struct { Addr *p2p.NetAddress } diff --git a/internal/p2p/pex/file.go b/internal/p2p/pex/file.go deleted file mode 100644 index ce65f7d4d..000000000 --- a/internal/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/tendermint/tendermint/internal/libs/tempfile" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Saving AddrBook to file", "size", a.size()) - - addrs := make([]*knownAddress, 0, len(a.addrLookup)) - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. -// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. 
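// For reference, the file parsed below is the addrBookJSON written by
// saveToFile above. Its shape is roughly (all values are illustrative):
//
//	{
//	  "key": "6259f9d65db16b59a9505813",
//	  "addrs": [
//	    {"addr": {...}, "src": {...}, "buckets": [42], "attempts": 1,
//	     "bucket_type": 1, "last_attempt": "...", "last_success": "...",
//	     "last_ban_time": "..."}
//	  ]
//	}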
- _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go deleted file mode 100644 index 2a2ebe038..000000000 --- a/internal/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. -type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() types.NodeID { - return ka.Addr.ID -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) ban(banTime time.Duration) { - if ka.LastBanTime.Before(time.Now().Add(banTime)) { - ka.LastBanTime = time.Now().Add(banTime) - } -} - -func (ka *knownAddress) isBanned() bool { - return ka.LastBanTime.After(time.Now()) -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? 
- // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/internal/p2p/pex/params.go b/internal/p2p/pex/params.go deleted file mode 100644 index 29b4d45ab..000000000 --- a/internal/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. - newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which a source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 7 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 // ? - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. 
- minGetSelection = 32
-
- // max addresses returned by GetSelection
- // NOTE: this must match "maxMsgSize"
- maxGetSelection = 250
-)
diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go
deleted file mode 100644
index 9eb58c054..000000000
--- a/internal/p2p/pex/pex_reactor.go
+++ /dev/null
@@ -1,862 +0,0 @@
-package pex
-
-import (
- "errors"
- "fmt"
- "net"
- "sync"
- "time"
-
- "github.com/gogo/protobuf/proto"
-
- "github.com/tendermint/tendermint/internal/p2p"
- "github.com/tendermint/tendermint/internal/p2p/conn"
- "github.com/tendermint/tendermint/libs/cmap"
- tmmath "github.com/tendermint/tendermint/libs/math"
- tmrand "github.com/tendermint/tendermint/libs/rand"
- "github.com/tendermint/tendermint/libs/service"
- tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
- "github.com/tendermint/tendermint/types"
-)
-
-type Peer = p2p.Peer
-
-const (
- // PexChannel is a channel for PEX messages
- PexChannel = byte(0x00)
-
- // over-estimate of max NetAddress size
- // hexID (40) + IP (16) + Port (2) + Name (100) ...
- // NOTE: don't use massive DNS names ..
- maxAddressSize = 256
-
- // NOTE: amplification factor!
- // small request results in up to maxMsgSize response
- maxMsgSize = maxAddressSize * maxGetSelection
-
- // ensure we have enough peers
- defaultEnsurePeersPeriod = 30 * time.Second
-
- // Seed/Crawler constants
-
- // minTimeBetweenCrawls is a minimum time between attempts to crawl a peer.
- minTimeBetweenCrawls = 2 * time.Minute
-
- // check some peers every crawlPeerPeriod
- crawlPeerPeriod = 30 * time.Second
-
- maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
-
- // if a node connects to a seed, it does not have any trusted peers.
- // Especially in the beginning, a node should have more trusted peers than
- // untrusted ones.
- biasToSelectNewPeers = 30 // 70 to select good peers
-
- // if a peer is marked bad, it will be banned for at least this time period
- defaultBanTime = 24 * time.Hour
-)
-
-type errMaxAttemptsToDial struct {
-}
-
-func (e errMaxAttemptsToDial) Error() string {
- return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial)
-}
-
-type errTooEarlyToDial struct {
- backoffDuration time.Duration
- lastDialed time.Time
-}
-
-func (e errTooEarlyToDial) Error() string {
- return fmt.Sprintf(
- "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)",
- e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
-}
-
-// Reactor handles PEX (peer exchange) and ensures that an
-// adequate number of peers are connected to the switch.
-//
-// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
-//
-// ## Preventing abuse
-//
-// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
-// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
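The two abuse rules above come down to per-peer bookkeeping with timestamps. A minimal standalone sketch of the rate-limiting half (the type and field names here are illustrative, not the reactor's actual state):

package main

import (
	"fmt"
	"time"
)

// requestGuard allows a PEX request from a peer only if enough time has
// passed since that peer's previous request (the first request is allowed).
type requestGuard struct {
	last        map[string]time.Time
	minInterval time.Duration
}

func (g *requestGuard) allow(peerID string, now time.Time) bool {
	if prev, seen := g.last[peerID]; seen && now.Sub(prev) < g.minInterval {
		return false // too frequent: the reactor disconnects and bans here
	}
	g.last[peerID] = now
	return true
}

func main() {
	g := &requestGuard{last: map[string]time.Time{}, minInterval: 10 * time.Second}
	now := time.Now()
	fmt.Println(g.allow("peer1", now))                  // true
	fmt.Println(g.allow("peer1", now.Add(time.Second))) // false: too soon
	fmt.Println(g.allow("peer1", now.Add(time.Minute))) // true again
}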
-type Reactor struct { - p2p.BaseReactor - - book AddrBook - config *ReactorConfig - ensurePeersPeriod time.Duration // TODO: should go in the config - - // maps to prevent abuse - requestsSent *cmap.CMap // ID->struct{}: unanswered send requests - lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - - seedAddrs []*p2p.NetAddress - - attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} - - // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo -} - -func (r *Reactor) minReceiveRequestInterval() time.Duration { - // NOTE: must be less than ensurePeersPeriod, otherwise we'll request - // peers too quickly from others and they'll think we're bad! - return r.ensurePeersPeriod / 3 -} - -// ReactorConfig holds reactor specific configuration data. -type ReactorConfig struct { - // Seed/Crawler mode - SeedMode bool - - // We want seeds to only advertise good peers. Therefore they should wait at - // least as long as we expect it to take for a peer to become good before - // disconnecting. - SeedDisconnectWaitPeriod time.Duration - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration - - // Seeds is a list of addresses reactor may use - // if it can't connect to peers in the addrbook. - Seeds []string -} - -type _attemptsToDial struct { - number int - lastDialed time.Time -} - -// NewReactor creates new PEX reactor. -func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { - r := &Reactor{ - book: b, - config: config, - ensurePeersPeriod: defaultEnsurePeersPeriod, - requestsSent: cmap.NewCMap(), - lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), - } - r.BaseReactor = *p2p.NewBaseReactor("PEX", r) - return r -} - -// OnStart implements BaseService -func (r *Reactor) OnStart() error { - err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { - return err - } - - numOnline, seedAddrs, err := r.checkSeeds() - if err != nil { - return err - } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") - } - - r.seedAddrs = seedAddrs - - // Check if this node should run - // in seed/crawler mode - if r.config.SeedMode { - go r.crawlPeersRoutine() - } else { - go r.ensurePeersRoutine() - } - return nil -} - -// OnStop implements BaseService -func (r *Reactor) OnStop() { - if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) - } -} - -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - RecvMessageCapacity: maxMsgSize, - - MaxSendBytes: 200, - }, - } -} - -// AddPeer implements Reactor by adding peer to the address book (if inbound) -// or by requesting more addresses (if outbound). -func (r *Reactor) AddPeer(p Peer) { - if p.IsOutbound() { - // For outbound peers, the address is already in the books - - // either via DialPeersAsync or r.Receive. - // Ask it for more peers if we need. - if r.book.NeedMoreAddrs() { - r.RequestAddrs(p) - } - } else { - // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() - if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) - return - } - - // Make it explicit that addr and src are the same for an inbound peer. 
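// (For addresses learned via PEX, by contrast, src is the peer that reported
// the address; see ReceiveAddrs below, which passes srcAddr for every entry.)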
src := addr
-
- // add to book. don't RequestAddrs right away because
- // we don't trust inbound as much - let ensurePeersRoutine handle it.
- err = r.book.AddAddress(addr, src)
- r.logErrAddrBook(err)
- }
-}
-
-// RemovePeer implements Reactor by resetting peer's requests info.
-func (r *Reactor) RemovePeer(p Peer, reason interface{}) {
- id := string(p.ID())
- r.requestsSent.Delete(id)
- r.lastReceivedRequests.Delete(id)
-}
-
-func (r *Reactor) logErrAddrBook(err error) {
- if err != nil {
- switch err.(type) {
- case ErrAddrBookNilAddr:
- r.Logger.Error("Failed to add new address", "err", err)
- default:
- // non-routable, self, full book, private, etc.
- r.Logger.Debug("Failed to add new address", "err", err)
- }
- }
-}
-
-// Receive implements Reactor by handling incoming PEX messages.
-// XXX: do not call any methods that can block or incur heavy processing.
-// https://github.com/tendermint/tendermint/issues/2888
-func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) {
- msg, err := decodeMsg(msgBytes)
- if err != nil {
- r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
- r.Switch.StopPeerForError(src, err)
- return
- }
- r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)
-
- switch msg := msg.(type) {
- case *tmp2p.PexRequest:
-
- // NOTE: this is a prime candidate for amplification attacks,
- // so it's important we
- // 1) restrict how frequently peers can request
- // 2) limit the output size
-
- // If we're a seed and this is an inbound peer,
- // respond once and disconnect.
- if r.config.SeedMode && !src.IsOutbound() {
- id := string(src.ID())
- v := r.lastReceivedRequests.Get(id)
- if v != nil {
- // FlushStop/StopPeer are already
- // running in a go-routine.
- return
- }
- r.lastReceivedRequests.Set(id, time.Now())
-
- // Send addrs and disconnect
- r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
- go func() {
- // In a go-routine so it doesn't block .Receive.
- src.FlushStop()
- r.Switch.StopPeerGracefully(src)
- }()
-
- } else {
- // Check we're not receiving requests too frequently.
- if err := r.receiveRequest(src); err != nil {
- r.Switch.StopPeerForError(src, err)
- r.book.MarkBad(src.SocketAddr(), defaultBanTime)
- return
- }
- r.SendAddrs(src, r.book.GetSelection())
- }
-
- case *tmp2p.PexResponse:
- // If we asked for addresses, add them to the book
- addrs, err := NetAddressesFromProto(msg.Addresses)
- if err != nil {
- r.Switch.StopPeerForError(src, err)
- r.book.MarkBad(src.SocketAddr(), defaultBanTime)
- return
- }
- err = r.ReceiveAddrs(addrs, src)
- if err != nil {
- r.Switch.StopPeerForError(src, err)
- if err == ErrUnsolicitedList {
- r.book.MarkBad(src.SocketAddr(), defaultBanTime)
- }
- return
- }
-
- default:
- r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg))
- }
-}
-
-// enforces a minimum amount of time between requests
-func (r *Reactor) receiveRequest(src Peer) error {
- id := string(src.ID())
- v := r.lastReceivedRequests.Get(id)
- if v == nil {
- // initialize with empty time
- lastReceived := time.Time{}
- r.lastReceivedRequests.Set(id, lastReceived)
- return nil
- }
-
- lastReceived := v.(time.Time)
- if lastReceived.Equal(time.Time{}) {
- // first time gets a free pass. then we start tracking the time
- lastReceived = time.Now()
- r.lastReceivedRequests.Set(id, lastReceived)
- return nil
- }
-
- now := time.Now()
- minInterval := r.minReceiveRequestInterval()
- if now.Sub(lastReceived) < minInterval {
- return fmt.Errorf(
- "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
- src.ID(),
- lastReceived,
- now,
- minInterval,
- )
- }
- r.lastReceivedRequests.Set(id, now)
- return nil
-}
-
-// RequestAddrs asks the peer for more addresses if we do not already have a
-// request out for this peer.
-func (r *Reactor) RequestAddrs(p Peer) {
- id := string(p.ID())
- if _, exists := r.requestsSent.GetOrSet(id, struct{}{}); exists {
- return
- }
- r.Logger.Debug("Request addrs", "from", p)
- p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{}))
-}
-
-// ReceiveAddrs adds the given addrs to the addrbook if there's an open
-// request for this peer and deletes the open request.
-// If there's no open request for the src peer, it returns an error.
-func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
- id := string(src.ID())
- if !r.requestsSent.Has(id) {
- return ErrUnsolicitedList
- }
- r.requestsSent.Delete(id)
-
- srcAddr, err := src.NodeInfo().NetAddress()
- if err != nil {
- return err
- }
-
- srcIsSeed := false
- for _, seedAddr := range r.seedAddrs {
- if seedAddr.Equals(srcAddr) {
- srcIsSeed = true
- break
- }
- }
-
- for _, netAddr := range addrs {
- // NOTE: we check netAddr validity and routability in book#AddAddress.
- err = r.book.AddAddress(netAddr, srcAddr)
- if err != nil {
- r.logErrAddrBook(err)
- // XXX: should we be strict about incoming data and disconnect from a
- // peer here too?
- continue
- }
-
- // If this address came from a seed node, try to connect to it without
- // waiting (#2093)
- if srcIsSeed {
- r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr)
- go func(addr *p2p.NetAddress) {
- err := r.dialPeer(addr)
- if err != nil {
- switch err.(type) {
- case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress:
- r.Logger.Debug(err.Error(), "addr", addr)
- default:
- r.Logger.Error(err.Error(), "addr", addr)
- }
- }
- }(netAddr)
- }
- }
-
- return nil
-}
-
-// SendAddrs sends addrs to the peer.
-func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
- p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)}))
-}
-
-// SetEnsurePeersPeriod sets the period for ensuring that peers are connected.
-func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) {
- r.ensurePeersPeriod = d
-}
-
-// Ensures that sufficient peers are connected. (continuous)
-func (r *Reactor) ensurePeersRoutine() {
- var (
- seed = tmrand.NewRand()
- jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
- )
-
- // Randomize the first round of communication to avoid thundering herd.
- // If no peers are present, start connecting directly so we guarantee swift
- // setup with the help of configured seeds.
- if r.nodeHasSomePeersOrDialingAny() {
- time.Sleep(time.Duration(jitter))
- }
-
- // fire once immediately.
- // ensures we dial the seeds right away if the book is empty
- r.ensurePeers()
-
- // fire periodically
- ticker := time.NewTicker(r.ensurePeersPeriod)
- for {
- select {
- case <-ticker.C:
- r.ensurePeers()
- case <-r.Quit():
- ticker.Stop()
- return
- }
- }
-}
-
-// ensurePeers ensures that sufficient peers are connected. (once)
-//
-// The address book is a heuristic that we haven't perfected yet, and it may
-// also be manually edited by the node operator. It should not be used to
-// compute which addresses are already connected.
-func (r *Reactor) ensurePeers() {
- var (
- out, in, dial = r.Switch.NumPeers()
- numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial)
- )
- r.Logger.Info(
- "Ensure peers",
- "numOutPeers", out,
- "numInPeers", in,
- "numDialing", dial,
- "numToDial", numToDial,
- )
-
- if numToDial <= 0 {
- return
- }
-
- // bias to prefer more vetted peers when we have fewer connections.
- // not perfect, but somewhat ensures that we prioritize connecting to
- // more-vetted peers.
- // NOTE: range here is [10, 90]. Too high?
- newBias := tmmath.MinInt(out, 8)*10 + 10
-
- toDial := make(map[types.NodeID]*p2p.NetAddress)
- // Try maxAttempts times to pick numToDial addresses to dial
- maxAttempts := numToDial * 3
-
- for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
- try := r.book.PickAddress(newBias)
- if try == nil {
- continue
- }
- if _, selected := toDial[try.ID]; selected {
- continue
- }
- if r.Switch.IsDialingOrExistingAddress(try) {
- continue
- }
- // TODO: consider moving some checks from toDial into here
- // so we don't even consider dialing peers that we want to wait
- // before dialing again, or have dialed too many times already
- r.Logger.Info("Will dial address", "addr", try)
- toDial[try.ID] = try
- }
-
- // Dial picked addresses
- for _, addr := range toDial {
- go func(addr *p2p.NetAddress) {
- err := r.dialPeer(addr)
- if err != nil {
- switch err.(type) {
- case errMaxAttemptsToDial, errTooEarlyToDial:
- r.Logger.Debug(err.Error(), "addr", addr)
- default:
- r.Logger.Error(err.Error(), "addr", addr)
- }
- }
- }(addr)
- }
-
- if r.book.NeedMoreAddrs() {
- // Check if banned nodes can be reinstated
- r.book.ReinstateBadPeers()
- }
-
- if r.book.NeedMoreAddrs() {
-
- // 1) Pick a random peer and ask for more.
- peers := r.Switch.Peers().List()
- peersCount := len(peers)
- if peersCount > 0 {
- rand := tmrand.NewRand()
- peer := peers[rand.Int()%peersCount]
- r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
- r.RequestAddrs(peer)
- }
-
- // 2) Dial seeds if we are not dialing anyone.
- // This is done in addition to asking a peer for addresses to work around
- // peers not participating in PEX.
- if len(toDial) == 0 {
- r.Logger.Info("No addresses to dial.
Falling back to seeds") - r.dialSeeds() - } - } -} - -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { - attempts, lastDialed := r.dialAttemptsInfo(addr) - if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - rand := tmrand.NewRand() - jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) - backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} - } - } - - err := r.Switch.DialPeerWithAddress(addr) - if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return err - } - - markAddrInBookBasedOnErr(addr, r.book, err) - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr - r.attemptsToDial.Delete(addr.DialString()) - default: - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) - } - - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - return nil -} - -// maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { - if r.config.PersistentPeersMaxDialPeriod > 0 && - planned > r.config.PersistentPeersMaxDialPeriod && - r.Switch.IsPeerPersistent(addr) { - return r.config.PersistentPeersMaxDialPeriod - } - return planned -} - -// checkSeeds checks that addresses are well formed. -// Returns number of seeds we can connect to, along with all seeds addrs. -// return err if user provided any badly formatted seed addresses. -// Doesn't error if the seed node can't be reached. -// numOnline returns -1 if no seed nodes were in the initial configuration. 
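To make the dialPeer backoff above concrete, here is a small standalone sketch of the schedule it produces; maxDial stands in for PersistentPeersMaxDialPeriod and jitter for the random [0, 1s) term:

package main

import (
	"fmt"
	"time"
)

// backoffFor mirrors dialPeer's schedule: jitter plus 2^attempts seconds,
// capped the way maxBackoffDurationForPeer caps persistent peers.
func backoffFor(attempts uint, jitter, maxDial time.Duration) time.Duration {
	d := jitter + (1<<attempts)*time.Second
	if maxDial > 0 && d > maxDial {
		return maxDial
	}
	return d
}

func main() {
	for _, a := range []uint{1, 2, 3, 10, 16} {
		fmt.Printf("attempt %2d -> %v\n", a, backoffFor(a, 0, 0))
	}
	// attempt 16 -> 18h12m16s, which is where the "~ 35h in total
	// (last attempt - 18h)" note on maxAttemptsToDial comes from.
}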
-func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return -1, nil, nil - } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) - numOnline = lSeeds - len(errs) - for _, err := range errs { - switch e := err.(type) { - case types.ErrNetAddressLookup: - r.Logger.Error("Connecting to seed failed", "err", e) - default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) - } - } - return numOnline, netAddrs, nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *Reactor) dialSeeds() { - rand := tmrand.NewRand() - perm := rand.Perm(len(r.seedAddrs)) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr) - - switch err.(type) { - case nil, p2p.ErrCurrentlyDialingOrExistingAddress: - return - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - // do not write error message if there were no seeds specified in config - if len(r.seedAddrs) > 0 { - r.Switch.Logger.Error("Couldn't connect to any seeds") - } -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. -func (r *Reactor) crawlPeersRoutine() { - // If we have any seed nodes, consult them first - if len(r.seedAddrs) > 0 { - r.dialSeeds() - } else { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) - } - - // Fire periodically - ticker := time.NewTicker(crawlPeerPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - r.crawlPeers(r.book.GetSelection()) - r.cleanupCrawlPeerInfos() - case <-r.Quit(): - return - } - } -} - -// nodeHasSomePeersOrDialingAny returns true if the node is connected to some -// peers or dialing them currently. -func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { - out, in, dial := r.Switch.NumPeers() - return out+in+dial > 0 -} - -// crawlPeerInfo handles temporary data needed for the network crawling -// performed during seed/crawler mode. -type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` - // The last time we crawled the peer or attempted to do so. - LastCrawled time.Time `json:"last_crawled"` -} - -// crawlPeers will crawl the network looking for new peer addresses. -func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { - now := time.Now() - - for _, addr := range addrs { - peerInfo, ok := r.crawlPeerInfos[addr.ID] - - // Do not attempt to connect with peers we recently crawled. - if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { - continue - } - - // Record crawling attempt. 
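// Note: with crawlPeerPeriod = 30s and minTimeBetweenCrawls = 2m (see the
// constants at the top of this file), a given peer is re-crawled at most
// once every four ticks of the crawl loop.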
- r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ - Addr: addr, - LastCrawled: now, - } - - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - continue - } - - peer := r.Switch.Peers().Get(addr.ID) - if peer != nil { - r.RequestAddrs(peer) - } - } -} - -func (r *Reactor) cleanupCrawlPeerInfos() { - for id, info := range r.crawlPeerInfos { - // If we did not crawl a peer for 24 hours, it means the peer was removed - // from the addrbook => remove - // - // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in - // the ideal case, - // 40 * crawlPeerPeriod ~ 20 minutes - if time.Since(info.LastCrawled) > 24*time.Hour { - delete(r.crawlPeerInfos, id) - } - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { - // TODO: detect more "bad peer" scenarios - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr, defaultBanTime) - default: - book.MarkAttempt(addr) - } -} - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.PexMessage{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} - case *tmp2p.PexResponse: - msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.PexMessage{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.PexMessage_PexRequest: - return msg.PexRequest, nil - case *tmp2p.PexMessage_PexResponse: - return msg.PexResponse, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} - -//----------------------------------------------------------------------------- -// address converters - -// NetAddressFromProto converts a Protobuf PexAddress into a native struct. -func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { - ip := net.ParseIP(pb.IP) - if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) - } - if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) - } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), - IP: ip, - Port: uint16(pb.Port), - }, nil -} - -// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) - for _, pb := range pbs { - na, err := NetAddressFromProto(pb) - if err != nil { - return nil, err - } - nas = append(nas, na) - } - return nas, nil -} - -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. 
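As a usage sketch of the mustEncode / decodeMsg pair above, a round trip through the PexMessage envelope (a fragment, meant to be read against this file's imports):

	bz := mustEncode(&tmp2p.PexRequest{})
	msg, err := decodeMsg(bz)
	if err != nil {
		panic(err)
	}
	if _, ok := msg.(*tmp2p.PexRequest); !ok {
		panic("expected the envelope to unwrap back to *tmp2p.PexRequest")
	}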
-func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { - pbs := make([]tmp2p.PexAddress, 0, len(nas)) - for _, na := range nas { - if na != nil { - pbs = append(pbs, tmp2p.PexAddress{ - ID: string(na.ID), - IP: na.IP.String(), - Port: uint32(na.Port), - }) - } - } - return pbs -} diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go deleted file mode 100644 index 56f24457f..000000000 --- a/internal/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,680 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/mock" - "github.com/tendermint/tendermint/libs/log" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, _ := createReactor(t, &ReactorConfig{}) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
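// (Presumably this is also why init() above sets cfg.AllowDuplicateIP = true:
// every switch in these tests dials from 127.0.0.1.)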
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewReactor(books[i], &ReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() - err := books[switchIndex].AddAddress(addr, addr) - require.NoError(t, err) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - peerAddr := peer.SocketAddr() - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - err := book.AddAddress(peerAddr, peerAddr) - require.NoError(t, err) - require.True(t, book.HasAddress(peerAddr)) - - id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peerAddr)) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peer.SocketAddr())) -} - -func TestCheckSeeds(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. test creating peer with no seeds works - peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - - // 3. test create peer with online seed works - peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 4. test create peer with all seeds having unresolvable DNS fails - badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 5. 
test create peer with one good seed address succeeds - badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 2. create usual peer with only seed configured. - peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) - t.Cleanup(func() { _ = peer.Stop() }) - - // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - t.Cleanup(func() { _ = secondPeer.Stop() }) - - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) - - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) -} - -func TestPEXReactorSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreateDefaultPeer(dir, 1) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // 2. attemptDisconnects should not disconnect because of wait period - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 3. 
attemptDisconnects should disconnect after wait period - pexR.attemptDisconnects() - assert.Equal(t, 0, sw.Peers().Size()) -} - -func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 2. attemptDisconnects should not disconnect because the peer is persistent - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { - // directory to store address books - pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - // No need to start sw since crawlPeers is called manually here. - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - require.NoError(t, book.AddAddress(addr, addr)) - - assert.True(t, book.HasAddress(addr)) - - // imitate maxAttemptsToDial reached - pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) - - assert.False(t, book.HasAddress(addr)) -} - -// connect a peer to a seed, wait a bit, then stop it. -// this should give it time to request addrs and for the seed -// to call FlushStop, and allows us to test calling Stop concurrently -// with FlushStop. Before a fix, this non-deterministically reproduced -// https://github.com/tendermint/tendermint/issues/3231. 
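Aside: the test below waits on the racy condition with a hand-rolled polling loop. The same wait can be factored into a reusable helper; the following is a minimal sketch only, assuming the standard time package, and waitFor is not a name from this codebase:

// waitFor polls cond every interval until it reports true or the
// timeout elapses; it returns false on timeout. Illustrative helper
// mirroring the inline wait loop in the test below.
func waitFor(cond func() bool, interval, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}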
-func TestPEXReactorSeedModeFlushStop(t *testing.T) { - t.Skip("flaky test, will be replaced by new P2P stack") - N := 2 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - config := &ReactorConfig{} - if i == 0 { - // first one is a seed node - config = &ReactorConfig{SeedMode: true} - } - r := NewReactor(books[i], config) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - reactor := switches[0].Reactors()["pex"].(*Reactor) - peerID := switches[1].NodeInfo().ID() - - assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) - - // sleep up to a second while waiting for the peer to send us a message. - // this isn't perfect since it's possible the peer sends us a msg and we FlushStop - // before this loop catches it. but non-deterministically it works pretty well. - for i := 0; i < 1000; i++ { - v := reactor.lastReceivedRequests.Get(string(peerID)) - if v != nil { - break - } - time.Sleep(time.Millisecond) - } - - // by now the FlushStop should have happened. Try stopping the peer. - // it should be safe to do this. - peers := switches[0].Peers().List() - for _, peer := range peers { - err := peer.Stop() - require.NoError(t, err) - } - - // stop the switches - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(t, &ReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - err := pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = timeout 
- ) - - for { - select { - case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - break - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least %d peer(s) (switches: %s)", - nPeers, numPeersStr, - ) - return - } - } -} - -// Creates a peer with the provided config -func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return peer -} - -// Creates a peer with the default config -func testCreateDefaultPeer(dir string, id int) *p2p.Switch { - return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) -} - -// Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { - seed := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) - for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests - book.MarkGood(knownAddrs[j].ID) - } - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return seed -} - -// Creates a peer which knows about the provided seed. 
-// Starting and stopping the peer is left to the caller -func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { - conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, - } - return testCreatePeerWithConfig(dir, id, conf) -} - -func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { - // directory to store address book - book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - for _, r := range reactors { - sw.AddReactor(r.String(), r) - } - return sw - }, log.TestingLogger()) - return sw -} - -func TestPexVectors(t *testing.T) { - addr := tmp2p.PexAddress{ - ID: "1", - IP: "127.0.0.1", - Port: 9090, - } - - testCases := []struct { - testName string - msg proto.Message - expBytes string - }{ - {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, - } - - for _, tc := range testCases { - tc := tc - - bz := mustEncode(tc.msg) - - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } -} - -// FIXME: This function is used in place of testing.TB.TempDir() -// as the latter seems to cause test cases to fail when it is -// unable to remove the temporary directory once the test case -// execution terminates. This seems to happen often with pex -// reactor test cases. -// -// References: -// https://github.com/tendermint/tendermint/pull/5733 -// https://github.com/tendermint/tendermint/issues/5732 -func tempDir(t *testing.T) string { - t.Helper() - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - return dir -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b..300a6022d 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -24,6 +24,22 @@ var ( // TODO: Consolidate with params file. // See https://github.com/tendermint/tendermint/issues/6371 const ( + // PexChannel is a channel for PEX messages + PexChannel = byte(0x00) + + // over-estimate of max NetAddress size + // hexID (40) + IP (16) + Port (2) + Name (100) ... + // NOTE: dont use massive DNS name .. + maxAddressSize = 256 + + // max addresses returned by GetSelection + // NOTE: this must match "maxMsgSize" + maxGetSelection = 250 + + // NOTE: amplification factor! + // small request results in up to maxMsgSize response + maxMsgSize = maxAddressSize * maxGetSelection + // the minimum time one peer can send another request to the same peer minReceiveRequestInterval = 100 * time.Millisecond diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 1171566d1..d68f16c4f 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,6 +21,8 @@ import ( const queueBufferDefault = 32 +const dialRandomizerIntervalMillisecond = 3000 + // ChannelID is an arbitrary channel ID. 
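For concreteness, the PEX size constants introduced above bound the amplification factor at maxAddressSize * maxGetSelection = 256 * 250 = 64,000 bytes: a request of only a few bytes may legitimately trigger a response of roughly 64 kB, which is why maxMsgSize is derived from the response side rather than the request side.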
type ChannelID uint16 @@ -544,7 +546,7 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond) + timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond) defer timer.Stop() select { diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go index 07d1ad156..c9cdc2756 100644 --- a/internal/p2p/shim.go +++ b/internal/p2p/shim.go @@ -1,58 +1,42 @@ package p2p import ( - "errors" "sort" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" ) -// ============================================================================ -// TODO: Types and business logic below are temporary and will be removed once -// the legacy p2p stack is removed in favor of the new model. -// -// ref: https://github.com/tendermint/tendermint/issues/5670 -// ============================================================================ +// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel +// and the proto.Message the new p2p Channel is responsible for handling. +// A ChannelDescriptorShim is not contained in ReactorShim, but is rather +// used to construct a ReactorShim. +type ChannelDescriptorShim struct { + MsgType proto.Message + Descriptor *ChannelDescriptor +} -var _ Reactor = (*ReactorShim)(nil) +// ChannelShim defines a generic shim wrapper around a legacy p2p channel +// and the new p2p Channel. It also includes the raw bi-directional Go channels +// so we can proxy message delivery. +type ChannelShim struct { + Descriptor *ChannelDescriptor + Channel *Channel + inCh chan<- Envelope + outCh <-chan Envelope + errCh <-chan PeerError +} -type ( - messageValidator interface { - Validate() error - } - - // ReactorShim defines a generic shim wrapper around a BaseReactor. It is - // responsible for wiring up legacy p2p behavior to the new p2p semantics - // (e.g. proxying Envelope messages to legacy peers). - ReactorShim struct { - BaseReactor - - Name string - PeerUpdates *PeerUpdates - Channels map[ChannelID]*ChannelShim - } - - // ChannelShim defines a generic shim wrapper around a legacy p2p channel - // and the new p2p Channel. It also includes the raw bi-directional Go channels - // so we can proxy message delivery. - ChannelShim struct { - Descriptor *ChannelDescriptor - Channel *Channel - inCh chan<- Envelope - outCh <-chan Envelope - errCh <-chan PeerError - } - - // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel - // and the proto.Message the new p2p Channel is responsible for handling. - // A ChannelDescriptorShim is not contained in ReactorShim, but is rather - // used to construct a ReactorShim. - ChannelDescriptorShim struct { - MsgType proto.Message - Descriptor *ChannelDescriptor - } -) +// ReactorShim defines a generic shim wrapper around a BaseReactor. It is +// responsible for wiring up legacy p2p behavior to the new p2p semantics +// (e.g. proxying Envelope messages to legacy peers). 
+type ReactorShim struct { + Name string + PeerUpdates *PeerUpdates + Channels map[ChannelID]*ChannelShim +} func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { channels := make(map[ChannelID]*ChannelShim) @@ -68,9 +52,6 @@ func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*C Channels: channels, } - rs.BaseReactor = *NewBaseReactor(name, rs) - rs.SetLogger(logger) - return rs } @@ -93,121 +74,15 @@ func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { } } -// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate -// go-routine where we listen for outbound envelopes sent during Receive -// executions (or anything else that may send on the Channel) and proxy them to -// the corresponding Peer using the To field from the envelope. -func (rs *ReactorShim) proxyPeerEnvelopes() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for e := range cs.outCh { - msg := proto.Clone(cs.Channel.messageType) - msg.Reset() - - wrapper, ok := msg.(Wrapper) - if ok { - if err := wrapper.Wrap(e.Message); err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to wrap message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - } else { - msg = e.Message - } - - bz, err := proto.Marshal(msg) - if err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to encode message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - - switch { - case e.Broadcast: - rs.Switch.Broadcast(cs.Descriptor.ID, bz) - - case e.To != "": - src := rs.Switch.peers.Get(e.To) - if src == nil { - rs.Logger.Debug( - "failed to proxy envelope; failed to find peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - continue - } - - if !src.Send(cs.Descriptor.ID, bz) { - // This usually happens when we try to send across a channel - // that the peer doesn't have open. To avoid bloating the - // logs we set this to be Debug - rs.Logger.Debug( - "failed to proxy message to peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - } - - default: - rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID) - } - } - }(cs) - } -} - -// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine -// where we listen for peer errors. For each peer error, we find the peer from -// the legacy p2p Switch and execute a StopPeerForError call with the corresponding -// peer error. -func (rs *ReactorShim) handlePeerErrors() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for pErr := range cs.errCh { - if pErr.NodeID != "" { - peer := rs.Switch.peers.Get(pErr.NodeID) - if peer == nil { - rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID) - continue - } - - rs.Switch.StopPeerForError(peer, pErr.Err) - } - } - }(cs) - } -} - -// OnStart executes the reactor shim's OnStart hook where we start all the -// necessary go-routines in order to proxy peer envelopes and errors per p2p -// Channel. -func (rs *ReactorShim) OnStart() error { - if rs.Switch == nil { - return errors.New("proxyPeerEnvelopes: reactor shim switch is nil") - } - - // start envelope proxying and peer error handling in separate go routines - rs.proxyPeerEnvelopes() - rs.handlePeerErrors() - - return nil -} - -// GetChannel returns a p2p Channel reference for a given ChannelID. If no -// Channel exists, nil is returned. 
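The core of the proxying removed above is the clone-wrap-marshal step. Reduced to a sketch for reference (the helper name encodeEnvelope is illustrative; the individual calls mirror the deleted code):

// encodeEnvelope clones the channel's prototype message, wraps the
// envelope payload when the prototype supports wrapping, and marshals
// the result for the wire -- the same steps the removed
// proxyPeerEnvelopes loop performed inline.
func encodeEnvelope(prototype proto.Message, e Envelope) ([]byte, error) {
	msg := proto.Clone(prototype)
	msg.Reset()
	if w, ok := msg.(Wrapper); ok {
		if err := w.Wrap(e.Message); err != nil {
			return nil, err
		}
	} else {
		msg = e.Message
	}
	return proto.Marshal(msg)
}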
-func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { - channelShim, ok := rs.Channels[cID] - if ok { - return channelShim.Channel - } - - return nil +// MConnConfig returns an MConnConfig based on the defaults, with fields updated +// from the P2PConfig. +func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { + mConfig := conn.DefaultMConnConfig() + mConfig.FlushThrottle = cfg.FlushThrottleTimeout + mConfig.SendRate = cfg.SendRate + mConfig.RecvRate = cfg.RecvRate + mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize + return mConfig } // GetChannels implements the legacy Reactor interface for getting a slice of all @@ -228,107 +103,13 @@ func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { return descriptors } -// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle adding a peer. -func (rs *ReactorShim) AddPeer(peer Peer) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}: - rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle removing a peer. -func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}: - rs.Logger.Debug( - "sent peer update", - "reactor", rs.Name, - "peer", peer.ID(), - "reason", reason, - "status", PeerStatusDown, - ) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// Receive implements a generic wrapper around implementing the Receive method -// on the legacy Reactor p2p interface. If the reactor is running, Receive will -// find the corresponding new p2p Channel, create and decode the appropriate -// proto.Message from the msgBytes, execute any validation and finally construct -// and send a p2p Envelope on the appropriate p2p Channel. -func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) { - if !rs.IsRunning() { - return - } - - cID := ChannelID(chID) +// GetChannel returns a p2p Channel reference for a given ChannelID. If no +// Channel exists, nil is returned. 
+func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { channelShim, ok := rs.Channels[cID] - if !ok { - rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID) - return - } - - msg := proto.Clone(channelShim.Channel.messageType) - msg.Reset() - - if err := proto.Unmarshal(msgBytes, msg); err != nil { - rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - - validator, ok := msg.(messageValidator) if ok { - if err := validator.Validate(); err != nil { - rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } + return channelShim.Channel } - wrapper, ok := msg.(Wrapper) - if ok { - var err error - - msg, err = wrapper.Unwrap() - if err != nil { - rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err) - return - } - } - - select { - case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}: - rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID()) - - case <-channelShim.Channel.Done(): - // NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the inbound channel and when the reactor stops we - // do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the inbound channel when closing or - // stopping. - } + return nil } diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go deleted file mode 100644 index d8b9e30c3..000000000 --- a/internal/p2p/shim_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package p2p_test - -import ( - "sync" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks" - "github.com/tendermint/tendermint/libs/log" - ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" -) - -var ( - channelID1 = byte(0x01) - channelID2 = byte(0x02) - - p2pCfg = config.DefaultP2PConfig() - - testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - p2p.ChannelID(channelID1): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID1, - Priority: 3, - SendQueueCapacity: 10, - RecvMessageCapacity: int(4e6), - }, - }, - p2p.ChannelID(channelID2): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID2, - Priority: 1, - SendQueueCapacity: 4, - RecvMessageCapacity: int(16e6), - }, - }, - } -) - -type reactorShimTestSuite struct { - shim *p2p.ReactorShim - sw *p2p.Switch -} - -func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { - t.Helper() - - rts := &reactorShimTestSuite{ - shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims), - } - - rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch { - for _, peer := range peers { - p2p.AddPeerToSwitchPeerSet(sw, peer) - } - - sw.AddReactor(rts.shim.Name, rts.shim) - return sw - }, log.TestingLogger()) - - // start the reactor shim - require.NoError(t, rts.shim.Start()) - - t.Cleanup(func() { - require.NoError(t, rts.shim.Stop()) - - for _, 
chs := range rts.shim.Channels { - chs.Channel.Close() - } - }) - - return rts -} - -func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) { - t.Helper() - - peerID := types.NodeID(id) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(peerID) - - return peer, peerID -} - -func TestReactorShim_GetChannel(t *testing.T) { - rts := setup(t, nil) - - p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1)) - require.NotNil(t, p2pCh) - require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1)) - - p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03))) - require.Nil(t, p2pCh) -} - -func TestReactorShim_GetChannels(t *testing.T) { - rts := setup(t, nil) - - p2pChs := rts.shim.GetChannels() - require.Len(t, p2pChs, 2) - require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1)) - require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2)) -} - -func TestReactorShim_AddPeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.AddPeer(peerA) - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status) -} - -func TestReactorShim_RemovePeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.RemovePeer(peerA, "test reason") - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status) -} - -func TestReactorShim_Receive(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - msg := &ssproto.Message{ - Sum: &ssproto.Message_ChunkRequest{ - ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, - }, - } - - bz, err := proto.Marshal(msg) - require.NoError(t, err) - - var wg sync.WaitGroup - - var response *ssproto.Message - peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) { - m := &ssproto.Message{} - require.NoError(t, proto.Unmarshal(args[1].([]byte), m)) - - response = m - wg.Done() - }).Return(true) - - p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)] - - wg.Add(2) - - // Simulate receiving the envelope in some real reactor and replying back with - // the same envelope and then closing the Channel. - go func() { - e := <-p2pCh.Channel.In - require.Equal(t, peerIDA, e.From) - require.NotNil(t, e.Message) - - p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message} - p2pCh.Channel.Close() - wg.Done() - }() - - rts.shim.Receive(channelID1, peerA, bz) - - // wait until the mock peer called Send and we (fake) proxied the envelope - wg.Wait() - require.NotNil(t, response) - - m, err := response.Unwrap() - require.NoError(t, err) - require.Equal(t, msg.GetChunkRequest(), m) - - // Since p2pCh was closed in the simulated reactor above, calling Receive - // should not block. 
- rts.shim.Receive(channelID1, peerA, bz) - require.Empty(t, p2pCh.Channel.In) - - peerA.AssertExpectations(t) -} diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go deleted file mode 100644 index ea1272354..000000000 --- a/internal/p2p/switch.go +++ /dev/null @@ -1,1064 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - "io" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS - dialRandomizerIntervalMilliseconds = 3000 - - // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s - reconnectAttempts = 20 - reconnectInterval = 5 * time.Second - - // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs - reconnectBackOffAttempts = 10 - reconnectBackOffBaseSeconds = 3 - - defaultFilterTimeout = 5 * time.Second -) - -// MConnConfig returns an MConnConfig with fields updated -// from the P2PConfig. -func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = cfg.FlushThrottleTimeout - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - return mConfig -} - -//----------------------------------------------------------------------------- - -// An AddrBook represents an address book from the pex package, which is used -// to store peer addresses. -type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddPrivateIDs([]string) - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(types.NodeID) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool - Save() -} - -// ConnFilterFunc is a callback for connection filtering. If it returns an -// error, the connection is rejected. The set of existing connections is passed -// along with the new connection and all resolved IPs. -type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error - -// PeerFilterFunc to be implemented by filter hooks after a new Peer has been -// fully setup. -type PeerFilterFunc func(IPeerSet, Peer) error - -// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection -// and refuses new ones if they come from a known ip. -var ConnDuplicateIPFilter ConnFilterFunc = func(cs ConnSet, c net.Conn, ips []net.IP) error { - for _, ip := range ips { - if cs.HasIP(ip) { - return ErrRejected{ - conn: c, - err: fmt.Errorf("ip<%v> already connected", ip), - isDuplicate: true, - } - } - } - return nil -} - -//----------------------------------------------------------------------------- - -// Switch handles peer connections and exposes an API to receive incoming messages -// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -// or more `Channels`. So while sending outgoing messages is typically performed on the peer, -// incoming messages are received on the reactor. 
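To make the send/receive split described above concrete, a minimal legacy reactor might look like the sketch below, modeled on the TestReactor used later in switch_test.go; EchoReactor is illustrative only and assumes it sits in the same p2p package:

// EchoReactor returns every inbound message to its sender: outbound
// traffic goes through peer.Send, inbound traffic arrives via the
// reactor's Receive callback.
type EchoReactor struct {
	BaseReactor
}

func NewEchoReactor() *EchoReactor {
	r := &EchoReactor{}
	r.BaseReactor = *NewBaseReactor("EchoReactor", r)
	return r
}

func (r *EchoReactor) GetChannels() []*conn.ChannelDescriptor {
	return []*conn.ChannelDescriptor{{ID: byte(0x42), Priority: 1}}
}

func (r *EchoReactor) Receive(chID byte, src Peer, msgBytes []byte) {
	_ = src.Send(chID, msgBytes) // best-effort echo on the same channel
}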
-type Switch struct { - service.BaseService - - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - reconnecting *cmap.CMap - nodeInfo types.NodeInfo // our node info - nodeKey types.NodeKey // our node privkey - addrBook AddrBook - // peers addresses with whom we'll maintain constant connection - persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[types.NodeID]struct{} - - transport Transport - - filterTimeout time.Duration - peerFilters []PeerFilterFunc - connFilters []ConnFilterFunc - conns ConnSet - - metrics *Metrics -} - -// NetAddress returns the first address the switch is listening on, -// or nil if no addresses are found. -func (sw *Switch) NetAddress() *NetAddress { - endpoints := sw.transport.Endpoints() - if len(endpoints) == 0 { - return nil - } - return &NetAddress{ - ID: sw.nodeInfo.NodeID, - IP: endpoints[0].IP, - Port: endpoints[0].Port, - } -} - -// SwitchOption sets an optional parameter on the Switch. -type SwitchOption func(*Switch) - -// NewSwitch creates a new Switch with the given config. -func NewSwitch( - cfg *config.P2PConfig, - transport Transport, - options ...SwitchOption, -) *Switch { - sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmap.NewCMap(), - reconnecting: cmap.NewCMap(), - metrics: NopMetrics(), - transport: transport, - persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[types.NodeID]struct{}), - filterTimeout: defaultFilterTimeout, - conns: NewConnSet(), - } - - // Ensure PRNG is reseeded. - tmrand.Reseed() - - sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw) - - for _, option := range options { - option(sw) - } - - return sw -} - -// SwitchFilterTimeout sets the timeout used for peer filters. -func SwitchFilterTimeout(timeout time.Duration) SwitchOption { - return func(sw *Switch) { sw.filterTimeout = timeout } -} - -// SwitchPeerFilters sets the filters for rejection of new peers. -func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption { - return func(sw *Switch) { sw.peerFilters = filters } -} - -// SwitchConnFilters sets the filters for rejection of connections. -func SwitchConnFilters(filters ...ConnFilterFunc) SwitchOption { - return func(sw *Switch) { sw.connFilters = filters } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) SwitchOption { - return func(sw *Switch) { sw.metrics = metrics } -} - -//--------------------------------------------------------------------- -// Switch setup - -// AddReactor adds the given reactor to the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - for _, chDesc := range reactor.GetChannels() { - chID := chDesc.ID - // No two reactors can share the same channel. - if sw.reactorsByCh[chID] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// RemoveReactor removes the given Reactor from the Switch. -// NOTE: Not goroutine safe. 
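Construction with the functional options above might look like this sketch (the Transport value is assumed to come from elsewhere; the 10s filter timeout is an arbitrary example overriding the 5s default):

// newTunedSwitch wires a Switch with a custom filter timeout and
// no-op metrics using the SwitchOption helpers defined above.
func newTunedSwitch(cfg *config.P2PConfig, transport Transport) *Switch {
	return NewSwitch(
		cfg,
		transport,
		SwitchFilterTimeout(10*time.Second),
		WithMetrics(NopMetrics()),
	)
}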
-func (sw *Switch) RemoveReactor(name string, reactor Reactor) { - for _, chDesc := range reactor.GetChannels() { - // remove channel description - for i := 0; i < len(sw.chDescs); i++ { - if chDesc.ID == sw.chDescs[i].ID { - sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) - break - } - } - delete(sw.reactorsByCh, chDesc.ID) - } - delete(sw.reactors, name) - reactor.SetSwitch(nil) -} - -// Reactors returns a map of reactors registered on the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Reactor returns the reactor with the given name. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// NodeInfo returns the switch's NodeInfo. -// NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() types.NodeInfo { - return sw.nodeInfo -} - -// SetNodeKey sets the switch's private key for authenticated encryption. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) { - sw.nodeKey = nodeKey -} - -//--------------------------------------------------------------------- -// Service start/stop - -// OnStart implements BaseService. It starts all the reactors and peers. -func (sw *Switch) OnStart() error { - - // FIXME: Temporary hack to pass channel descriptors to MConn transport, - // since they are not available when it is constructed. This will be - // fixed when we implement the new router abstraction. - if t, ok := sw.transport.(*MConnTransport); ok { - t.channelDescs = sw.chDescs - } - - // Start reactors - for _, reactor := range sw.reactors { - err := reactor.Start() - if err != nil { - return fmt.Errorf("failed to start %v: %w", reactor, err) - } - } - - // Start accepting Peers. - go sw.acceptRoutine() - - return nil -} - -// OnStop implements BaseService. It stops all peers and reactors. -func (sw *Switch) OnStop() { - // Stop peers - for _, p := range sw.peers.List() { - sw.stopAndRemovePeer(p, nil) - } - - // Stop reactors - sw.Logger.Debug("Switch: Stopping reactors") - for _, reactor := range sw.reactors { - if err := reactor.Stop(); err != nil { - sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err) - } - } -} - -//--------------------------------------------------------------------- -// Peers - -// Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). -// -// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", msgBytes) - - peers := sw.peers.List() - var wg sync.WaitGroup - wg.Add(len(peers)) - successChan := make(chan bool, len(peers)) - - for _, peer := range peers { - go func(p Peer) { - defer wg.Done() - success := p.Send(chID, msgBytes) - successChan <- success - }(peer) - } - - go func() { - wg.Wait() - close(successChan) - }() - - return successChan -} - -// NumPeers returns the count of outbound/inbound and outbound-dialing peers. -// unconditional peers are not counted here. 
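Because Broadcast above reports results asynchronously, a caller that cares about delivery must drain the returned channel. A sketch of such a caller (broadcastAndCount is an illustrative name):

// broadcastAndCount sends msg on chID to all connected peers and counts
// how many sends reported success. The range loop terminates because
// Broadcast closes the channel once every peer has been attempted.
func broadcastAndCount(sw *Switch, chID byte, msg []byte) int {
	delivered := 0
	for ok := range sw.Broadcast(chID, msg) {
		if ok {
			delivered++
		}
	}
	return delivered
}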
-func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - if !sw.IsPeerUnconditional(peer.ID()) { - outbound++ - } - } else { - if !sw.IsPeerUnconditional(peer.ID()) { - inbound++ - } - } - } - dialing = sw.dialing.Size() - return -} - -func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool { - _, ok := sw.unconditionalPeerIDs[id] - return ok -} - -// MaxNumOutboundPeers returns a maximum number of outbound peers. -func (sw *Switch) MaxNumOutboundPeers() int { - return sw.config.MaxNumOutboundPeers -} - -// Peers returns the set of peers that are connected to the switch. -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// StopPeerForError disconnects from a peer due to external error. -// If the peer is persistent, it will attempt to reconnect. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - if !peer.IsRunning() { - return - } - - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } - } - go sw.reconnectToPeer(addr) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. -func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly - } - - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } - - // Removing a peer should go last to avoid a situation where a peer - // reconnect to our node and the switch calls InitPeer before - // RemovePeer is finished. - // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } - - sw.conns.RemoveAddr(peer.RemoteAddr()) -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if _, exists := sw.reconnecting.GetOrSet(string(addr.ID), addr); exists { - return - } - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - - sw.Logger.Info("Error reconnecting to peer. 
Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.ID()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -type privateAddr interface { - PrivateAddr() bool -} - -func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() -} - -// DialPeersAsync dials a list of peers asynchronously in random order. -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// It ignores ErrNetAddressLookup. However, if there are other errors, first -// encounter is returned. -// Nop if there are no peers. -func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.dialPeersAsync(netAddrs) - return nil -} - -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() - - // TODO: this code feels like it's in the wrong place. - // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if sw.addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { - if isPrivateAddr(err) { - sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) - } else { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - sw.addrBook.Save() - } - - // permute the list, dial them in random order. 
- perm := mrand.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - addr := netAddrs[j] - - if addr.Same(ourAddr) { - sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) - return - } - - sw.randomSleep(0) - - err := sw.DialPeerWithAddress(addr) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects -// and authenticates successfully. -// If we're currently dialing this address or it belongs to an existing peer, -// ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { - if sw.IsDialingOrExistingAddress(addr) { - return ErrCurrentlyDialingOrExistingAddress{addr.String()} - } - - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - - return sw.addOutboundPeerWithConfig(addr, sw.config) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - // nolint:gosec // G404: Use of weak random number generator - r := time.Duration(mrand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -// IsDialingOrExistingAddress returns true if switch has a peer with the given -// address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { - return sw.dialing.Has(string(addr.ID)) || - sw.peers.Has(addr.ID) || - (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) -} - -// AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is -// returned. 
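The log-everything, fail-on-first-serious-error pattern used by DialPeersAsync above also appears in AddPersistentPeers below; distilled into a sketch (firstFatalErr is an illustrative name):

// firstFatalErr returns the first error that is not a DNS lookup
// failure: unresolvable addresses are tolerated, anything else aborts,
// mirroring how DialPeersAsync and AddPersistentPeers treat their
// address-parsing errors.
func firstFatalErr(errs []error) error {
	for _, err := range errs {
		if _, ok := err.(types.ErrNetAddressLookup); ok {
			continue
		}
		return err
	}
	return nil
}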
-func (sw *Switch) AddPersistentPeers(addrs []string) error { - sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.persistentPeersAddrs = netAddrs - return nil -} - -func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { - sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} - } - return nil -} - -func (sw *Switch) AddPrivatePeerIDs(ids []string) error { - validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - validIDs = append(validIDs, id) - } - - sw.addrBook.AddPrivateIDs(validIDs) - - return nil -} - -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { - for _, pa := range sw.persistentPeersAddrs { - if pa.Equals(na) { - return true - } - } - return false -} - -func (sw *Switch) acceptRoutine() { - for { - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Accept() - if err == nil { - // NOTE: The legacy MConn transport did handshaking in Accept(), - // which was asynchronous and avoided head-of-line-blocking. - // However, as handshakes are being migrated out from the transport, - // we just do it synchronously here for now. - peerNodeInfo, _, err = sw.handshakePeer(c, "") - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if err == io.EOF { - err = ErrTransportClosed{} - } - switch err := err.(type) { - case ErrRejected: - addr := err.Addr() - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - if err.IsIncompatible() { - sw.addrBook.RemoveAddress(&addr) - } - - sw.Logger.Info( - "Inbound Peer rejected", - "err", err, - "numPeers", sw.peers.Size(), - ) - - continue - case ErrFilterTimeout: - sw.Logger.Error( - "Peer filter timed out", - "err", err, - ) - - continue - case ErrTransportClosed: - sw.Logger.Error( - "Stopped accept routine, as transport is closed", - "numPeers", sw.peers.Size(), - ) - default: - sw.Logger.Error( - "Accept on transport errored", - "err", err, - "numPeers", sw.peers.Size(), - ) - // We could instead have a retry loop around the acceptRoutine, - // but that would need to stop and let the node shutdown eventually. - // So might as well panic and let process managers restart the node. - // There's no point in letting the node run without the acceptRoutine, - // since it won't be able to accept new connections. - panic(fmt.Errorf("accept routine exited: %v", err)) - } - - break - } - - isPersistent := false - addr, err := peerNodeInfo.NetAddress() - if err == nil { - isPersistent = sw.IsPeerPersistent(addr) - } - - p := newPeer( - peerNodeInfo, - newPeerConn(false, isPersistent, c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { - // Ignore connection if we already have enough peers. 
- _, in, _ := sw.NumPeers() - if in >= sw.config.MaxNumInboundPeers { - sw.Logger.Info( - "Ignoring inbound connection: already have enough inbound peers", - "address", p.SocketAddr(), - "have", in, - "max", sw.config.MaxNumInboundPeers, - ) - _ = p.CloseConn() - continue - } - - } - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - sw.Logger.Info( - "Ignoring inbound connection: error while adding peer", - "err", err, - "id", p.ID(), - ) - } - } -} - -// dial the peer; make secret connection; authenticate against the dialed ID; -// add the peer. -// if dialing fails, start the reconnect loop. If handshake fails, it's over. -// If peer is started successfully, reconnectLoop will start when -// StopPeerForError is called. -func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, - cfg *config.P2PConfig, -) error { - sw.Logger.Info("Dialing peer", "address", addr) - - // XXX(xla): Remove the leakage of test concerns in implementation. - if cfg.TestDialFail { - go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - // Hardcoded timeout moved from MConn transport during refactoring. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Dial(ctx, Endpoint{ - Protocol: MConnProtocol, - IP: addr.IP, - Port: addr.Port, - }) - if err == nil { - peerNodeInfo, _, err = sw.handshakePeer(c, addr.ID) - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - } - if e.IsIncompatible() { - sw.addrBook.RemoveAddress(addr) - } - - return err - } - - // retry persistent peers after - // any dial error besides IsSelf() - if sw.IsPeerPersistent(addr) { - go sw.reconnectToPeer(addr) - } - - return err - } - - p := newPeer( - peerNodeInfo, - newPeerConn(true, sw.IsPeerPersistent(addr), c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - return err - } - - return nil -} - -func (sw *Switch) handshakePeer( - c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { - // Moved from transport and hardcoded until legacy P2P stack removal. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - isNodeInfoInvalid: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. 
- if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) - if expectPeerID != peerID { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - id: peerID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - peerID, - expectPeerID, - ), - isAuthFailure: true, - } - } - } - - if sw.nodeInfo.ID() == peerInfo.ID() { - return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), - conn: c.(*mConnConnection).conn, - id: peerInfo.ID(), - isSelf: true, - } - } - - if err = sw.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - id: peerInfo.ID(), - isIncompatible: true, - } - } - - return peerInfo, peerKey, nil -} - -func (sw *Switch) filterPeer(p Peer) error { - // Avoid duplicate - if sw.peers.Has(p.ID()) { - return ErrRejected{id: p.ID(), isDuplicate: true} - } - - errc := make(chan error, len(sw.peerFilters)) - - for _, f := range sw.peerFilters { - go func(f PeerFilterFunc, p Peer, errc chan<- error) { - errc <- f(sw.peers, p) - }(f, p, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{id: p.ID(), err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - } - - return nil -} - -// filterConn filters a connection, rejecting it if this function errors. -// -// FIXME: This is only here for compatibility with the current Switch code. In -// the new P2P stack, peer/connection filtering should be moved into the Router -// or PeerManager and removed from here. -func (sw *Switch) filterConn(conn net.Conn) error { - if sw.conns.Has(conn) { - return ErrRejected{conn: conn, isDuplicate: true} - } - - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return err - } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("connection address has invalid IP address %q", host) - } - - // Apply filter callbacks. - chErr := make(chan error, len(sw.connFilters)) - for _, connFilter := range sw.connFilters { - go func(connFilter ConnFilterFunc) { - chErr <- connFilter(sw.conns, conn, []net.IP{ip}) - }(connFilter) - } - - for i := 0; i < cap(chErr); i++ { - select { - case err := <-chErr: - if err != nil { - return ErrRejected{conn: conn, err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - - } - - // FIXME: Doesn't really make sense to set this here, but we preserve the - // behavior from the previous P2P transport implementation. - sw.conns.Set(conn, []net.IP{ip}) - return nil -} - -// addPeer starts up the Peer and adds it to the Switch. Error is returned if -// the peer is filtered out or failed to start or can't be added. -func (sw *Switch) addPeer(p Peer) error { - if err := sw.filterPeer(p); err != nil { - return err - } - - p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) - - // Handle the shut down case where the switch has stopped but we're - // concurrently trying to add a peer. - if !sw.IsRunning() { - // XXX should this return an error or just log and terminate? - sw.Logger.Error("Won't start a peer - switch is not running", "peer", p) - return nil - } - - // Add some data to the peer, which is required by reactors. - for _, reactor := range sw.reactors { - p = reactor.InitPeer(p) - } - - // Start the peer's send/recv routines. 
- // Must start it before adding it to the peer set - // to prevent Start and Stop from being called concurrently. - err := p.Start() - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "err", err, "peer", p) - return err - } - - // Add the peer to PeerSet. Do this before starting the reactors - // so that if Receive errors, we will find the peer and remove it. - // Add should not err since we already checked peers.Has(). - if err := sw.peers.Add(p); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - // Start all the reactor protocols on the peer. - for _, reactor := range sw.reactors { - reactor.AddPeer(p) - } - - sw.Logger.Info("Added peer", "peer", p) - - return nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go deleted file mode 100644 index 8cb755c9f..000000000 --- a/internal/p2p/switch_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig - ctx = context.Background() -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID types.NodeID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx tmsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches 
connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. - switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) - return switches[0], switches[1] -} - -func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) - - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, true)) - - return sw -} - -func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) - t.Cleanup(func() { - if err := s1.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { - t.Error(err) - } - }) - - if s1.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - assertMsgReceivedWithTimeout(t, - ch0Msg, - byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch1Msg, - byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch2Msg, - byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) -} - -func assertMsgReceivedWithTimeout( - t *testing.T, - msgBytes []byte, - channel byte, - reactor *TestReactor, - checkPeriod, - timeout time.Duration, -) { - ticker := time.NewTicker(checkPeriod) - for { - select { - case <-ticker.C: - msgs := reactor.getMsgs(channel) - if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) - } - return - } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) - } - } -} - -func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - - // simulate s1 having a public IP by creating a remote peer with the same ID - rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} - rp.Start() - - // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected self to be rejected") - } - } else { - t.Errorf("expected ErrRejected") - } - } - - assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) - - rp.Stop() - - assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) -} - -func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := s1.NodeInfo() - ni.Network = "network-a" - s1.SetNodeInfo(ni) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - err := s1.DialPeerWithAddress(rp.Addr()) - require.Error(t, err) - errRejected, ok := err.(ErrRejected) - require.True(t, ok, "expected error to be of type IsRejected") - require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible") - - // remote peer should not have been added to the addressbook - require.False(t, s1.addrBook.HasAddress(rp.Addr())) -} - -func TestSwitchPeerFilter(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, - func(_ IPeerSet, _ Peer) error { return nil }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") - } - } else { - t.Errorf("expected ErrRejected") - } -} - -func TestSwitchPeerFilterTimeout(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { - time.Sleep(10 * time.Millisecond) - return nil - }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchFilterTimeout(5*time.Millisecond), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Log(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - 
} - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") - } -} - -func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err := sw.addPeer(p); err != nil { - t.Fatal(err) - } - - err = sw.addPeer(p) - if errRej, ok := err.(ErrRejected); ok { - if !errRej.IsDuplicate() { - t.Errorf("expected peer to be duplicate. got %v", errRej) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) - } -} - -func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - require.Nil(err) - - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection - err = p.CloseConn() - require.NoError(err) - - assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(p.IsRunning()) -} - -func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(promhttp.Handler()) - defer s.Close() - - scrapeMetrics := func() string { - resp, err := http.Get(s.URL) - require.NoError(t, err) - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - peersMetricValue := func() float64 { - matches := re.FindStringSubmatch(scrapeMetrics()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - p2pMetrics := PrometheusMetrics(namespace) - - // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { - // set metrics on sw1 - if i == 0 { - opt := WithMetrics(p2pMetrics) - opt(sw) - } - return initSwitchFunc(i, sw) - }) - - assert.Equal(t, 
len(sw1.Peers().List()), 1) - assert.EqualValues(t, 1, peersMetricValue()) - - // send messages to the peer from sw1 - p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) - - // stop sw2. this should cause the p to fail, - // which results in calling StopPeerForError internally - t.Cleanup(func() { - if err := sw2.Stop(); err != nil { - t.Error(err) - } - }) - - // now call StopPeerForError explicitly, eg. from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) - - assert.Equal(t, len(sw1.Peers().List()), 0) - assert.EqualValues(t, 0, peersMetricValue()) -} - -func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. simulate failure by closing connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() - require.NoError(t, err) - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.False(t, p.IsRunning()) // old peer instance - assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - - // 2. simulate first time dial failure - rp = &remotePeer{ - PrivKey: ed25519.GenPrivKey(), - Config: cfg, - // Use different interface to prevent duplicate IP filter, this will break - // beyond two peers. - listenAddr: "127.0.0.1:0", - } - rp.Start() - defer rp.Stop() - - conf := config.DefaultP2PConfig() - conf.TestDialFail = true // will trigger a reconnect - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) - // DialPeerWithAddres - sw.peerConfig resets the dialer - waitUntilSwitchHasAtLeastNPeers(sw, 2) - assert.Equal(t, 2, sw.Peers().Size()) -} - -func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. 
simulate failure by closing the connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - conn, err := rp.Dial(sw.NetAddress()) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - conn.Close() - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestSwitchDialPeersAsync(t *testing.T) { - if testing.Short() { - return - } - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.DialPeersAsync([]string{rp.Addr().String()}) - require.NoError(t, err) - time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) -} - -func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - has := sw.Peers().Size() - if has >= n { - break - } - } -} - -func TestSwitchFullConnectivity(t *testing.T) { - switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) - defer func() { - for _, sw := range switches { - sw := sw - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - } - }() - - for i, sw := range switches { - if sw.Peers().Size() != 2 { - t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) - } - } -} - -func TestSwitchAcceptRoutine(t *testing.T) { - cfg.MaxNumInboundPeers = 5 - - // Create some unconditional peers. - const unconditionalPeersNum = 2 - var ( - unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) - unconditionalPeerIDs = make([]string, unconditionalPeersNum) - ) - for i := 0; i < unconditionalPeersNum; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - unconditionalPeers[i] = peer - unconditionalPeerIDs[i] = string(peer.ID()) - } - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - require.NoError(t, err) - err = sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - // 0. check there are no peers - assert.Equal(t, 0, sw.Peers().Size()) - - // 1. check we connect up to MaxNumInboundPeers - peers := make([]*remotePeer, 0) - for i := 0; i < cfg.MaxNumInboundPeers; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peers = append(peers, peer) - peer.Start() - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(100 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - - // 2. 
check we close new connections if we already have MaxNumInboundPeers peers - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - conn, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // check conn is closed - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - peer.Stop() - - // 3. check we connect to unconditional peers despite the limit. - for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(10 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) - - for _, peer := range peers { - peer.Stop() - } - for _, peer := range unconditionalPeers { - peer.Stop() - } -} - -func TestSwitchRejectsIncompatiblePeers(t *testing.T) { - sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := sw.NodeInfo() - ni.Network = "network-a" - sw.SetNodeInfo(ni) - - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - assert.Equal(t, 0, sw.Peers().Size()) - - conn, err := rp.Dial(sw.NetAddress()) - assert.Nil(t, err) - - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - - assert.Equal(t, 0, sw.Peers().Size()) -} - -type errorTransport struct { - acceptErr error -} - -func (et errorTransport) String() string { - return "error" -} - -func (et errorTransport) Protocols() []Protocol { - return []Protocol{"error"} -} - -func (et errorTransport) Accept() (Connection, error) { - return nil, et.acceptErr -} -func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) { - panic("not implemented") -} -func (errorTransport) Close() error { panic("not implemented") } -func (errorTransport) FlushClose() error { panic("not implemented") } -func (errorTransport) Endpoints() []Endpoint { panic("not implemented") } - -func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - // TODO(melekes) check we remove our address from addrBook - - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) -} - -// mockReactor checks that InitPeer never called before RemovePeer. If that's -// not true, InitCalledBeforeRemoveFinished will return true. 
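
// [Editor's note] The detection trick in mockReactor below generalizes: the
// slow callback raises an atomic flag for its whole duration, and the fast
// callback records whether it ever observed the flag raised. A stripped-down
// sketch of the same idea (orderingProbe is a hypothetical name):

type orderingProbe struct {
	inProgress uint32 // 1 while slow() is running
	violated   uint32 // set to 1 if fast() overlapped slow()
}

func (p *orderingProbe) slow() {
	atomic.StoreUint32(&p.inProgress, 1)
	defer atomic.StoreUint32(&p.inProgress, 0)
	time.Sleep(100 * time.Millisecond) // simulate slow teardown work
}

func (p *orderingProbe) fast() {
	if atomic.LoadUint32(&p.inProgress) == 1 {
		atomic.StoreUint32(&p.violated, 1)
	}
}

func (p *orderingProbe) Violated() bool { return atomic.LoadUint32(&p.violated) == 1 }
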
-type mockReactor struct { - *BaseReactor - - // atomic - removePeerInProgress uint32 - initCalledBeforeRemoveFinished uint32 -} - -func (r *mockReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{{ID: testCh, Priority: 10}} -} - -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { - atomic.StoreUint32(&r.removePeerInProgress, 1) - defer atomic.StoreUint32(&r.removePeerInProgress, 0) - time.Sleep(100 * time.Millisecond) -} - -func (r *mockReactor) InitPeer(peer Peer) Peer { - if atomic.LoadUint32(&r.removePeerInProgress) == 1 { - atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) - } - - return peer -} - -func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { - return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 -} - -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { - // make reactor - reactor := &mockReactor{} - reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { - sw.AddReactor("mock", reactor) - return sw - }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // add peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - - // wait till the switch adds rp to the peer set, then stop the peer asynchronously - for { - time.Sleep(20 * time.Millisecond) - if peer := sw.Peers().Get(rp.ID()); peer != nil { - go sw.StopPeerForError(peer, "test") - break - } - } - - // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - - // make sure reactor.RemovePeer is finished before InitPeer is called - assert.False(t, reactor.InitCalledBeforeRemoveFinished()) -} - -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - - b.Cleanup(func() { - if err := s1.Stop(); err != nil { - b.Error(err) - } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { - b.Error(err) - } - }) - - // Allow time for goroutines to boot up - time.Sleep(1 * time.Second) - - b.ResetTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } - } - - b.Logf("success: %v, failure: %v", numSuccess, numFailure) -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go index b2851646d..d29709a89 100644 --- a/internal/p2p/test_util.go +++ 
b/internal/p2p/test_util.go @@ -1,42 +1,15 @@ package p2p import ( - "context" "fmt" mrand "math/rand" - "net" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p/conn" ) -const testCh = 0x01 - //------------------------------------------------ -func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) //nolint:errcheck // ignore error -} - -func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ - NodeID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - metrics: NopMetrics(), - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - // nolint:gosec // G404: Use of weak random number generator func CreateRoutableAddr() (addr string, netAddr *NetAddress) { for { @@ -57,232 +30,3 @@ func CreateRoutableAddr() (addr string, netAddr *NetAddress) { } return } - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TestHost = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, - n int, - initSwitch func(int, *Switch) *Switch, - connect func([]*Switch, int, int), -) []*Switch { - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch, log.TestingLogger()) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. -func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - - p := newPeer( - peerNodeInfo, - pc, - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err = sw.addPeer(p); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. 
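
// [Editor's note] Typical usage of the removed helpers above, as the deleted
// switch tests did: build a fully connected in-memory network, then tear it
// down. A minimal sketch (exampleThreeSwitchNetwork is a hypothetical name):

func exampleThreeSwitchNetwork(t *testing.T) {
	cfg := config.DefaultP2PConfig()
	// Three switches; Connect2Switches wires each pair over net.Pipe.
	switches := MakeConnectedSwitches(cfg, 3,
		func(i int, sw *Switch) *Switch { return sw },
		Connect2Switches)
	t.Cleanup(func() {
		for _, sw := range switches {
			_ = sw.Stop()
		}
	})
}
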
-func StartSwitches(switches []*Switch) error { - for _, s := range switches { - err := s.Start() // start switch and reactors - if err != nil { - return err - } - } - return nil -} - -func MakeSwitch( - cfg *config.P2PConfig, - i int, - network, version string, - initSwitch func(int, *Switch) *Switch, - logger log.Logger, - opts ...SwitchOption, -) *Switch { - - nodeKey := types.GenNodeKey() - nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) - addr, err := types.NewNetAddressString( - nodeKey.ID.AddressString(nodeInfo.ListenAddr), - ) - if err != nil { - panic(err) - } - - swLogger := logger.With("switch", i) - t := NewMConnTransport(swLogger, MConnConfig(cfg), - []*ChannelDescriptor{}, MConnTransportOptions{}) - - // TODO: let the config be passed in? - sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(swLogger) - sw.SetNodeKey(nodeKey) - - if err := t.Listen(NewEndpoint(addr)); err != nil { - panic(err) - } - - ni := nodeInfo - ni.Channels = []byte{} - for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) - } - nodeInfo = ni - - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters. - sw.SetNodeInfo(nodeInfo) - - return sw -} - -func testInboundPeerConn( - transport *MConnTransport, - conn net.Conn, -) (peerConn, error) { - return testPeerConn(transport, conn, false, false) -} - -func testPeerConn( - transport *MConnTransport, - rawConn net.Conn, - outbound, persistent bool, -) (pc peerConn, err error) { - - conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs) - - return newPeerConn(outbound, persistent, conn), nil -} - -//---------------------------------------------------------------- -// rand node info - -func testNodeInfo(id types.NodeID, name string) types.NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") -} - -func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo { - return types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: types.NodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - -type AddrBookMock struct { - Addrs map[string]struct{} - OurAddrs map[string]struct{} - PrivateAddrs map[string]struct{} -} - -var _ AddrBook = (*AddrBookMock)(nil) - -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.Addrs[addr.String()] = struct{}{} - return nil -} -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.OurAddrs[addr.String()] - return ok -} -func (book *AddrBookMock) MarkGood(types.NodeID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.Addrs[addr.String()] - return ok -} -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.Addrs, addr.String()) -} -func (book *AddrBookMock) Save() {} -func (book *AddrBookMock) AddPrivateIDs(addrs []string) { - for _, addr := range addrs { - book.PrivateAddrs[addr] = struct{}{} - } -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go 
index a3245dfc8..0b2311fa3 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh Transport|Connection @@ -20,14 +19,6 @@ const ( defaultProtocol Protocol = MConnProtocol ) -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, -} - // Protocol identifies a transport protocol. type Protocol string diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index ac49bbd31..d17796fff 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,13 +1,9 @@ package core import ( - "errors" - - "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. @@ -58,52 +54,28 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.Re // Get Peer consensus states. var peerStates []coretypes.PeerStateInfo - switch { - case env.P2PPeers != nil: - peers := env.P2PPeers.Peers().List() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } + peers := env.PeerManager.Peers() + peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { + continue + } + + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + + addr := env.PeerManager.Addresses(pid) + if len(addr) != 0 { peerStates = append(peerStates, coretypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.SocketAddr().String(), + NodeAddress: addr[0].String(), // Peer consensus state. PeerState: peerStateJSON, }) } - case env.PeerManager != nil: - peers := env.PeerManager.Peers() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, pid := range peers { - peerState, ok := env.ConsensusReactor.GetPeerState(pid) - if !ok { - continue - } - - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - - addr := env.PeerManager.Addresses(pid) - if len(addr) >= 1 { - peerStates = append(peerStates, coretypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: addr[0].String(), - // Peer consensus state. - PeerState: peerStateJSON, - }) - } - } - default: - return nil, errors.New("no peer system configured") } // Get self round state. 
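
// [Editor's note] The rewritten DumpConsensusState hunk above reduces to one
// loop: enumerate peers from the PeerManager, skip peers without consensus
// state or a known address, and serialize the rest. A condensed sketch of
// that shape (collectPeerStates is a hypothetical name):

func collectPeerStates(env *Environment) ([]coretypes.PeerStateInfo, error) {
	peers := env.PeerManager.Peers()
	out := make([]coretypes.PeerStateInfo, 0, len(peers))
	for _, pid := range peers {
		ps, ok := env.ConsensusReactor.GetPeerState(pid)
		if !ok {
			continue // peer has no consensus state yet
		}
		js, err := ps.ToJSON()
		if err != nil {
			return nil, err
		}
		if addrs := env.PeerManager.Addresses(pid); len(addrs) > 0 {
			out = append(out, coretypes.PeerStateInfo{
				NodeAddress: addrs[0].String(),
				PeerState:   js,
			})
		}
	}
	return out, nil
}
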
diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 11b138eac..f05c34f14 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -51,14 +51,6 @@ type transport interface { NodeInfo() types.NodeInfo } -type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet -} - type consensusReactor interface { WaitSync() bool GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) @@ -83,7 +75,6 @@ type Environment struct { EvidencePool sm.EvidencePool ConsensusState consensusState ConsensusReactor consensusReactor - P2PPeers peers // Legacy p2p stack P2PTransport transport diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index 8bcc04dd0..fdf4be69b 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -3,9 +3,7 @@ package core import ( "errors" "fmt" - "strings" - "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -13,33 +11,19 @@ import ( // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { - var peers []coretypes.Peer + peerList := env.PeerManager.Peers() - switch { - case env.P2PPeers != nil: - peersList := env.P2PPeers.Peers().List() - peers = make([]coretypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, coretypes.Peer{ - ID: peer.ID(), - URL: peer.SocketAddr().String(), - }) + peers := make([]coretypes.Peer, 0, len(peerList)) + for _, peer := range peerList { + addrs := env.PeerManager.Addresses(peer) + if len(addrs) == 0 { + continue } - case env.PeerManager != nil: - peerList := env.PeerManager.Peers() - for _, peer := range peerList { - addrs := env.PeerManager.Addresses(peer) - if len(addrs) == 0 { - continue - } - peers = append(peers, coretypes.Peer{ - ID: peer, - URL: addrs[0].String(), - }) - } - default: - return nil, errors.New("peer management system does not support NetInfo responses") + peers = append(peers, coretypes.Peer{ + ID: peer, + URL: addrs[0].String(), + }) } return &coretypes.ResultNetInfo{ @@ -50,70 +34,6 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo }, nil } -// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(seeds) == 0 { - return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest) - } - env.Logger.Info("DialSeeds", "seeds", seeds) - if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { - return &coretypes.ResultDialSeeds{}, err - } - return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil -} - -// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), -// optionally making them persistent. 
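
// [Editor's note] The removed handlers in this hunk classify bad input by
// wrapping a sentinel with %w, e.g. fmt.Errorf("%w: no seeds provided",
// coretypes.ErrInvalidRequest), so callers can branch with errors.Is. A
// self-contained illustration of that convention (errInvalidRequest stands
// in for the real sentinel):

package main

import (
	"errors"
	"fmt"
)

var errInvalidRequest = errors.New("invalid request")

func dialSeeds(seeds []string) error {
	if len(seeds) == 0 {
		return fmt.Errorf("%w: no seeds provided", errInvalidRequest)
	}
	return nil
}

func main() {
	err := dialSeeds(nil)
	fmt.Println(errors.Is(err, errInvalidRequest)) // true: %w preserves the sentinel
}
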
-func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, - peers []string, - persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) { - - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(peers) == 0 { - return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest) - } - - ids, err := getIDs(peers) - if err != nil { - return &coretypes.ResultDialPeers{}, err - } - - env.Logger.Info("DialPeers", "peers", peers, "persistent", - persistent, "unconditional", unconditional, "private", private) - - if persistent { - if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if private { - if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if unconditional { - if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if err := env.P2PPeers.DialPeersAsync(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - - return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil -} - // Genesis returns genesis file. // More: https://docs.tendermint.com/master/rpc/#/Info/genesis func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { @@ -145,18 +65,3 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*core Data: env.genChunks[id], }, nil } - -func getIDs(peers []string) ([]string, error) { - ids := make([]string, 0, len(peers)) - - for _, peer := range peers { - - spl := strings.Split(peer, "@") - if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} - } - ids = append(ids, spl[0]) - - } - return ids, nil -} diff --git a/internal/rpc/core/net_test.go b/internal/rpc/core/net_test.go deleted file mode 100644 index 7c12a69d6..000000000 --- a/internal/rpc/core/net_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package core - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - seeds []string - isErr bool - }{ - {[]string{}, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} - -func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - sw.SetAddrBook(&p2p.AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{}), - PrivateAddrs: 
make(map[string]struct{}), - }) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - peers []string - persistence, unconditional, private bool - isErr bool - }{ - {[]string{}, false, false, false, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, - {[]string{"127.0.0.1:41198"}, true, true, false, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 1eb50fe4e..73eaaf14c 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -55,7 +55,5 @@ func (env *Environment) GetRoutes() RoutesMap { // AddUnsafeRoutes adds unsafe routes. func (env *Environment) AddUnsafe(routes RoutesMap) { // control API - routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false) - routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false) routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) } diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index bdd7ee1fa..b883c6dc2 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -58,6 +58,7 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, VotingPower: votingPower, } } + result := &coretypes.ResultStatus{ NodeInfo: env.P2PTransport.NodeInfo(), SyncInfo: coretypes.SyncInfo{ diff --git a/node/node.go b/node/node.go index 820e0630a..891c5fce6 100644 --- a/node/node.go +++ b/node/node.go @@ -54,10 +54,8 @@ type nodeImpl struct { // network transport *p2p.MConnTransport - sw *p2p.Switch // p2p connections peerManager *p2p.PeerManager router *p2p.Router - addrBook pex.AddrBook // known peers nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey isListening bool @@ -292,14 +290,6 @@ func makeNode(cfg *config.Config, return nil, fmt.Errorf("could not create blockchain reactor: %w", err) } - // TODO: Remove this once the switch is removed. - var bcReactorForSwitch p2p.Reactor - if bcReactorShim != nil { - bcReactorForSwitch = bcReactorShim - } else { - bcReactorForSwitch = bcReactor.(p2p.Reactor) - } - // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { @@ -312,29 +302,15 @@ func makeNode(cfg *config.Config, // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. 
See: // https://github.com/tendermint/tendermint/issues/4644 - var ( - stateSyncReactor *statesync.Reactor - stateSyncReactorShim *p2p.ReactorShim - - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(stateSyncReactorShim) - peerUpdates = stateSyncReactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - stateSyncReactor = statesync.NewReactor( + ssLogger := logger.With("module", "statesync") + ssReactorShim := p2p.NewReactorShim(ssLogger, "StateSyncShim", statesync.ChannelShims) + channels := makeChannelsFromShims(router, statesync.ChannelShims) + peerUpdates := peerManager.Subscribe() + stateSyncReactor := statesync.NewReactor( genDoc.ChainID, genDoc.InitialHeight, *cfg.StateSync, - stateSyncReactorShim.Logger, + ssLogger, proxyApp.Snapshot(), proxyApp.Query(), channels[statesync.SnapshotChannel], @@ -353,10 +329,10 @@ func makeNode(cfg *config.Config, // transports can either be agnostic to channel descriptors or can be // declared in the constructor. transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels()) + transport.AddChannelDescriptors(bcReactorShim.GetChannels()) transport.AddChannelDescriptors(csReactorShim.GetChannels()) transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels()) + transport.AddChannelDescriptors(ssReactorShim.GetChannels()) // Optionally, start the pex reactor // @@ -371,44 +347,14 @@ func makeNode(cfg *config.Config, // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. 
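
// [Editor's note] Every reactor constructor touched by this commit is rewired
// the same way: open channels on the new router via makeChannelsFromShims and
// take peer up/down events from peerManager.Subscribe(), replacing the legacy
// shim/switch plumbing. The evidence reactor from setup.go in this commit
// shows the concrete shape:

channels := makeChannelsFromShims(router, evidence.ChannelShims)
peerUpdates := peerManager.Subscribe()
evidenceReactor := evidence.NewReactor(
	logger,
	channels[evidence.EvidenceChannel],
	peerUpdates,
	evidencePool,
)
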
// Note we currently use the addrBook regardless at least for AddOurAddress - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) + var pexReactor service.Service pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if cfg.P2P.UseLegacy { - // setup Transport and Switch - sw = createSwitch( - cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger) - } else { - addrBook = nil - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) - if err != nil { - return nil, err - } + pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + if err != nil { + return nil, err } if cfg.RPC.PprofListenAddress != "" { @@ -424,10 +370,8 @@ func makeNode(cfg *config.Config, privValidator: privValidator, transport: transport, - sw: sw, peerManager: peerManager, router: router, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, @@ -456,7 +400,6 @@ func makeNode(cfg *config.Config, ConsensusReactor: csReactor, BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), - P2PPeers: sw, PeerManager: peerManager, GenDoc: genDoc, @@ -468,17 +411,6 @@ func makeNode(cfg *config.Config, }, } - // this is a terrible, because typed nil interfaces are not == - // nil, so this is just cleanup to avoid having a non-nil - // value in the RPC environment that has the semantic - // properties of nil. 
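
// [Editor's note] The "typed nil" hazard the removed comment below describes
// is a standard Go gotcha: an interface holding a nil *T is itself non-nil.
// A self-contained demonstration:

package main

import "fmt"

type peers interface{ Len() int }

type switchStub struct{}

func (*switchStub) Len() int { return 0 }

func main() {
	var sw *switchStub // nil pointer
	var p peers = sw   // interface now holds a (*switchStub)(nil)
	fmt.Println(sw == nil) // true
	fmt.Println(p == nil)  // false: the interface carries a type, so it is not nil
}
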
- if sw == nil { - node.rpcEnv.P2PPeers = nil - } else if peerManager == nil { - node.rpcEnv.PeerManager = nil - } - // end hack - node.rpcEnv.P2PTransport = node node.BaseService = *service.NewBaseService(logger, "Node", node) @@ -525,11 +457,7 @@ func makeSeedNode(cfg *config.Config, return nil, fmt.Errorf("failed to create router: %w", err) } - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) + var pexReactor service.Service // add the pex reactor // FIXME: we add channel descriptors to both the router and the transport but only the router @@ -538,33 +466,9 @@ func makeSeedNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if cfg.P2P.UseLegacy { - sw = createSwitch( - cfg, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger) - } else { - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) - if err != nil { - return nil, err - } + pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + if err != nil { + return nil, err } if cfg.RPC.PprofListenAddress != "" { @@ -579,8 +483,6 @@ func makeSeedNode(cfg *config.Config, genesisDoc: genDoc, transport: transport, - sw: sw, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, peerManager: peerManager, @@ -627,15 +529,8 @@ func (n *nodeImpl) OnStart() error { } n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy) - if n.config.P2P.UseLegacy { - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - if err = n.sw.Start(); err != nil { - return err - } - } else if err = n.router.Start(); err != nil { + if err = n.router.Start(); err != nil { return err } @@ -667,13 +562,7 @@ func (n *nodeImpl) OnStart() error { } } - if n.config.P2P.UseLegacy { - // Always connect to persistent peers - err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) - } - } else if err := n.pexReactor.Start(); err != nil { + if err := n.pexReactor.Start(); err != nil { return err } @@ -794,14 +683,8 @@ func (n *nodeImpl) OnStop() { n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } - if n.config.P2P.UseLegacy { - if err := n.sw.Stop(); err != nil { - n.Logger.Error("failed to stop switch", "err", err) - } - } else { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } + if err := n.router.Stop(); err != nil { + n.Logger.Error("failed to stop router", "err", err) } if err := n.transport.Close(); err != nil { @@ -1216,12 +1099,3 @@ func makeChannelsFromShims( return channels } - -func 
getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel { - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID := range reactorShim.Channels { - channels[chID] = reactorShim.GetChannel(chID) - } - - return channels -} diff --git a/node/setup.go b/node/setup.go index 16aa715c9..8889edc4e 100644 --- a/node/setup.go +++ b/node/setup.go @@ -2,16 +2,13 @@ package node import ( "bytes" - "context" "fmt" "math" - "net" "time" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" @@ -161,18 +158,8 @@ func createMempoolReactor( channelShims := mempoolv0.GetChannelShims(cfg.Mempool) reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, channelShims) - peerUpdates = peerManager.Subscribe() - } + channels := makeChannelsFromShims(router, channelShims) + peerUpdates := peerManager.Subscribe() switch cfg.Mempool.Version { case config.MempoolV0: @@ -255,23 +242,10 @@ func createEvidenceReactor( return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) } - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, evidence.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - evidenceReactor := evidence.NewReactor( logger, - channels[evidence.EvidenceChannel], - peerUpdates, + makeChannelsFromShims(router, evidence.ChannelShims)[evidence.EvidenceChannel], + peerManager.Subscribe(), evidencePool, ) @@ -294,19 +268,8 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } + channels := makeChannelsFromShims(router, bcv0.ChannelShims) + peerUpdates := peerManager.Subscribe() reactor, err := bcv0.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, @@ -357,13 +320,8 @@ func createConsensusReactor( peerUpdates *p2p.PeerUpdates ) - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, consensus.ChannelShims) - peerUpdates = peerManager.Subscribe() - } + channels = makeChannelsFromShims(router, consensus.ChannelShims) + peerUpdates = peerManager.Subscribe() reactor := consensus.NewReactor( logger, @@ -500,142 +458,6 @@ func createRouter( ) } -func createSwitch( - cfg *config.Config, - transport p2p.Transport, - p2pMetrics *p2p.Metrics, - mempoolReactor *p2p.ReactorShim, - bcReactor p2p.Reactor, - stateSyncReactor *p2p.ReactorShim, - consensusReactor *p2p.ReactorShim, - evidenceReactor *p2p.ReactorShim, - proxyApp proxy.AppConns, - nodeInfo 
types.NodeInfo, - nodeKey types.NodeKey, - p2pLogger log.Logger, -) *p2p.Switch { - - var ( - connFilters = []p2p.ConnFilterFunc{} - peerFilters = []p2p.PeerFilterFunc{} - ) - - if !cfg.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) - } - - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - if cfg.FilterPeers { - connFilters = append( - connFilters, - // ABCI query for address filtering. - func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - - peerFilters = append( - peerFilters, - // ABCI query for ID filtering. - func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - } - - sw := p2p.NewSwitch( - cfg.P2P, - transport, - p2p.WithMetrics(p2pMetrics), - p2p.SwitchPeerFilters(peerFilters...), - p2p.SwitchConnFilters(connFilters...), - ) - - sw.SetLogger(p2pLogger) - if cfg.Mode != config.ModeSeed { - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) - sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) - } - - sw.SetNodeInfo(nodeInfo) - sw.SetNodeKey(nodeKey) - - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile()) - return sw -} - -func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { - - addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile())) - - // Add ourselves to addrbook to prevent dialing ourselves - if cfg.P2P.ExternalAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - if cfg.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - - sw.SetAddrBook(addrBook) - - return addrBook, nil -} - -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - - reactorConfig := &pex.ReactorConfig{ - Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "), - SeedMode: cfg.Mode == config.ModeSeed, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod, - } - // TODO persistent peers ? 
so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, reactorConfig) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) - return pexReactor -} - func createPEXReactorV2( cfg *config.Config, logger log.Logger, diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 85302691d..108510b02 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -136,20 +136,6 @@ func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(c.ctx, seeds) -} - -func (c *Local) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) -} - func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 4b858d067..a1a42e28d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -131,20 +131,6 @@ func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) -} - func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index deffb533a..5c47faa23 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,7 +15,6 @@ var ( // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode}, "queueType": {"priority"}, // "fifo", "wdrr" "initialHeight": {0, 1000}, "initialState": { @@ -71,19 +70,6 @@ var ( // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} - switch opts.P2P { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - defer func() { - // avoid modifying the global state. 
- original := make([]interface{}, len(testnetCombinations["p2p"])) - copy(original, testnetCombinations["p2p"]) - testnetCombinations["p2p"] = original - }() - - testnetCombinations["p2p"] = []interface{}{opts.P2P} - case MixedP2PMode: - testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} - } for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -95,12 +81,6 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { continue } - if len(manifest.Nodes) == 1 { - if opt["p2p"] == HybridP2PMode { - continue - } - } - if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { continue } @@ -116,20 +96,9 @@ type Options struct { MaxNetworkSize int NumGroups int Directory string - P2P P2PMode Reverse bool } -type P2PMode string - -const ( - NewP2PMode P2PMode = "new" - LegacyP2PMode P2PMode = "legacy" - HybridP2PMode P2PMode = "hybrid" - // mixed means that all combination are generated - MixedP2PMode P2PMode = "mixed" -) - // generateTestnet generates a single testnet with the given options. func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ @@ -145,13 +114,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er TxSize: int64(txSize.Choose(r).(int)), } - p2pMode := opt["p2p"].(P2PMode) - switch p2pMode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - default: - return manifest, fmt.Errorf("unknown p2p mode %s", p2pMode) - } - var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": @@ -168,27 +130,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) } - const legacyP2PFactor float64 = 0.5 - // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { node := generateNode(r, manifest, e2e.ModeSeed, 0, false) - - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node } - var ( - numSyncingNodes = 0 - hybridNumNew = 0 - hybridNumLegacy = 0 - ) + var numSyncingNodes = 0 // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. 
We also set up @@ -205,29 +153,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er name := fmt.Sprintf("validator%02d", i) node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - if node.UseLegacyP2P { - hybridNumLegacy++ - if hybridNumNew == 0 { - hybridNumNew++ - hybridNumLegacy-- - node.UseLegacyP2P = false - } - } else { - hybridNumNew++ - if hybridNumLegacy == 0 { - hybridNumNew-- - hybridNumLegacy++ - node.UseLegacyP2P = true - - } - } - } - manifest.Nodes[name] = node if startAt == 0 { @@ -259,13 +184,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er } node := generateNode(r, manifest, e2e.ModeFull, startAt, false) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() > legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } @@ -336,13 +254,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er r, startAt+(5*int64(i)), lightProviders, ) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("light%02d", i)] = node } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index c38b6b20b..b7f790259 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -5,15 +5,14 @@ import ( "math/rand" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) func TestGenerator(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: MixedP2PMode}) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{}) require.NoError(t, err) - require.True(t, len(manifests) >= 64, "insufficient combinations") + require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests)) // this just means that the numbers reported by the test // failures map to the test cases that you'd see locally. @@ -41,71 +40,4 @@ func TestGenerator(t *testing.T) { require.True(t, numStateSyncs <= 2) }) } - - t.Run("Hybrid", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: HybridP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. 
- e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - require.True(t, len(m.Nodes) > 1) - - var numLegacy, numNew int - for _, node := range m.Nodes { - if node.UseLegacyP2P { - numLegacy++ - } else { - numNew++ - } - } - - assert.True(t, numLegacy >= 1, "not enough legacy nodes [%d/%d]", - numLegacy, len(m.Nodes)) - assert.True(t, numNew >= 1, "not enough new nodes [%d/%d]", - numNew, len(m.Nodes)) - }) - } - }) - t.Run("UnmixedP2P", func(t *testing.T) { - t.Run("New", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: NewP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. - e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - for name, node := range m.Nodes { - t.Run(name, func(t *testing.T) { - require.False(t, node.UseLegacyP2P) - }) - } - }) - } - }) - t.Run("Legacy", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: LegacyP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. - e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - for name, node := range m.Nodes { - t.Run(name, func(t *testing.T) { - require.True(t, node.UseLegacyP2P) - }) - } - }) - } - }) - }) } diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 4668f6a8f..38f36d0da 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -38,20 +38,6 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - var err error - - p2pMode, err := cmd.Flags().GetString("p2p") - if err != nil { - return err - } - - switch mode := P2PMode(p2pMode); mode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - cli.opts.P2P = mode - default: - return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) - } - return cli.generate() }, } @@ -60,8 +46,6 @@ func NewCLI() *CLI { _ = cli.root.MarkPersistentFlagRequired("dir") cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") - cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), - "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, "Minimum network size (nodes)") cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 6eb32e56f..f73a18859 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,7 +1,6 @@ # This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. 
-disable_legacy_p2p = false evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 4492f4037..d5c9cb7f2 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -145,9 +145,6 @@ type ManifestNode struct { // This is helpful when debugging a specific problem. This overrides the network // level. LogLevel string `toml:"log_level"` - - // UseLegacyP2P enables use of the legacy p2p layer for this node. - UseLegacyP2P bool `toml:"use_legacy_p2p"` } // Stateless reports whether m is a node that does not own state, including light and seed nodes. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index b54dd2bf0..3a75b169e 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -96,7 +96,6 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation LogLevel string - UseLegacyP2P bool QueueType string HasStarted bool } @@ -182,7 +181,6 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - UseLegacyP2P: nodeManifest.UseLegacyP2P, } if node.StartAt == testnet.InitialHeight { diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 5a8407ab2..3b1184e9c 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -238,7 +238,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) cfg.P2P.AddrBookStrict = false - cfg.P2P.UseLegacy = node.UseLegacyP2P cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second @@ -354,7 +353,6 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "snapshot_interval": node.SnapshotInterval, "retain_blocks": node.RetainBlocks, "key_type": node.PrivvalKey.Type(), - "use_legacy_p2p": node.UseLegacyP2P, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go deleted file mode 100644 index 6d5548fc7..000000000 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ /dev/null @@ -1,35 +0,0 @@ -// nolint: gosec -package addrbook - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" -) - -var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) - -func Fuzz(data []byte) int { - addr := new(p2p.NetAddress) - if err := json.Unmarshal(data, addr); err != nil { - return -1 - } - - // Fuzz AddAddress. - err := addrBook.AddAddress(addr, addr) - if err != nil { - return 0 - } - - // Also, make sure PickAddress always returns a non-nil address. 
- bias := rand.Intn(100) - if p := addrBook.PickAddress(bias); p == nil { - panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", - bias, addrBook.Size())) - } - - return 1 -} diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go deleted file mode 100644 index 4ec7aebd9..000000000 --- a/test/fuzz/p2p/addrbook/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package addrbook_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" -) - -const testdataCasesDir = "testdata/cases" - -func TestAddrbookTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - addrbook.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go deleted file mode 100644 index 1166f9bd7..000000000 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ /dev/null @@ -1,59 +0,0 @@ -// nolint: gosec -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - // create "corpus" directory - corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - - // create corpus - privKey := ed25519.GenPrivKey() - addrs := []*p2p.NetAddress{ - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, - } - - for i, addr := range addrs { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) - - bz, err := json.Marshal(addr) - if err != nil { - log.Fatalf("can't marshal %v: %v", addr, err) - } - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", addr, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go deleted file mode 100644 index 8a194e730..000000000 --- a/test/fuzz/p2p/pex/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package pex_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/pex" -) - -const testdataCasesDir = "testdata/cases" - -func 
TestPexTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - pex.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go deleted file mode 100644 index e90216864..000000000 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ /dev/null @@ -1,84 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(rootDir string) { - log.SetFlags(0) - - corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - sizes := []int{0, 1, 2, 17, 5, 31} - - // Make the PRNG predictable - rand.Seed(10) - - for _, n := range sizes { - var addrs []*p2p.NetAddress - - // IPv4 addresses - for i := 0; i < n; i++ { - privKey := ed25519.GenPrivKey() - addr := fmt.Sprintf( - "%s@%v.%v.%v.%v:26656", - types.NodeIDFromPubKey(privKey.PubKey()), - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - ) - netAddr, _ := types.NewNetAddressString(addr) - addrs = append(addrs, netAddr) - } - - // IPv6 addresses - privKey := ed25519.GenPrivKey() - ipv6a, err := types.NewNetAddressString( - fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey()))) - if err != nil { - log.Fatalf("can't create a new netaddress: %v", err) - } - addrs = append(addrs, ipv6a) - - msg := tmp2p.PexMessage{ - Sum: &tmp2p.PexMessage_PexResponse{ - PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)}, - }, - } - bz, err := msg.Marshal() - if err != nil { - log.Fatalf("unable to marshal: %v", err) - } - - filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %X to %q: %v", bz, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go deleted file mode 100644 index 388361a4e..000000000 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ /dev/null @@ -1,95 +0,0 @@ -package pex - -import ( - "net" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var ( - pexR *pex.Reactor - peer p2p.Peer - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) - -func init() { - addrB := pex.NewAddrBook("./testdata/addrbook1", false) - pexR = 
pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) - pexR.SetLogger(logger) - peer = newFuzzPeer() - pexR.AddPeer(peer) - - cfg := config.DefaultP2PConfig() - cfg.PexReactor = true - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - return sw - }, logger) - pexR.SetSwitch(sw) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - pexR.Receive(pex.PexChannel, peer, data) - - if !peer.IsRunning() { - // do not increase priority for msgs which lead to peer being stopped - return 0 - } - - return 1 -} - -type fuzzPeer struct { - *service.BaseService - m map[string]interface{} -} - -var _ p2p.Peer = (*fuzzPeer)(nil) - -func newFuzzPeer() *fuzzPeer { - fp := &fuzzPeer{m: make(map[string]interface{})} - fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) - return fp -} - -var privKey = ed25519.GenPrivKey() -var nodeID = types.NodeIDFromPubKey(privKey.PubKey()) -var defaultNodeInfo = types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, - }, - NodeID: nodeID, - ListenAddr: "127.0.0.1:0", - Moniker: "foo1", -} - -func (fp *fuzzPeer) FlushStop() {} -func (fp *fuzzPeer) ID() types.NodeID { return nodeID } -func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) } -func (fp *fuzzPeer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""} -} -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { - return types.NewNetAddress(fp.ID(), fp.RemoteAddr()) -} -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 deleted file mode 100644 index acf3e721d..000000000 --- a/test/fuzz/p2p/pex/testdata/addrbook1 +++ /dev/null @@ -1,1705 +0,0 @@ -{ - "Key": "badd73ebd4eeafbaefc01e0c", - "Addrs": [ - { - "Addr": { - "IP": "233.174.138.192", - "Port": 48186 - }, - "Src": { - "IP": "198.37.90.115", - "Port": 29492 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692278-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 55 - ] - }, - { - "Addr": { - "IP": "181.28.96.104", - "Port": 26776 - }, - "Src": { - "IP": "183.12.35.241", - "Port": 26794 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692289-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "141.85.194.118", - "Port": 39768 - }, - "Src": { - "IP": "120.130.90.63", - "Port": 61750 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692383-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 185 - ] - }, - { - "Addr": { - "IP": "167.72.9.155", - "Port": 9542 - }, - "Src": { - "IP": "95.158.40.108", - "Port": 14929 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692604-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": 
[ - 250 - ] - }, - { - "Addr": { - "IP": "124.118.94.27", - "Port": 50333 - }, - "Src": { - "IP": "208.169.57.96", - "Port": 19754 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692046-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 0 - ] - }, - { - "Addr": { - "IP": "158.197.4.226", - "Port": 25979 - }, - "Src": { - "IP": "3.129.219.107", - "Port": 50374 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692211-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "170.42.135.37", - "Port": 34524 - }, - "Src": { - "IP": "73.125.53.212", - "Port": 49691 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692241-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 14 - ] - }, - { - "Addr": { - "IP": "234.69.254.147", - "Port": 31885 - }, - "Src": { - "IP": "167.106.61.34", - "Port": 22187 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692609-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 213 - ] - }, - { - "Addr": { - "IP": "32.176.173.90", - "Port": 17250 - }, - "Src": { - "IP": "118.91.243.12", - "Port": 26781 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692273-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 35 - ] - }, - { - "Addr": { - "IP": "162.154.114.145", - "Port": 13875 - }, - "Src": { - "IP": "198.178.108.166", - "Port": 59623 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692373-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 216 - ] - }, - { - "Addr": { - "IP": "67.128.167.93", - "Port": 50513 - }, - "Src": { - "IP": "104.93.115.28", - "Port": 48298 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692399-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "132.175.221.206", - "Port": 61037 - }, - "Src": { - "IP": "112.49.189.65", - "Port": 56186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692422-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 31 - ] - }, - { - "Addr": { - "IP": "155.49.24.238", - "Port": 26261 - }, - "Src": { - "IP": "97.10.121.246", - "Port": 8694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692473-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 23 - ] - }, - { - "Addr": { - "IP": "22.215.7.233", - "Port": 32487 - }, - "Src": { - "IP": "214.236.105.23", - "Port": 26870 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692572-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 255 - ] - }, - { - "Addr": { - "IP": "253.170.228.231", - "Port": 5002 - }, - "Src": { - "IP": "225.49.137.209", - "Port": 16908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692619-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 161 - ] - }, - { - "Addr": { - "IP": "162.126.204.39", - "Port": 62618 - }, - "Src": { - "IP": "250.214.168.131", - "Port": 3237 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69203-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 7 - ] - }, - { - "Addr": { - "IP": "83.154.228.215", - "Port": 23508 - }, - "Src": { - "IP": "66.33.77.170", - "Port": 52207 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692153-07:00", 
- "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "132.49.63.65", - "Port": 53651 - }, - "Src": { - "IP": "250.164.163.212", - "Port": 8612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692253-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 98 - ] - }, - { - "Addr": { - "IP": "200.168.34.12", - "Port": 61901 - }, - "Src": { - "IP": "133.185.186.115", - "Port": 14186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692488-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 214 - ] - }, - { - "Addr": { - "IP": "31.93.45.219", - "Port": 61036 - }, - "Src": { - "IP": "176.191.214.170", - "Port": 33402 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692024-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 68 - ] - }, - { - "Addr": { - "IP": "250.189.27.93", - "Port": 51665 - }, - "Src": { - "IP": "93.161.116.107", - "Port": 53482 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692196-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 42 - ] - }, - { - "Addr": { - "IP": "50.7.17.126", - "Port": 64300 - }, - "Src": { - "IP": "233.234.64.214", - "Port": 61061 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692444-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 13 - ] - }, - { - "Addr": { - "IP": "88.85.81.64", - "Port": 34834 - }, - "Src": { - "IP": "4.240.150.250", - "Port": 63064 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692248-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 195 - ] - }, - { - "Addr": { - "IP": "242.117.244.198", - "Port": 4363 - }, - "Src": { - "IP": "149.29.34.42", - "Port": 62567 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692263-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "245.155.175.114", - "Port": 37262 - }, - "Src": { - "IP": "75.85.36.49", - "Port": 7101 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692313-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "224.184.241.26", - "Port": 55870 - }, - "Src": { - "IP": "52.15.194.216", - "Port": 4733 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692327-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "43.178.26.188", - "Port": 55914 - }, - "Src": { - "IP": "103.250.250.35", - "Port": 1566 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692577-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "102.117.172.117", - "Port": 35855 - }, - "Src": { - "IP": "114.152.204.187", - "Port": 21156 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692158-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "39.33.41.199", - "Port": 51600 - }, - "Src": { - "IP": "119.65.88.38", - "Port": 41239 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692188-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 24 - ] - }, - { - "Addr": { - "IP": "63.164.56.227", - "Port": 1660 - }, - "Src": { - "IP": "169.54.47.92", - "Port": 2818 - }, - 
"Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692227-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 10 - ] - }, - { - "Addr": { - "IP": "50.183.223.115", - "Port": 26910 - }, - "Src": { - "IP": "115.98.199.4", - "Port": 8767 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692201-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "132.94.203.167", - "Port": 53156 - }, - "Src": { - "IP": "17.195.234.168", - "Port": 29405 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692294-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "135.194.230.212", - "Port": 14340 - }, - "Src": { - "IP": "160.2.241.10", - "Port": 36553 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692363-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 63 - ] - }, - { - "Addr": { - "IP": "116.53.200.25", - "Port": 27092 - }, - "Src": { - "IP": "219.104.163.247", - "Port": 50476 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692543-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "125.77.44.185", - "Port": 55291 - }, - "Src": { - "IP": "77.15.232.117", - "Port": 6934 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692589-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "27.221.35.172", - "Port": 26418 - }, - "Src": { - "IP": "252.18.49.70", - "Port": 9835 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692068-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "133.225.167.135", - "Port": 59468 - }, - "Src": { - "IP": "110.223.163.74", - "Port": 22576 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69213-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 164 - ] - }, - { - "Addr": { - "IP": "155.131.178.240", - "Port": 60476 - }, - "Src": { - "IP": "143.82.157.1", - "Port": 43821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692173-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - }, - { - "Addr": { - "IP": "207.13.48.52", - "Port": 28549 - }, - "Src": { - "IP": "238.224.177.29", - "Port": 44100 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692594-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 113 - ] - }, - { - "Addr": { - "IP": "91.137.2.184", - "Port": 44887 - }, - "Src": { - "IP": "72.131.70.84", - "Port": 29960 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692627-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "169.59.252.76", - "Port": 57711 - }, - "Src": { - "IP": "194.132.91.119", - "Port": 18037 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692478-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 124 - ] - }, - { - "Addr": { - "IP": "25.174.143.229", - "Port": 41540 - }, - "Src": { - "IP": "58.215.132.148", - "Port": 64950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692534-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 88 - ] - }, - { - "Addr": { - "IP": "71.239.78.239", - "Port": 
46938 - }, - "Src": { - "IP": "156.98.186.169", - "Port": 32046 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692116-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 154 - ] - }, - { - "Addr": { - "IP": "94.137.107.61", - "Port": 20756 - }, - "Src": { - "IP": "101.201.138.179", - "Port": 22877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692414-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 233 - ] - }, - { - "Addr": { - "IP": "216.62.174.112", - "Port": 60162 - }, - "Src": { - "IP": "225.114.119.144", - "Port": 1575 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692464-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 132 - ] - }, - { - "Addr": { - "IP": "65.183.81.125", - "Port": 17511 - }, - "Src": { - "IP": "12.96.14.61", - "Port": 42308 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692308-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 153 - ] - }, - { - "Addr": { - "IP": "142.26.87.52", - "Port": 41967 - }, - "Src": { - "IP": "60.124.157.139", - "Port": 20727 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692321-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 29 - ] - }, - { - "Addr": { - "IP": "13.77.198.44", - "Port": 54508 - }, - "Src": { - "IP": "142.73.70.174", - "Port": 19525 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692553-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 170 - ] - }, - { - "Addr": { - "IP": "63.192.219.12", - "Port": 46603 - }, - "Src": { - "IP": "26.136.66.29", - "Port": 38924 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692558-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 203 - ] - }, - { - "Addr": { - "IP": "120.82.251.151", - "Port": 43723 - }, - "Src": { - "IP": "136.104.122.219", - "Port": 47452 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692599-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 103 - ] - }, - { - "Addr": { - "IP": "74.79.96.159", - "Port": 46646 - }, - "Src": { - "IP": "218.60.242.116", - "Port": 5361 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692145-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "194.65.211.174", - "Port": 43464 - }, - "Src": { - "IP": "87.5.112.153", - "Port": 56348 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692163-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "237.158.179.80", - "Port": 32231 - }, - "Src": { - "IP": "210.240.52.244", - "Port": 29142 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692183-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 27 - ] - }, - { - "Addr": { - "IP": "81.157.122.4", - "Port": 9917 - }, - "Src": { - "IP": "213.226.144.152", - "Port": 29950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692614-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 33 - ] - }, - { - "Addr": { - "IP": "180.147.73.220", - "Port": 367 - }, - "Src": { - "IP": "32.229.253.215", - "Port": 62165 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692529-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - 
"Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "83.110.235.17", - "Port": 33231 - }, - "Src": { - "IP": "230.54.162.85", - "Port": 51569 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692563-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 234 - ] - }, - { - "Addr": { - "IP": "100.252.20.2", - "Port": 1633 - }, - "Src": { - "IP": "52.136.47.198", - "Port": 31916 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692644-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 254 - ] - }, - { - "Addr": { - "IP": "74.5.247.79", - "Port": 18703 - }, - "Src": { - "IP": "200.247.68.128", - "Port": 55844 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692378-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 160 - ] - }, - { - "Addr": { - "IP": "17.220.231.87", - "Port": 59015 - }, - "Src": { - "IP": "54.207.49.4", - "Port": 17877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692404-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "156.194.57.127", - "Port": 18944 - }, - "Src": { - "IP": "154.94.235.84", - "Port": 61610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692439-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 32 - ] - }, - { - "Addr": { - "IP": "137.57.172.158", - "Port": 32031 - }, - "Src": { - "IP": "144.160.225.126", - "Port": 43225 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692568-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 4 - ] - }, - { - "Addr": { - "IP": "101.220.101.200", - "Port": 26480 - }, - "Src": { - "IP": "130.225.42.1", - "Port": 2522 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692637-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 173 - ] - }, - { - "Addr": { - "IP": "136.233.185.164", - "Port": 34011 - }, - "Src": { - "IP": "112.127.216.43", - "Port": 55317 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692649-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "101.189.107.148", - "Port": 28671 - }, - "Src": { - "IP": "213.55.140.235", - "Port": 2547 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692178-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 72 - ] - }, - { - "Addr": { - "IP": "61.190.60.64", - "Port": 58467 - }, - "Src": { - "IP": "206.86.120.31", - "Port": 54422 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692358-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 191 - ] - }, - { - "Addr": { - "IP": "227.51.127.223", - "Port": 52754 - }, - "Src": { - "IP": "124.24.12.47", - "Port": 59878 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692393-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 122 - ] - }, - { - "Addr": { - "IP": "101.19.152.238", - "Port": 47491 - }, - "Src": { - "IP": "211.30.216.184", - "Port": 17610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692135-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "182.198.35.238", - "Port": 15065 - }, - "Src": { - "IP": "239.67.104.149", - "Port": 43039 - }, - "Attempts": 0, - "LastAttempt": 
"2017-11-07T01:11:34.692268-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 179 - ] - }, - { - "Addr": { - "IP": "233.12.68.51", - "Port": 47544 - }, - "Src": { - "IP": "203.224.119.48", - "Port": 23337 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692454-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 51 - ] - }, - { - "Addr": { - "IP": "181.30.35.80", - "Port": 500 - }, - "Src": { - "IP": "174.200.32.161", - "Port": 10174 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692503-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 69 - ] - }, - { - "Addr": { - "IP": "49.104.89.21", - "Port": 54774 - }, - "Src": { - "IP": "245.95.238.161", - "Port": 14339 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692654-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 120 - ] - }, - { - "Addr": { - "IP": "65.150.169.199", - "Port": 11589 - }, - "Src": { - "IP": "150.110.183.207", - "Port": 17694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692041-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 182 - ] - }, - { - "Addr": { - "IP": "84.203.198.48", - "Port": 47122 - }, - "Src": { - "IP": "141.209.147.221", - "Port": 26085 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692056-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "220.10.106.180", - "Port": 27439 - }, - "Src": { - "IP": "124.170.244.46", - "Port": 5249 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692125-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 166 - ] - }, - { - "Addr": { - "IP": "120.208.32.34", - "Port": 27224 - }, - "Src": { - "IP": "64.194.118.103", - "Port": 24388 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69251-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 149 - ] - }, - { - "Addr": { - "IP": "245.182.67.231", - "Port": 58067 - }, - "Src": { - "IP": "62.108.238.220", - "Port": 41851 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692522-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "50.81.160.105", - "Port": 8113 - }, - "Src": { - "IP": "129.187.68.121", - "Port": 58612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692284-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 94 - ] - }, - { - "Addr": { - "IP": "101.116.47.155", - "Port": 20287 - }, - "Src": { - "IP": "94.34.167.170", - "Port": 41821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692299-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 134 - ] - }, - { - "Addr": { - "IP": "159.253.213.86", - "Port": 5222 - }, - "Src": { - "IP": "124.47.162.125", - "Port": 45742 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692429-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 25 - ] - }, - { - "Addr": { - "IP": "124.72.81.213", - "Port": 35723 - }, - "Src": { - "IP": "201.65.186.55", - "Port": 26602 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692493-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "77.216.197.130", - "Port": 49129 - }, - "Src": { - 
"IP": "245.160.14.27", - "Port": 38908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692517-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 58 - ] - }, - { - "Addr": { - "IP": "175.46.154.0", - "Port": 15297 - }, - "Src": { - "IP": "6.10.7.13", - "Port": 9657 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692583-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 89 - ] - }, - { - "Addr": { - "IP": "176.71.131.235", - "Port": 14342 - }, - "Src": { - "IP": "1.36.215.198", - "Port": 21709 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692206-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "34.211.134.186", - "Port": 31608 - }, - "Src": { - "IP": "187.87.12.183", - "Port": 32977 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692221-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 71 - ] - }, - { - "Addr": { - "IP": "238.63.227.107", - "Port": 49502 - }, - "Src": { - "IP": "185.51.127.143", - "Port": 22728 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692483-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 61 - ] - }, - { - "Addr": { - "IP": "160.65.76.45", - "Port": 27307 - }, - "Src": { - "IP": "170.175.198.16", - "Port": 44759 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692051-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 36 - ] - }, - { - "Addr": { - "IP": "152.22.79.90", - "Port": 25861 - }, - "Src": { - "IP": "216.183.31.190", - "Port": 9185 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692409-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 163 - ] - }, - { - "Addr": { - "IP": "200.2.175.37", - "Port": 57270 - }, - "Src": { - "IP": "108.20.254.94", - "Port": 32812 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692434-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "111.16.237.10", - "Port": 45200 - }, - "Src": { - "IP": "215.82.246.115", - "Port": 42333 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692469-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "166.217.195.221", - "Port": 4579 - }, - "Src": { - "IP": "148.153.131.183", - "Port": 13848 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692498-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 78 - ] - }, - { - "Addr": { - "IP": "1.226.156.147", - "Port": 61660 - }, - "Src": { - "IP": "169.138.16.69", - "Port": 23455 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692548-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "108.209.27.58", - "Port": 59102 - }, - "Src": { - "IP": "140.27.139.90", - "Port": 52154 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692014-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 53 - ] - }, - { - "Addr": { - "IP": "221.244.202.95", - "Port": 5032 - }, - "Src": { - "IP": "230.152.141.80", - "Port": 19457 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692168-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - 
"Addr": { - "IP": "55.87.1.138", - "Port": 39686 - }, - "Src": { - "IP": "55.22.167.132", - "Port": 35663 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692258-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "209.53.148.74", - "Port": 18502 - }, - "Src": { - "IP": "195.108.121.25", - "Port": 16730 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692304-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 180 - ] - }, - { - "Addr": { - "IP": "21.66.206.236", - "Port": 10771 - }, - "Src": { - "IP": "236.195.50.16", - "Port": 30697 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692368-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 22 - ] - }, - { - "Addr": { - "IP": "190.87.236.91", - "Port": 58378 - }, - "Src": { - "IP": "72.224.218.34", - "Port": 44817 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692459-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 127 - ] - }, - { - "Addr": { - "IP": "197.172.79.170", - "Port": 24958 - }, - "Src": { - "IP": "71.22.4.12", - "Port": 28558 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692036-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "160.176.234.94", - "Port": 47013 - }, - "Src": { - "IP": "212.172.24.59", - "Port": 29594 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692062-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 99 - ] - }, - { - "Addr": { - "IP": "170.206.180.18", - "Port": 26212 - }, - "Src": { - "IP": "228.135.62.18", - "Port": 26164 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692234-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - } - ] -} diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/test/fuzz/p2p/pex/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 From 5703ae2fb3dc1aede7af118f8becfbb1ad392368 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 5 Oct 2021 20:19:12 +0200 Subject: [PATCH 067/174] e2e: automatically prune old app snapshots (#7034) This PR tackles the case of using the e2e application in a long lived testnet. The application continually saves snapshots (usually every 100 blocks) which after a while bloats the size of the application. This PR prunes older snapshots so that only the most recent 10 snapshots remain. --- test/e2e/app/app.go | 4 ++++ test/e2e/app/snapshots.go | 24 ++++++++++++++++++++++++ test/e2e/generator/generate.go | 2 +- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 0a0194c98..e7ba110e9 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -184,6 +184,10 @@ func (app *Application) Commit() abci.ResponseCommit { panic(err) } app.logger.Info("Created state sync snapshot", "height", snapshot.Height) + err = app.snapshots.Prune(maxSnapshotCount) + if err != nil { + app.logger.Error("Failed to prune snapshots", "err", err) + } } retainHeight := int64(0) if app.cfg.RetainBlocks > 0 { diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ef20375f..6a9c0e0dc 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -16,6 +16,9 @@ import ( const ( snapshotChunkSize = 1e6 + + // Keep only the most recent 10 snapshots. 
Older snapshots are pruned
+	maxSnapshotCount = 10
 )
 
 // SnapshotStore stores state sync snapshots. Snapshots are stored simply as
@@ -105,6 +108,27 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) {
 	return snapshot, nil
 }
 
+// Prune removes old snapshots ensuring only the most recent n snapshots remain
+func (s *SnapshotStore) Prune(n int) error {
+	s.Lock()
+	defer s.Unlock()
+	// snapshots are appended to the metadata struct, hence pruning removes from
+	// the front of the array
+	i := 0
+	for ; i < len(s.metadata)-n; i++ {
+		h := s.metadata[i].Height
+		if err := os.Remove(filepath.Join(s.dir, fmt.Sprintf("%v.json", h))); err != nil {
+			return err
+		}
+	}
+
+	// update metadata by removing the deleted snapshots
+	pruned := make([]abci.Snapshot, len(s.metadata[i:]))
+	copy(pruned, s.metadata[i:])
+	s.metadata = pruned
+	return nil
+}
+
 // List lists available snapshots.
 func (s *SnapshotStore) List() ([]*abci.Snapshot, error) {
 	s.RLock()
diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go
index 5c47faa23..0a2d39665 100644
--- a/test/e2e/generator/generate.go
+++ b/test/e2e/generator/generate.go
@@ -53,7 +53,7 @@ var (
 		e2e.StateSyncRPC:     45,
 	}
 	nodePersistIntervals  = uniformChoice{0, 1, 5}
-	nodeSnapshotIntervals = uniformChoice{0, 3}
+	nodeSnapshotIntervals = uniformChoice{0, 5}
 	nodeRetainBlocks      = uniformChoice{0, 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight)}
 	nodePerturbations     = probSetChoice{
 		"disconnect": 0.1,

From 3ea81bfaa7c04a5a993bca1d97e1b1f4d8be8e4a Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 5 Oct 2021 16:09:31 -0400
Subject: [PATCH 068/174] p2p: remove wdrr queue (#7064)

This code hasn't been battle tested, and seems to have grown
increasingly flaky in tests. Given our general direction of reducing
queue complexity over the next couple of releases, I think it makes
sense to remove it.
---
 CHANGELOG_PENDING.md             |   1 +
 config/config.go                 |   2 +-
 internal/p2p/pqueue_test.go      |   3 +
 internal/p2p/router.go           |  20 +--
 internal/p2p/router_init_test.go |   8 -
 internal/p2p/wdrr_queue.go       | 287 ------------------------------
 internal/p2p/wdrr_queue_test.go  | 208 ----------------------
 test/e2e/generator/generate.go   |   2 +-
 test/e2e/pkg/testnet.go          |   2 +-
 9 files changed, 11 insertions(+), 522 deletions(-)
 delete mode 100644 internal/p2p/wdrr_queue.go
 delete mode 100644 internal/p2p/wdrr_queue_test.go

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 0a4c90f40..c44c58ef0 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -24,6 +24,7 @@ Special thanks to external contributors on this release:
 
 - [blocksync] \#7046 Remove v2 implementation of the blocksync service and
   recactor, which was disabled in the previous release (@tychoish)
+- [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
 
 - Blockchain Protocol
 
diff --git a/config/config.go b/config/config.go
index a393e6edc..349e5a6f7 100644
--- a/config/config.go
+++ b/config/config.go
@@ -710,7 +710,7 @@ type P2PConfig struct { //nolint: maligned
 	TestDialFail bool `mapstructure:"test-dial-fail"`
 
 	// Makes it possible to configure which queue backend the p2p
-	// layer uses. Options are: "fifo", "priority" and "wdrr",
+	// layer uses. Options are: "fifo" and "priority",
 	// with the default being "priority".
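	// [Editorial sketch, not part of the original patch: with "wdrr" removed,
	// "fifo" and "priority" are the only values this field accepts. Assuming
	// the RouterOptions.Validate method shown in the internal/p2p/router.go
	// hunk below, any other queue type now fails validation:
	//
	//	opts := RouterOptions{QueueType: "wdrr"}
	//	err := opts.Validate() // err: queue type "wdrr" is not supported
	// ]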
QueueType string `mapstructure:"queue-type"` } diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index ddb7addbe..c038408ef 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -4,9 +4,12 @@ import ( "testing" "time" + gogotypes "github.com/gogo/protobuf/types" "github.com/tendermint/tendermint/libs/log" ) +type testMessage = gogotypes.StringValue + func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 chDescs := []ChannelDescriptor{ diff --git a/internal/p2p/router.go b/internal/p2p/router.go index d68f16c4f..ef1eef564 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -133,8 +133,8 @@ type RouterOptions struct { // no timeout. HandshakeTimeout time.Duration - // QueueType must be "wdrr" (Weighed Deficit Round Robin), "priority", or - // "fifo". Defaults to "fifo". + // QueueType must be, "priority", or "fifo". Defaults to + // "fifo". QueueType string // MaxIncomingConnectionAttempts rate limits the number of incoming connection @@ -176,7 +176,6 @@ type RouterOptions struct { const ( queueTypeFifo = "fifo" queueTypePriority = "priority" - queueTypeWDRR = "wdrr" ) // Validate validates router options. @@ -184,8 +183,8 @@ func (o *RouterOptions) Validate() error { switch o.QueueType { case "": o.QueueType = queueTypeFifo - case queueTypeFifo, queueTypeWDRR, queueTypePriority: - // passI me + case queueTypeFifo, queueTypePriority: + // pass default: return fmt.Errorf("queue type %q is not supported", o.QueueType) } @@ -347,17 +346,6 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { return q }, nil - case queueTypeWDRR: - return func(size int) queue { - if size%2 != 0 { - size++ - } - - q := newWDRRScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) - q.start() - return q - }, nil - default: return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType) } diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index 3622c0cc1..b90d2a3dd 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -38,14 +38,6 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { require.True(t, ok) defer q.close() }) - t.Run("WDRR", func(t *testing.T) { - opts := RouterOptions{QueueType: queueTypeWDRR} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) - require.NoError(t, err) - q, ok := r.queueFactory(1).(*wdrrScheduler) - require.True(t, ok) - defer q.close() - }) t.Run("NonExistant", func(t *testing.T) { opts := RouterOptions{QueueType: "fast"} _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) diff --git a/internal/p2p/wdrr_queue.go b/internal/p2p/wdrr_queue.go deleted file mode 100644 index 1b75ffce8..000000000 --- a/internal/p2p/wdrr_queue.go +++ /dev/null @@ -1,287 +0,0 @@ -package p2p - -import ( - "fmt" - "sort" - "strconv" - - "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -// wrappedEnvelope wraps a p2p Envelope with its precomputed size. -type wrappedEnvelope struct { - envelope Envelope - size uint -} - -// assert the WDRR scheduler implements the queue interface at compile-time -var _ queue = (*wdrrScheduler)(nil) - -// wdrrQueue implements a Weighted Deficit Round Robin (WDRR) scheduling -// algorithm via the queue interface. A WDRR queue is created per peer, where -// the queue will have N number of flows. 
Each flow corresponds to a p2p Channel, -// so there are n input flows and a single output source, the peer's connection. -// -// The WDRR scheduler contains a shared buffer with a fixed capacity. -// -// Each flow has the following: -// - quantum: The number of bytes that is added to the deficit counter of the -// flow in each round. The flow can send at most quantum bytes at a time. Each -// flow has its own unique quantum, which gives the queue its weighted nature. -// A higher quantum corresponds to a higher weight/priority. The quantum is -// computed as MaxSendBytes * Priority. -// - deficit counter: The number of bytes that the flow is allowed to transmit -// when it is its turn. -// -// See: https://en.wikipedia.org/wiki/Deficit_round_robin -type wdrrScheduler struct { - logger log.Logger - metrics *Metrics - chDescs []ChannelDescriptor - capacity uint - size uint - chPriorities map[ChannelID]uint - buffer map[ChannelID][]wrappedEnvelope - quanta map[ChannelID]uint - deficits map[ChannelID]uint - - closer *tmsync.Closer - doneCh *tmsync.Closer - - enqueueCh chan Envelope - dequeueCh chan Envelope -} - -func newWDRRScheduler( - logger log.Logger, - m *Metrics, - chDescs []ChannelDescriptor, - enqueueBuf, dequeueBuf, capacity uint, -) *wdrrScheduler { - - // copy each ChannelDescriptor and sort them by channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) - copy(chDescsCopy, chDescs) - sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority > chDescsCopy[j].Priority }) - - var ( - buffer = make(map[ChannelID][]wrappedEnvelope) - chPriorities = make(map[ChannelID]uint) - quanta = make(map[ChannelID]uint) - deficits = make(map[ChannelID]uint) - ) - - for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) - chPriorities[chID] = uint(chDesc.Priority) - buffer[chID] = make([]wrappedEnvelope, 0) - quanta[chID] = chDesc.MaxSendBytes * uint(chDesc.Priority) - } - - return &wdrrScheduler{ - logger: logger.With("queue", "wdrr"), - metrics: m, - capacity: capacity, - chPriorities: chPriorities, - chDescs: chDescsCopy, - buffer: buffer, - quanta: quanta, - deficits: deficits, - closer: tmsync.NewCloser(), - doneCh: tmsync.NewCloser(), - enqueueCh: make(chan Envelope, enqueueBuf), - dequeueCh: make(chan Envelope, dequeueBuf), - } -} - -// enqueue returns an unbuffered write-only channel which a producer can send on. -func (s *wdrrScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -// dequeue returns an unbuffered read-only channel which a consumer can read from. -func (s *wdrrScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *wdrrScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - -// close closes the WDRR queue. After this call enqueue() will block, so the -// caller must select on closed() as well to avoid blocking forever. The -// enqueue() and dequeue() along with the internal channels will NOT be closed. -// Note, close() will block until all externally spawned goroutines have exited. -func (s *wdrrScheduler) close() { - s.closer.Close() - <-s.doneCh.Done() -} - -// start starts the WDRR queue process in a blocking goroutine. This must be -// called before the queue can start to process and accept Envelopes. 
-func (s *wdrrScheduler) start() { - go s.process() -} - -// process starts a blocking WDRR scheduler process, where we continuously -// evaluate if we need to attempt to enqueue an Envelope or schedule Envelopes -// to be dequeued and subsequently read and sent on the source connection. -// Internally, each p2p Channel maps to a flow, where each flow has a deficit -// and a quantum. -// -// For each Envelope requested to be enqueued, we evaluate if there is sufficient -// capacity in the shared buffer to add the Envelope. If so, it is added. -// Otherwise, we evaluate all flows of lower priority where we attempt find an -// existing Envelope in the shared buffer of sufficient size that can be dropped -// in place of the incoming Envelope. If there is no such Envelope that can be -// dropped, then the incoming Envelope is dropped. -// -// When there is nothing to be enqueued, we perform the WDRR algorithm and -// determine which Envelopes can be dequeued. For each Envelope that can be -// dequeued, it is sent on the dequeueCh. Specifically, for each flow, if it is -// non-empty, its deficit counter is incremented by its quantum value. Then, the -// value of the deficit counter is a maximal amount of bytes that can be sent at -// this round. If the deficit counter is greater than the Envelopes's message -// size at the head of the queue (HoQ), this envelope can be sent and the value -// of the counter is decremented by the message's size. Then, the size of the -// next Envelopes's message is compared to the counter value, etc. Once the flow -// is empty or the value of the counter is insufficient, the scheduler will skip -// to the next flow. If the flow is empty, the value of the deficit counter is -// reset to 0. -// -// XXX/TODO: Evaluate the single goroutine scheduler mechanism. In other words, -// evaluate the effectiveness and performance of having a single goroutine -// perform handling both enqueueing and dequeueing logic. Specifically, there -// is potentially contention between reading off of enqueueCh and trying to -// enqueue while also attempting to perform the WDRR algorithm and find the next -// set of Envelope(s) to send on the dequeueCh. Alternatively, we could consider -// separate scheduling goroutines, but then that requires the use of mutexes and -// possibly a degrading performance. -func (s *wdrrScheduler) process() { - defer s.doneCh.Close() - - for { - select { - case <-s.closer.Done(): - return - - case e := <-s.enqueueCh: - // attempt to enqueue the incoming Envelope - chIDStr := strconv.Itoa(int(e.channelID)) - wEnv := wrappedEnvelope{envelope: e, size: uint(proto.Size(e.Message))} - msgSize := wEnv.size - - s.metrics.PeerPendingSendBytes.With("peer_id", string(e.To)).Add(float64(msgSize)) - - // If we're at capacity, we need to either drop the incoming Envelope or - // an Envelope from a lower priority flow. Otherwise, we add the (wrapped) - // envelope to the flow's queue. - if s.size+wEnv.size > s.capacity { - chPriority := s.chPriorities[e.channelID] - - var ( - canDrop bool - dropIdx int - dropChID ChannelID - ) - - // Evaluate all lower priority flows and determine if there exists an - // Envelope that is of equal or greater size that we can drop in favor - // of the incoming Envelope. 
- for i := len(s.chDescs) - 1; i >= 0 && uint(s.chDescs[i].Priority) < chPriority && !canDrop; i-- { - currChID := ChannelID(s.chDescs[i].ID) - flow := s.buffer[currChID] - - for j := 0; j < len(flow) && !canDrop; j++ { - if flow[j].size >= wEnv.size { - canDrop = true - dropIdx = j - dropChID = currChID - break - } - } - } - - // If we can drop an existing Envelope, drop it and enqueue the incoming - // Envelope. - if canDrop { - chIDStr = strconv.Itoa(int(dropChID)) - chPriority = s.chPriorities[dropChID] - msgSize = s.buffer[dropChID][dropIdx].size - - // Drop Envelope for the lower priority flow and update the queue's - // buffer size - s.size -= msgSize - s.buffer[dropChID] = append(s.buffer[dropChID][:dropIdx], s.buffer[dropChID][dropIdx+1:]...) - - // add the incoming Envelope and update queue's buffer size - s.size += wEnv.size - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - } - - // We either dropped the incoming Enevelope or one from an existing - // lower priority flow. - s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", - "ch_id", chIDStr, - "priority", chPriority, - "capacity", s.capacity, - "msg_size", msgSize, - ) - } else { - // we have sufficient capacity to enqueue the incoming Envelope - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.size += wEnv.size - } - - default: - // perform the WDRR algorithm - for _, chDesc := range s.chDescs { - chID := ChannelID(chDesc.ID) - - // only consider non-empty flows - if len(s.buffer[chID]) > 0 { - // bump flow's quantum - s.deficits[chID] += s.quanta[chID] - - // grab the flow's current deficit counter and HoQ (wrapped) Envelope - d := s.deficits[chID] - we := s.buffer[chID][0] - - // While the flow is non-empty and we can send the current Envelope - // on the dequeueCh: - // - // 1. send the Envelope - // 2. update the scheduler's shared buffer's size - // 3. update the flow's deficit - // 4. remove from the flow's queue - // 5. 
grab the next HoQ Envelope and flow's deficit - for len(s.buffer[chID]) > 0 && d >= we.size { - s.metrics.PeerSendBytesTotal.With( - "chID", fmt.Sprint(chID), - "peer_id", string(we.envelope.To)).Add(float64(we.size)) - s.dequeueCh <- we.envelope - s.size -= we.size - s.deficits[chID] -= we.size - s.buffer[chID] = s.buffer[chID][1:] - - if len(s.buffer[chID]) > 0 { - d = s.deficits[chID] - we = s.buffer[chID][0] - } - } - } - - // reset the flow's deficit to zero if it is empty - if len(s.buffer[chID]) == 0 { - s.deficits[chID] = 0 - } - } - } - } -} diff --git a/internal/p2p/wdrr_queue_test.go b/internal/p2p/wdrr_queue_test.go deleted file mode 100644 index d49c77e76..000000000 --- a/internal/p2p/wdrr_queue_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package p2p - -import ( - "math" - "math/rand" - "testing" - "time" - - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -type testMessage = gogotypes.StringValue - -func TestWDRRQueue_EqualWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, - {ID: 0x02, Priority: 1, MaxSendBytes: 4}, - {ID: 0x03, Priority: 1, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 1000, 1000, 120) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - closer := tmsync.NewCloser() - - go func() { - timout := 10 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - maxMsgs := 5000 - minMsgs := 1000 - - for _, chDesc := range chDescs { - total := rng.Intn(maxMsgs-minMsgs) + minMsgs // total = rand[minMsgs, maxMsgs) - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - var ( - sum float64 - stdDev float64 - ) - - for _, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - sum += successRate - successRates[chID] = successRate - - // require some messages dropped - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - numFlows := float64(len(peerQueue.buffer)) - mean := sum / numFlows - - for _, successRate := range successRates { - stdDev += math.Pow(successRate-mean, 2) - } - - stdDev = math.Sqrt(stdDev / numFlows) - require.Less(t, stdDev, 0.02, "expected success rate 
standard deviation to be less than 2%") -} - -func TestWDRRQueue_DecreasingWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 18, MaxSendBytes: 4}, - {ID: 0x02, Priority: 10, MaxSendBytes: 4}, - {ID: 0x03, Priority: 2, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 0, 0, 500) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - for _, chDesc := range chDescs { - total := 1000 - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - closer := tmsync.NewCloser() - - go func() { - timout := 20 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - for i, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - successRates[chID] = successRate - - // Require some messages dropped. Note, the top weighted flows may not have - // any dropped if lower priority non-empty queues always exist. - if i > 2 { - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - // require channel 0x01 to have the highest success rate due to its weight - ch01Rate := successRates[ChannelID(chDescs[0].ID)] - for i := 1; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch01Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x02 to have the 2nd highest success rate due to its weight - ch02Rate := successRates[ChannelID(chDescs[1].ID)] - for i := 2; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch02Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x03 to have the 3rd highest success rate due to its weight - ch03Rate := successRates[ChannelID(chDescs[2].ID)] - for i := 3; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch03Rate, successRates[ChannelID(chDescs[i].ID)]) - } -} diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 0a2d39665..21b3ebe0a 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,7 +15,7 @@ var ( // separate testnet for each combination (Cartesian product) of options. 
	testnetCombinations = map[string][]interface{}{
 		"topology":      {"single", "quad", "large"},
-		"queueType":     {"priority"}, // "fifo", "wdrr"
+		"queueType":     {"priority"}, // "fifo"
 		"initialHeight": {0, 1000},
 		"initialState": {
 			map[string]string{},
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go
index 3a75b169e..1daa8b87c 100644
--- a/test/e2e/pkg/testnet.go
+++ b/test/e2e/pkg/testnet.go
@@ -347,7 +347,7 @@ func (n Node) Validate(testnet Testnet) error {
 		return fmt.Errorf("invalid mempool version %q", n.Mempool)
 	}
 	switch n.QueueType {
-	case "", "priority", "wdrr", "fifo":
+	case "", "priority", "fifo":
 	default:
 		return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType)
 	}

From 851d2e3bdeb4d5a508a67cd42b36cb0328a3f817 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 5 Oct 2021 16:23:15 -0400
Subject: [PATCH 069/174] mempool,rpc: add removetx rpc method (#7047)

Addresses one of the concerns with #7041.

Provides a mechanism (via the RPC interface) to delete a single
transaction, identified by its hash, from the mempool. The method
returns an error if the transaction cannot be found. Once the
transaction is removed from the mempool, it remains in the cache and
cannot be resubmitted until the cache is cleared or the transaction
expires from the cache.
---
 CHANGELOG_PENDING.md                      |  2 ++
 internal/consensus/replay_stubs.go        |  1 +
 internal/mempool/cache.go                 | 12 +++++-----
 internal/mempool/mempool.go               |  4 ++++
 internal/mempool/mock/mempool.go          |  1 +
 internal/mempool/tx.go                    | 15 ------------
 internal/mempool/v0/cache_test.go         |  4 ++--
 internal/mempool/v0/clist_mempool.go      | 22 ++++++++++--------
 internal/mempool/v0/clist_mempool_test.go |  4 ++--
 internal/mempool/v0/reactor.go            |  4 ++--
 internal/mempool/v1/mempool.go            | 28 +++++++++++++++++------
 internal/mempool/v1/mempool_test.go       | 12 +++++-----
 internal/mempool/v1/reactor.go            |  4 ++--
 internal/mempool/v1/tx.go                 | 21 ++++++++---------
 internal/mempool/v1/tx_test.go            | 12 +++++-----
 internal/rpc/core/mempool.go              |  4 ++++
 internal/rpc/core/routes.go               |  1 +
 light/rpc/client.go                       |  4 ++++
 rpc/client/http/http.go                   |  8 +++++++
 rpc/client/interface.go                   |  1 +
 rpc/client/local/local.go                 |  4 ++++
 rpc/openapi/openapi.yaml                  | 25 ++++++++++++++++++++
 types/mempool.go                          | 10 ++++----
 types/tx.go                               | 12 +++++-----
 24 files changed, 137 insertions(+), 78 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index c44c58ef0..d80e3ae61 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -30,6 +30,8 @@ Special thanks to external contributors on this release:
 
 ### FEATURES
 
+- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer.
(@tychoish) + ### IMPROVEMENTS ### BUG FIXES diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 361ac6ec8..bc8c11cd9 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -24,6 +24,7 @@ func (emptyMempool) Size() int { return 0 } func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } +func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 43174f106..3cd45d2bc 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -31,14 +31,14 @@ var _ TxCache = (*LRUTxCache)(nil) type LRUTxCache struct { mtx tmsync.Mutex size int - cacheMap map[[TxKeySize]byte]*list.Element + cacheMap map[types.TxKey]*list.Element list *list.List } func NewLRUTxCache(cacheSize int) *LRUTxCache { return &LRUTxCache{ size: cacheSize, - cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize), + cacheMap: make(map[types.TxKey]*list.Element, cacheSize), list: list.New(), } } @@ -53,7 +53,7 @@ func (c *LRUTxCache) Reset() { c.mtx.Lock() defer c.mtx.Unlock() - c.cacheMap = make(map[[TxKeySize]byte]*list.Element, c.size) + c.cacheMap = make(map[types.TxKey]*list.Element, c.size) c.list.Init() } @@ -61,7 +61,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() moved, ok := c.cacheMap[key] if ok { @@ -72,7 +72,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { if c.list.Len() >= c.size { front := c.list.Front() if front != nil { - frontKey := front.Value.([TxKeySize]byte) + frontKey := front.Value.(types.TxKey) delete(c.cacheMap, frontKey) c.list.Remove(front) } @@ -88,7 +88,7 @@ func (c *LRUTxCache) Remove(tx types.Tx) { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() e := c.cacheMap[key] delete(c.cacheMap, key) diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index d679b3506..6e3955dc3 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -32,6 +32,10 @@ type Mempool interface { // its validity and whether it should be added to the mempool. CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. + RemoveTxByKey(txKey types.TxKey) error + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes // bytes total with the condition that the total gasWanted must be less than // maxGas. 
diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index bf9ce8bd0..8e6f0c7bf 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -20,6 +20,7 @@ func (Mempool) Size() int { return 0 } func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } +func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (Mempool) Update( diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index 860d3d3b4..adafdf85e 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,24 +1,9 @@ package mempool import ( - "crypto/sha256" - "github.com/tendermint/tendermint/types" ) -// TxKeySize defines the size of the transaction's key used for indexing. -const TxKeySize = sha256.Size - -// TxKey is the fixed length array key used as an index. -func TxKey(tx types.Tx) [TxKeySize]byte { - return sha256.Sum256(tx) -} - -// TxHashFromBytes returns the hash of a transaction from raw bytes. -func TxHashFromBytes(tx []byte) []byte { - return types.Tx(tx).Hash() -} - // TxInfo are parameters that get passed when attempting to add a tx to the // mempool. type TxInfo struct { diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go index 389c0806c..5bf2c7603 100644 --- a/internal/mempool/v0/cache_test.go +++ b/internal/mempool/v0/cache_test.go @@ -61,7 +61,7 @@ func TestCacheAfterUpdate(t *testing.T) { require.NotEqual(t, len(tc.txsInCache), counter, "cache larger than expected on testcase %d", tcIndex) - nodeVal := node.Value.([sha256.Size]byte) + nodeVal := node.Value.(types.TxKey) expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) // Reference for reading the errors: // >>> sha256('\x00').hexdigest() @@ -71,7 +71,7 @@ func TestCacheAfterUpdate(t *testing.T) { // >>> sha256('\x02').hexdigest() // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) + require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) counter++ node = node.Next() } diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index f3571ede0..7816730c1 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -3,6 +3,7 @@ package v0 import ( "bytes" "context" + "errors" "fmt" "sync" "sync/atomic" @@ -240,7 +241,7 @@ func (mem *CListMempool) CheckTx( // Note it's possible a tx is still in the cache but no longer in the mempool // (eg. after committing a block, txs are removed from mempool but not cache), // so we only record the sender for txs still in the mempool. 
-	if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
+	if e, ok := mem.txsMap.Load(tx.Key()); ok {
 		memTx := e.(*clist.CElement).Value.(*mempoolTx)
 		_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
 		// TODO: consider punishing peer for dups,
@@ -327,7 +328,7 @@ func (mem *CListMempool) reqResCb(
 // - resCbFirstTime (lock not held) if tx is valid
 func (mem *CListMempool) addTx(memTx *mempoolTx) {
 	e := mem.txs.PushBack(memTx)
-	mem.txsMap.Store(mempool.TxKey(memTx.tx), e)
+	mem.txsMap.Store(memTx.tx.Key(), e)
 	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
 	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
 }
@@ -338,7 +339,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
 func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
 	mem.txs.Remove(elem)
 	elem.DetachPrev()
-	mem.txsMap.Delete(mempool.TxKey(tx))
+	mem.txsMap.Delete(tx.Key())
 	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
 
 	if removeFromCache {
@@ -347,13 +348,16 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC
 }
 
 // RemoveTxByKey removes a transaction from the mempool by its TxKey index.
-func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) {
+func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
 	if e, ok := mem.txsMap.Load(txKey); ok {
 		memTx := e.(*clist.CElement).Value.(*mempoolTx)
 		if memTx != nil {
-			mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
+			mem.removeTx(memTx.tx, e.(*clist.CElement), false)
+			return nil
 		}
+		return errors.New("invalid transaction found")
 	}
+	return errors.New("transaction not found")
 }
 
 func (mem *CListMempool) isFull(txSize int) error {
@@ -409,7 +413,7 @@ func (mem *CListMempool) resCbFirstTime(
 		mem.addTx(memTx)
 		mem.logger.Debug(
 			"added good transaction",
-			"tx", mempool.TxHashFromBytes(tx),
+			"tx", types.Tx(tx).Hash(),
 			"res", r,
 			"height", memTx.height,
 			"total", mem.Size(),
@@ -419,7 +423,7 @@ func (mem *CListMempool) resCbFirstTime(
 		// ignore bad transaction
 		mem.logger.Debug(
 			"rejected bad transaction",
-			"tx", mempool.TxHashFromBytes(tx),
+			"tx", types.Tx(tx).Hash(),
 			"peerID", peerP2PID,
 			"res", r,
 			"err", postCheckErr,
@@ -460,7 +464,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
 			// Good, nothing to do.
 		} else {
 			// Tx became invalidated due to newly committed block.
-			mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr)
+			mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
 			// NOTE: we remove tx from the cache because it might be good later
 			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
 		}
@@ -598,7 +602,7 @@ func (mem *CListMempool) Update(
 		// Mempool after:
 		//   100
 		// https://github.com/tendermint/tendermint/issues/3322.
- if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok { + if e, ok := mem.txsMap.Load(tx.Key()); ok { mem.removeTx(tx, e.(*clist.CElement), false) } } diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index e55942e48..b61a8333e 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -544,9 +544,9 @@ func TestMempoolTxsBytes(t *testing.T) { err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) require.NoError(t, err) assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true) + assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key())) assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true) + assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key())) assert.EqualValues(t, 8, mp.SizeBytes()) } diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index b8aac3b5c..80362a04f 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -171,7 +171,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { for _, tx := range protoTxs { if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -378,7 +378,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } r.Logger.Debug( "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index 34e524159..a12fbc51b 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -3,6 +3,7 @@ package v1 import ( "bytes" "context" + "errors" "fmt" "sync/atomic" "time" @@ -256,7 +257,7 @@ func (txmp *TxMempool) CheckTx( return err } - txHash := mempool.TxKey(tx) + txHash := tx.Key() // We add the transaction to the mempool's cache and if the transaction already // exists, i.e. false is returned, then we check if we've seen this transaction @@ -304,6 +305,19 @@ func (txmp *TxMempool) CheckTx( return nil } +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) + return nil + } + + return errors.New("transaction not found") +} + // Flush flushes out the mempool. It acquires a read-lock, fetches all the // transactions currently in the transaction store and removes each transaction // from the store and all indexes and finally resets the cache. 
@@ -451,7 +465,7 @@ func (txmp *TxMempool) Update(
 		}
 
 		// remove the committed transaction from the transaction store and indexes
-		if wtx := txmp.txStore.GetTxByHash(mempool.TxKey(tx)); wtx != nil {
+		if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil {
 			txmp.removeTx(wtx, false)
 		}
 	}
@@ -629,7 +643,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response)
 	tx := req.GetCheckTx().Tx
 	wtx := txmp.recheckCursor.Value.(*WrappedTx)
 	if !bytes.Equal(tx, wtx.tx) {
-		panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), mempool.TxKey(tx)))
+		panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key()))
 	}
 
 	// Only evaluate transactions that have not been removed. This can happen
@@ -647,7 +661,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response)
 			txmp.logger.Debug(
 				"existing transaction no longer valid; failed re-CheckTx callback",
 				"priority", wtx.priority,
-				"tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(wtx.tx)),
+				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
 				"err", err,
 				"code", checkTxRes.CheckTx.Code,
 			)
@@ -784,13 +798,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
 // the height and time based indexes.
 func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
 	now := time.Now()
-	expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx)
+	expiredTxs := make(map[types.TxKey]*WrappedTx)
 
 	if txmp.config.TTLNumBlocks > 0 {
 		purgeIdx := -1
 		for i, wtx := range txmp.heightIndex.txs {
 			if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
-				expiredTxs[mempool.TxKey(wtx.tx)] = wtx
+				expiredTxs[wtx.tx.Key()] = wtx
 				purgeIdx = i
 			} else {
 				// since the index is sorted, we know no other txs can be purged
@@ -807,7 +821,7 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
 		purgeIdx := -1
 		for i, wtx := range txmp.timestampIndex.txs {
 			if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
-				expiredTxs[mempool.TxKey(wtx.tx)] = wtx
+				expiredTxs[wtx.tx.Key()] = wtx
 				purgeIdx = i
 			} else {
 				// since the index is sorted, we know no other txs can be purged
diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go
index 3b7eb02d0..8bed5520a 100644
--- a/internal/mempool/v1/mempool_test.go
+++ b/internal/mempool/v1/mempool_test.go
@@ -226,10 +226,10 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
 	require.Equal(t, len(tTxs), txmp.Size())
 	require.Equal(t, int64(5690), txmp.SizeBytes())
 
-	txMap := make(map[[mempool.TxKeySize]byte]testTx)
+	txMap := make(map[types.TxKey]testTx)
 	priorities := make([]int64, len(tTxs))
 	for i, tTx := range tTxs {
-		txMap[mempool.TxKey(tTx.tx)] = tTx
+		txMap[tTx.tx.Key()] = tTx
 		priorities[i] = tTx.priority
 	}
 
@@ -241,7 +241,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
 	ensurePrioritized := func(reapedTxs types.Txs) {
 		reapedPriorities := make([]int64, len(reapedTxs))
 		for i, rTx := range reapedTxs {
-			reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority
+			reapedPriorities[i] = txMap[rTx.Key()].priority
 		}
 
 		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
@@ -276,10 +276,10 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
 	require.Equal(t, len(tTxs), txmp.Size())
 	require.Equal(t, int64(5690), txmp.SizeBytes())
 
-	txMap := make(map[[mempool.TxKeySize]byte]testTx)
+	txMap := make(map[types.TxKey]testTx)
 	priorities := make([]int64, len(tTxs))
 	for i, tTx := range tTxs {
-		txMap[mempool.TxKey(tTx.tx)] = tTx
+		txMap[tTx.tx.Key()] = tTx
priorities[i] = tTx.priority } @@ -291,7 +291,7 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { ensurePrioritized := func(reapedTxs types.Txs) { reapedPriorities := make([]int64, len(reapedTxs)) for i, rTx := range reapedTxs { - reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + reapedPriorities[i] = txMap[rTx.Key()].priority } require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 75f89d07e..eff5b7ec0 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -178,7 +178,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { for _, tx := range protoTxs { if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -386,7 +386,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } r.Logger.Debug( "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go index 15173b91f..c5b7ca82f 100644 --- a/internal/mempool/v1/tx.go +++ b/internal/mempool/v1/tx.go @@ -6,7 +6,6 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) @@ -17,7 +16,7 @@ type WrappedTx struct { tx types.Tx // hash defines the transaction hash and the primary key used in the mempool - hash [mempool.TxKeySize]byte + hash types.TxKey // height defines the height at which the transaction was validated at height int64 @@ -66,14 +65,14 @@ func (wtx *WrappedTx) Size() int { // need mutative access. type TxStore struct { mtx tmsync.RWMutex - hashTxs map[[mempool.TxKeySize]byte]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application + hashTxs map[types.TxKey]*WrappedTx // primary index + senderTxs map[string]*WrappedTx // sender is defined by the ABCI application } func NewTxStore() *TxStore { return &TxStore{ senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[[mempool.TxKeySize]byte]*WrappedTx), + hashTxs: make(map[types.TxKey]*WrappedTx), } } @@ -110,7 +109,7 @@ func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { } // GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { +func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -119,7 +118,7 @@ func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { // IsTxRemoved returns true if a transaction by hash is marked as removed and // false otherwise. -func (txs *TxStore) IsTxRemoved(hash [mempool.TxKeySize]byte) bool { +func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -142,7 +141,7 @@ func (txs *TxStore) SetTx(wtx *WrappedTx) { txs.senderTxs[wtx.sender] = wtx } - txs.hashTxs[mempool.TxKey(wtx.tx)] = wtx + txs.hashTxs[wtx.tx.Key()] = wtx } // RemoveTx removes a *WrappedTx from the transaction store. 
It deletes all @@ -155,13 +154,13 @@ func (txs *TxStore) RemoveTx(wtx *WrappedTx) { delete(txs.senderTxs, wtx.sender) } - delete(txs.hashTxs, mempool.TxKey(wtx.tx)) + delete(txs.hashTxs, wtx.tx.Key()) wtx.removed = true } // TxHasPeer returns true if a transaction by hash has a given peer ID and false // otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool { +func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -179,7 +178,7 @@ func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool // We return true if we've already recorded the given peer for this transaction // and false otherwise. If the transaction does not exist by hash, we return // (nil, false). -func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID uint16) (*WrappedTx, bool) { +func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { txs.mtx.Lock() defer txs.mtx.Unlock() diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/v1/tx_test.go index c5d488669..fb4beafab 100644 --- a/internal/mempool/v1/tx_test.go +++ b/internal/mempool/v1/tx_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/types" ) func TestTxStore_GetTxBySender(t *testing.T) { @@ -39,7 +39,7 @@ func TestTxStore_GetTxByHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.Nil(t, res) @@ -58,7 +58,7 @@ func TestTxStore_SetTx(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) res := txs.GetTxByHash(key) @@ -81,10 +81,10 @@ func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) - res, ok := txs.GetOrSetPeerByTxHash(mempool.TxKey([]byte("test_tx_2")), 15) + res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15) require.Nil(t, res) require.False(t, ok) @@ -110,7 +110,7 @@ func TestTxStore_RemoveTx(t *testing.T) { txs.SetTx(wtx) - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.NotNil(t, res) diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 6cd05348f..c20f02032 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -151,3 +151,7 @@ func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes. 
}
 	return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil
 }
+
+func (env *Environment) RemoveTx(ctx *rpctypes.Context, txkey types.TxKey) error {
+	return env.Mempool.RemoveTxByKey(txkey)
+}
diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go
index 73eaaf14c..fe99d2118 100644
--- a/internal/rpc/core/routes.go
+++ b/internal/rpc/core/routes.go
@@ -28,6 +28,7 @@ func (env *Environment) GetRoutes() RoutesMap {
 		"block_results": rpc.NewRPCFunc(env.BlockResults, "height", true),
 		"commit":        rpc.NewRPCFunc(env.Commit, "height", true),
 		"check_tx":      rpc.NewRPCFunc(env.CheckTx, "tx", true),
+		"remove_tx":     rpc.NewRPCFunc(env.RemoveTx, "txkey", false),
 		"tx":            rpc.NewRPCFunc(env.Tx, "hash,prove", true),
 		"tx_search":     rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false),
 		"block_search":  rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false),
diff --git a/light/rpc/client.go b/light/rpc/client.go
index 4461028cd..dc745542e 100644
--- a/light/rpc/client.go
+++ b/light/rpc/client.go
@@ -212,6 +212,10 @@ func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultChe
 	return c.next.CheckTx(ctx, tx)
 }
 
+func (c *Client) RemoveTx(ctx context.Context, txKey types.TxKey) error {
+	return c.next.RemoveTx(ctx, txKey)
+}
+
 func (c *Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	return c.next.NetInfo(ctx)
 }
diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go
index 65d0f434b..d0c1d5621 100644
--- a/rpc/client/http/http.go
+++ b/rpc/client/http/http.go
@@ -315,6 +315,14 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.Re
 	return result, nil
 }
 
+func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error {
+	_, err := c.caller.Call(ctx, "remove_tx", map[string]interface{}{"txkey": txKey}, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	result := new(coretypes.ResultNetInfo)
 	_, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result)
diff --git a/rpc/client/interface.go b/rpc/client/interface.go
index 7b6a9bd20..474eb9937 100644
--- a/rpc/client/interface.go
+++ b/rpc/client/interface.go
@@ -146,6 +146,7 @@ type MempoolClient interface {
 	UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error)
 	NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error)
 	CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)
+	RemoveTx(context.Context, types.TxKey) error
 }
 
 // EvidenceClient is used for submitting an evidence of the malicious
diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go
index 108510b02..21ca6e6f1 100644
--- a/rpc/client/local/local.go
+++ b/rpc/client/local/local.go
@@ -116,6 +116,10 @@ func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultChec
 	return c.env.CheckTx(c.ctx, tx)
 }
 
+func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error {
+	return c.env.Mempool.RemoveTxByKey(txKey)
+}
+
 func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) {
 	return c.env.NetInfo(c.ctx)
 }
diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml
index a32d44986..83d85be8f 100644
--- a/rpc/openapi/openapi.yaml
+++ b/rpc/openapi/openapi.yaml
@@ -237,6 +237,31 @@ paths:
             application/json:
               schema:
                 $ref: "#/components/schemas/ErrorResponse"
+
+  /remove_tx:
+    get:
+      summary: Removes a transaction from the mempool.
+      tags:
+        - TxKey
+      operationId: remove_tx
+      parameters:
+        - in: query
+          name: txkey
+          required: true
+          schema:
+            type: string
+            example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+          description: The transaction key
+      responses:
+        "200":
+          description: empty response.
+        "500":
+          description: empty error.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
+
   /subscribe:
     get:
       summary: Subscribe for events via WebSocket.
diff --git a/types/mempool.go b/types/mempool.go
index c739796af..fa0f8a208 100644
--- a/types/mempool.go
+++ b/types/mempool.go
@@ -1,14 +1,16 @@
 package types
 
 import (
+	"crypto/sha256"
 	"errors"
 	"fmt"
 )
 
-var (
-	// ErrTxInCache is returned to the client if we saw tx earlier
-	ErrTxInCache = errors.New("tx already exists in cache")
-)
+// ErrTxInCache is returned to the client if we saw tx earlier
+var ErrTxInCache = errors.New("tx already exists in cache")
+
+// TxKey is the fixed length array key used as an index.
+type TxKey [sha256.Size]byte
 
 // ErrTxTooLarge defines an error when a transaction is too big to be sent in a
 // message to other peers.
diff --git a/types/tx.go b/types/tx.go
index 92df92f13..19ee41dac 100644
--- a/types/tx.go
+++ b/types/tx.go
@@ -2,6 +2,7 @@ package types
 
 import (
 	"bytes"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 
@@ -16,15 +17,14 @@ import (
 // Might we want types here ?
 type Tx []byte
 
+// Key produces a fixed-length key for use in indexing.
+func (tx Tx) Key() TxKey { return sha256.Sum256(tx) }
+
 // Hash computes the TMHASH hash of the wire encoded transaction.
-func (tx Tx) Hash() []byte {
-	return tmhash.Sum(tx)
-}
+func (tx Tx) Hash() []byte { return tmhash.Sum(tx) }
 
 // String returns the hex-encoded transaction as a string.
-func (tx Tx) String() string {
-	return fmt.Sprintf("Tx{%X}", []byte(tx))
-}
+func (tx Tx) String() string { return fmt.Sprintf("Tx{%X}", []byte(tx)) }
 
 // Txs is a slice of Tx.
 type Txs []Tx

From 109814c85aa93ca735c63e62f55cd825b75808ac Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Tue, 5 Oct 2021 14:10:11 -0700
Subject: [PATCH 070/174] Clarify decision record for ADR-065. (#7062)

While discussing a question about the indexing interface (#7044), we
found some confusion about the intent of the design decisions in ADR
065.

Based on discussion with the original authors of the ADR, this commit
adds some language to the Decisions section to spell out the
intentions more clearly, and to call out future work that this ADR
did not explicitly decide about.
---
 .../adr-065-custom-event-indexing.md | 34 +++++++++++++++----
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/docs/architecture/adr-065-custom-event-indexing.md b/docs/architecture/adr-065-custom-event-indexing.md
index b5c86ecfa..83a96de48 100644
--- a/docs/architecture/adr-065-custom-event-indexing.md
+++ b/docs/architecture/adr-065-custom-event-indexing.md
@@ -25,6 +25,7 @@
 - April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
 - May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
 - Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair)
+- Oct 5, 2021: Clarify goals and implementation changes (@creachadair)
 
 ## Status
 
@@ -73,19 +74,38 @@ the database used.
 
 We will adopt a similar approach to that of the Cosmos SDK's `KVStore` state
 listening described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-038-state-listening.md).
-Namely, we will perform the following:
+We will implement the following changes:
 
 - Introduce a new interface, `EventSink`, that all data sinks must implement.
 - Augment the existing `tx_index.indexer` configuration to now accept a series
-  of one or more indexer types, i.e sinks.
+  of one or more indexer types, i.e., sinks.
 - Combine the current `TxIndexer` and `BlockIndexer` into a single
   `KVEventSink` that implements the `EventSink` interface.
-- Introduce an additional `EventSink` that is backed by [PostgreSQL](https://www.postgresql.org/).
-  - Implement the necessary schemas to support both block and transaction event
-    indexing.
+- Introduce an additional `EventSink` implementation that is backed by
+  [PostgreSQL](https://www.postgresql.org/).
+  - Implement the necessary schemas to support both block and transaction event indexing.
 - Update `IndexerService` to use a series of `EventSinks`.
-- Proxy queries to the relevant sink's native query layer.
-- Update all relevant RPC methods.
+
+In addition:
+
+- The Postgres indexer implementation will _not_ implement the proprietary `kv`
+  query language. Users wishing to write queries against the Postgres indexer
+  will connect to the underlying DBMS directly and use SQL queries based on the
+  indexing schema.
+
+  Future custom indexer implementations will not be required to support the
+  proprietary query language either.
+
+- For now, the existing `kv` indexer will be left in place with its current
+  query support, but will be marked as deprecated in a subsequent release, and
+  the documentation will be updated to encourage users who need to query the
+  event index to migrate to the Postgres indexer.
+
+- In the future, we may remove the `kv` indexer entirely, or replace it with a
+  different implementation; that decision is deferred as future work.
+
+- In the future, we may remove the index query endpoints from the RPC service
+  entirely; that decision is deferred as future work, but recommended.
 
 ## Detailed Design

From 72aee478471f74222994ee92dcbeb600c84fa8cd Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 6 Oct 2021 00:08:54 -0400
Subject: [PATCH 071/174] ci: 0.35.x nightly should run from master and
 checkout the release branch (#7067)

Nightly jobs for backport branches run their CI from the master
branch, but the configuration was missing a step to check out the
correct release ref.
---
 .github/workflows/e2e-nightly-35x.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml
index b78812e92..13d4b3dc8 100644
--- a/.github/workflows/e2e-nightly-35x.yml
+++ b/.github/workflows/e2e-nightly-35x.yml
@@ -26,6 +26,8 @@ jobs:
           go-version: '1.17'
 
       - uses: actions/checkout@v2.3.4
+        with:
+          ref: 'v0.35.x'
 
       - name: Build
         working-directory: test/e2e

From 0ef1a12186ab64a02f0005e3add1f2045e6afe84 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 6 Oct 2021 00:11:19 -0400
Subject: [PATCH 072/174] ci: fix p2p configuration for e2e tests (#7066)

My earlier p2p cleanup code removed support for the p2p tests from the
e2e generator and runner, but missed removing the CI configuration.
This patch remedies that.
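After this change the nightly job matrix varies only the test group. As a
sketch, the relevant workflow fragment is expected to look roughly like the
following (assembled from the diff below; the surrounding job definition and
other fields are elided, so treat this as illustrative rather than the exact
file contents):

    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']

    # ...

    - name: Generate testnets
      working-directory: test/e2e
      run: ./build/generator -g 4 -d networks/nightly/

    - name: Run p2p testnets in group ${{ matrix.group }}
      working-directory: test/e2e
      run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml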
---
 .github/workflows/e2e-nightly-master.yml | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml
index 95cb859e8..28f8b3e4b 100644
--- a/.github/workflows/e2e-nightly-master.yml
+++ b/.github/workflows/e2e-nightly-master.yml
@@ -16,7 +16,6 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        p2p: ['legacy', 'new', 'hybrid']
         group: ['00', '01', '02', '03']
     runs-on: ubuntu-latest
     timeout-minutes: 60
@@ -35,11 +34,11 @@ jobs:
       - name: Generate testnets
         working-directory: test/e2e
         # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+        run: ./build/generator -g 4 -d networks/nightly/
 
-      - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
+      - name: Run p2p testnets in group ${{ matrix.group }}
         working-directory: test/e2e
-        run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
+        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
 
   e2e-nightly-fail-2:
     needs: e2e-nightly-test-2

From e4d6f6df096fe565b6eb70ac44c4f3873882ab64 Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Wed, 6 Oct 2021 11:59:21 +0200
Subject: [PATCH 073/174] docs: create separate releases doc (#7040)

---
 CONTRIBUTING.md    | 144 ----------------------------------
 RELEASES.md        | 161 +++++++++++++++++++++++++++++++++++++++++
 version/version.go |   2 +-
 3 files changed, 162 insertions(+), 145 deletions(-)
 create mode 100644 RELEASES.md

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 33b8cf6a7..16bef07cc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -227,150 +227,6 @@ Fixes #nnnn
 Each PR should have one commit once it lands on `master`; this can be accomplished by
 using the "squash and merge" button on Github. Be sure to edit your commit message, though!
 
-### Release procedure
-
-#### A note about backport branches
-Tendermint's `master` branch is under active development.
-Releases are specified using tags and are built from long-lived "backport" branches.
-Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
-and the backport branches have names like `v0.34.x` or `v0.33.x`
-(literally, `x`; it is not a placeholder in this case).
-
-As non-breaking changes land on `master`, they should also be backported (cherry-picked)
-to these backport branches.
-
-We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
-to the needed branch. There should be a label for any backport branch that you'll be targeting.
-To notify the bot to backport a pull request, mark the pull request with
-the label `S:backport-to-`.
-Once the original pull request is merged, the bot will try to cherry-pick the pull request
-to the backport branch. If the bot fails to backport, it will open a pull request.
-The author of the original pull request is responsible for solving the conflicts and
-merging the pull request.
-
-#### Creating a backport branch
-
-If this is the first release candidate for a major release, you get to have the honor of creating
-the backport branch!
-
-Note that, after creating the backport branch, you'll also need to update the tags on `master`
-so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
-that is "greater than" the backport branches tags. See #6072 for more context.
- -In the following example, we'll assume that we're making a backport branch for -the 0.35.x line. - -1. Start on `master` -2. Create the backport branch: - `git checkout -b v0.35.x` -3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: - `git tag -a v0.36.0-dev; git push v0.36.0-dev` -4. Create a new workflow to run the e2e nightlies for this backport branch. - (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml - for an example.) - -#### Release candidates - -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to -create RCs, and we build them off of backport branches. - -Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end -(for example, `v0.35.0-rc0`). - -(Note that branches and tags _cannot_ have the same names, so it's important that these branches -have distinct names from the tags/release names.) - -If this is the first RC for a major release, you'll have to make a new backport branch (see above). -Otherwise: - -1. Start from the backport branch (e.g. `v0.35.x`). -1. Run the integration tests and the e2e nightlies - (which can be triggered from the Github UI; - e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). -1. Prepare the changelog: - - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -1. Open a PR with these changes against the backport branch. -1. Once these changes have landed on the backport branch, be sure to pull them back down locally. -2. Once you have the changes locally, create the new tag, specifying a name and a tag "message": - `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0` -3. Push the tag back up to origin: - `git push origin v0.35.0-rc0` - Now the tag should be available on the repo's releases page. -4. Future RCs will continue to be built off of this branch. - -Note that this process should only be used for "true" RCs-- -release candidates that, if successful, will be the next release. -For more experimental "RCs," create a new, short-lived branch and tag that instead. - -#### Major release - -This major release process assumes that this release was preceded by release candidates. -If there were no release candidates, begin by creating a backport branch, as described above. - -1. Start on the backport branch (e.g. `v0.35.x`) -2. Run integration tests and the e2e nightlies. -3. Prepare the release: - - "Squash" changes from the changelog entries for the RCs into a single entry, - and add all changes included in `CHANGELOG_PENDING.md`. - (Squashing includes both combining all entries, as well as removing or simplifying - any intra-RC changes. It may also help to alphabetize the entries by package name.) - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. 
- - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -4. Open a PR with these changes against the backport branch. -5. Once these changes are on the backport branch, push a tag with prepared release details. - This will trigger the actual release `v0.35.0`. - - `git tag -a v0.35.0 -m 'Release v0.35.0'` - - `git push origin v0.35.0` -7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. -8. Add the release to the documentation site generator config (see - [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary: - - Start on branch `master`. - - Add a new line at the bottom of [`docs/versions`](./docs/versions) to - ensure the newest release is the default for the landing page. - - Add a new entry to `themeConfig.versions` in - [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the - release in the dropdown versions menu. - -#### Minor release (point releases) - -Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. -As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. - -Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. - -To create a minor release: - -1. Checkout the long-lived backport branch: `git checkout v0.35.x` -2. Run integration tests (`make test_integrations`) and the nightlies. -3. Check out a new branch and prepare the release: - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump the ABCI version number, if necessary. - (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) -4. Open a PR with these changes that will land them back on `v0.35.x` -5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - - `git tag -a v0.35.1 -m 'Release v0.35.1'` - - `git push origin v0.35.1` -6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. - - Do not merge the backport branch into master. - ## Testing ### Unit tests diff --git a/RELEASES.md b/RELEASES.md new file mode 100644 index 000000000..8d9bc2b8e --- /dev/null +++ b/RELEASES.md @@ -0,0 +1,161 @@ +# Releases + +Tendermint uses [semantic versioning](https://semver.org/) with each release following +a `vX.Y.Z` format. The `master` branch is used for active development and thus it's +advisable not to build against it. + +The latest changes are always initially merged into `master`. +Releases are specified using tags and are built from long-lived "backport" branches +that are cut from `master` when the release process begins. +Each release "line" (e.g. 
0.34 or 0.33) has its own long-lived backport branch,
+and the backport branches have names like `v0.34.x` or `v0.33.x`
+(literally, `x`; it is not a placeholder in this case). Tendermint only
+maintains the last two releases at a time (the oldest release is predominantly
+just security patches).
+
+## Backporting
+
+As non-breaking changes land on `master`, they should also be backported
+to these backport branches.
+
+We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
+to the needed branch. There should be a label for any backport branch that you'll be targeting.
+To notify the bot to backport a pull request, mark the pull request with the label corresponding
+to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
+Once the original pull request is merged, the bot will try to cherry-pick the pull request
+to the backport branch. If the bot fails to backport, it will open a pull request.
+The author of the original pull request is responsible for solving the conflicts and
+merging the pull request.
+
+### Creating a backport branch
+
+If this is the first release candidate for a major release, you get to have the honor of creating
+the backport branch!
+
+Note that, after creating the backport branch, you'll also need to update the
+tags on `master` so that `go mod` is able to order the branches correctly. You
+should tag `master` with a "dev" tag that is "greater than" the backport
+branches' tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
+for more context.
+
+In the following example, we'll assume that we're making a backport branch for
+the 0.35.x line.
+
+1. Start on `master`
+2. Create and push the backport branch:
+   `git checkout -b v0.35.x; git push origin v0.35.x`
+3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
+   `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev`
+4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch.
+   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml
+   for an example.)
+5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
+   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
+   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
+
+## Release candidates
+
+Before creating an official release, especially a major release, we may want to create a
+release candidate (RC) for our friends and partners to test out. We use git tags to
+create RCs, and we build them off of backport branches.
+
+Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
+(for example, `v0.35.0-rc0`).
+
+(Note that branches and tags _cannot_ have the same names, so it's important that these branches
+have distinct names from the tags/release names.)
+
+If this is the first RC for a major release, you'll have to make a new backport branch (see above).
+Otherwise:
+
+1. Start from the backport branch (e.g. `v0.35.x`).
+2. Run the integration tests and the e2e nightlies
+   (which can be triggered from the GitHub UI;
+   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
+3. Prepare the changelog:
+   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
+     its own changelog section.
These will be squashed when the final candidate is released.
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
+     all PRs
+   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
+     or other upgrading flows.
+   - Bump TMVersionDefault version in `version.go`
+   - Bump P2P and block protocol versions in `version.go`, if necessary.
+     Check the changelog for breaking changes in these components.
+   - Bump ABCI protocol version in `version.go`, if necessary
+4. Open a PR with these changes against the backport branch.
+5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
+6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
+   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
+7. Push the tag back up to origin:
+   `git push origin v0.35.0-rc0`
+   Now the tag should be available on the repo's releases page.
+8. Future RCs will continue to be built off of this branch.
+
+Note that this process should only be used for "true" RCs--
+release candidates that, if successful, will be the next release.
+For more experimental "RCs," create a new, short-lived branch and tag that instead.
+
+## Major release
+
+This major release process assumes that this release was preceded by release candidates.
+If there were no release candidates, begin by creating a backport branch, as described above.
+
+1. Start on the backport branch (e.g. `v0.35.x`)
+2. Run integration tests (`make test_integrations`) and the e2e nightlies.
+3. Prepare the release:
+   - "Squash" changes from the changelog entries for the RCs into a single entry,
+     and add all changes included in `CHANGELOG_PENDING.md`.
+     (Squashing includes both combining all entries, as well as removing or simplifying
+     any intra-RC changes. It may also help to alphabetize the entries by package name.)
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
+     all PRs
+   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
+     or other upgrading flows.
+   - Bump TMVersionDefault version in `version.go`
+   - Bump P2P and block protocol versions in `version.go`, if necessary
+   - Bump ABCI protocol version in `version.go`, if necessary
+4. Open a PR with these changes against the backport branch.
+5. Once these changes are on the backport branch, push a tag with prepared release details.
+   This will trigger the actual release `v0.35.0`.
+   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
+   - `git push origin v0.35.0`
+6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
+7. Add the release to the documentation site generator config (see
+   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
+   - Start on branch `master`.
+   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
+     ensure the newest release is the default for the landing page.
+   - Add a new entry to `themeConfig.versions` in
+     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
+     release in the dropdown versions menu.
+
+## Minor release (point releases)
+
+Minor releases are done differently from major releases: They are built off of
+long-lived backport branches, rather than from master. As non-breaking changes
+land on `master`, they should also be backported into these backport branches.
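When the Mergify bot cannot cherry-pick a change cleanly, the backport has to be finished by hand. The sketch below shows one way to do that, mirroring the bot's convention of opening a pull request against the backport branch; the working branch name, PR number, and commit hash are illustrative placeholders, not values from a real release:

```sh
# Bring the long-lived backport branch up to date.
git checkout v0.35.x
git pull origin v0.35.x

# Do the backport on a working branch so it can go through review,
# just as the bot's automated backports do.
git checkout -b backport-1234-to-v0.35.x

# Cherry-pick the merged commit from master; -x records the original
# commit hash in the new message, which keeps the backport auditable.
git cherry-pick -x <commit-hash>

# Resolve any conflicts, then push and open a PR against v0.35.x.
git push origin backport-1234-to-v0.35.x
```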
+
+Minor releases don't have release candidates by default, although any tricky
+changes may merit a release candidate.
+
+To create a minor release:
+
+1. Check out the long-lived backport branch: `git checkout v0.35.x`
+2. Run integration tests (`make test_integrations`) and the nightlies.
+3. Check out a new branch and prepare the release:
+   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
+   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
+   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh `
+   - Reset the `CHANGELOG_PENDING.md`
+   - Bump the TMVersionDefault in `version.go`
+   - Bump the ABCI version number, if necessary.
+     (Note that ABCI follows semver, and that ABCI versions are the only versions
+     which can change during minor releases, and only field additions are valid minor changes.)
+4. Open a PR with these changes that will land them back on `v0.35.x`
+5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
+   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
+   - `git push origin v0.35.1`
+6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
+   - Remove all `R:minor` labels from the pull requests that were included in the release.
+   - Do not merge the backport branch into master.
diff --git a/version/version.go b/version/version.go
index 3fb08652e..e42952f77 100644
--- a/version/version.go
+++ b/version/version.go
@@ -10,7 +10,7 @@ const (
 	// TMVersionDefault is the used as the fallback version of Tendermint Core
 	// when not using git describe. It is formatted with semantic versioning.
-	TMVersionDefault = "0.34.11"
+	TMVersionDefault = "0.35.0-unreleased"
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.17.0"

From e53f92ba9cfceb49c1588775f6a9c700c22d4c0a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 6 Oct 2021 09:14:55 -0400
Subject: [PATCH 074/174] build(deps): Bump github.com/adlio/schema from 1.1.13 to 1.1.14 (#7069)

Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.1.13 to 1.1.14.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.1.13...v1.1.14)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 ++---- go.sum | 62 ++++++++++++++++++++++++++++++++++++---------------------- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 4eff73283..cfd8e7fe7 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,7 @@ go 1.16 require ( github.com/BurntSushi/toml v0.4.1 - github.com/Workiva/go-datastructures v1.0.53 - github.com/adlio/schema v1.1.13 + github.com/adlio/schema v1.1.14 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/fortytw2/leaktest v1.3.0 @@ -20,7 +19,6 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/lib/pq v1.10.3 github.com/libp2p/go-buffer-pool v0.0.2 - github.com/minio/highwayhash v1.0.2 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible @@ -36,7 +34,7 @@ require ( github.com/tendermint/tm-db v0.6.4 github.com/vektra/mockery/v2 v2.9.4 golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 - golang.org/x/net v0.0.0-20210917221730-978cfadd31cf + golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.41.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect diff --git a/go.sum b/go.sum index c6a87e16f..e767033e5 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -52,8 +53,8 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EU dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY= github.com/Antonboom/errname v0.1.4/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -72,8 +73,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= 
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -85,10 +86,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= -github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98= -github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE= +github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU= +github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -126,6 +125,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= @@ -160,9 +160,11 @@ github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzF github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= @@ -173,8 +175,9 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= +github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -192,6 +195,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294= github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -253,6 +257,7 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -574,6 +579,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty 
v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -592,7 +598,6 @@ github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3 github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -645,7 +650,6 @@ github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0 github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -664,6 +668,7 @@ github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7p github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -676,6 +681,7 @@ github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinK github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mroth/weightedrand v0.4.1 h1:rHcbUBopmi/3x4nnrvwGJBhX9d0vk+KgoLUZeDP6YyI= github.com/mroth/weightedrand v0.4.1/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= @@ -724,12 +730,14 @@ github.com/onsi/gomega 
v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= @@ -748,7 +756,6 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -832,6 +839,7 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dms github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8= github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -842,7 +850,6 @@ github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxr 
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -906,6 +913,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U= @@ -918,7 +926,6 @@ github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0= github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4= github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -929,8 +936,8 @@ github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -950,6 +957,8 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g= 
github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1107,6 +1116,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -1115,8 +1125,9 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= +golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1163,6 +1174,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1176,8 +1188,10 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1202,6 +1216,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1217,6 +1232,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1226,8 +1242,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys 
v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1328,7 +1345,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= From 5bf30bb0491f28b0377657df5ead0c88d8384a50 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 6 Oct 2021 15:17:44 -0400 Subject: [PATCH 075/174] p2p: cleanup transport interface (#7071) This is another batch of things to cleanup in the legacy P2P system. --- CHANGELOG_PENDING.md | 2 +- .../commands/reset_priv_validator.go | 17 +-- cmd/tendermint/commands/run_node.go | 2 - cmd/tendermint/commands/testnet.go | 1 - config/config.go | 105 ++------------ config/config_test.go | 20 --- config/toml.go | 50 ------- internal/p2p/conn/connection.go | 133 ------------------ internal/p2p/conn/connection_test.go | 32 +---- internal/p2p/conn_set.go | 82 ----------- internal/p2p/mock/peer.go | 9 +- internal/p2p/mocks/connection.go | 69 +-------- internal/p2p/mocks/peer.go | 15 -- internal/p2p/router.go | 3 +- internal/p2p/shim.go | 13 -- internal/p2p/transport.go | 27 +--- internal/p2p/transport_mconn.go | 49 ++----- internal/p2p/transport_mconn_test.go | 1 - internal/p2p/transport_memory.go | 40 +----- internal/p2p/transport_test.go | 51 ++----- internal/p2p/types.go | 1 - node/node.go | 4 - node/setup.go | 22 +-- rpc/client/helpers.go | 2 +- test/e2e/runner/setup.go | 1 - 25 files changed, 52 insertions(+), 699 deletions(-) delete mode 100644 internal/p2p/conn_set.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index d80e3ae61..9d7ea96ae 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -17,7 +17,7 @@ Special thanks to external contributors on this release: - P2P Protocol - [p2p] \#7035 Remove legacy P2P routing implementation and - associated configuration (@tychoish) + associated configuration options (@tychoish) - Go API diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 8745e55d8..5f3e54700 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -37,7 +37,7 @@ var ResetPrivValidatorCmd = &cobra.Command{ // XXX: this is totally unsafe. // it's only suitable for testnets. 
func resetAll(cmd *cobra.Command, args []string) error { - return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(), + return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger) } @@ -49,12 +49,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) error { // ResetAll removes address book files plus all data, and resets the privValdiator data. // Exported so other CLI tools can use it. -func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error { - if keepAddrBook { - logger.Info("The address book remains intact") - } else { - removeAddrBook(addrBookFile, logger) - } +func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error { if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { @@ -87,11 +82,3 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err } return nil } - -func removeAddrBook(addrBookFile string, logger log.Logger) { - if err := os.Remove(addrBookFile); err == nil { - logger.Info("Removed existing address book", "file", addrBookFile) - } else if !os.IsNotExist(err) { - logger.Info("Error removing address book", "file", addrBookFile, "err", err) - } -} diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 1e310f5eb..0c6d3271e 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -84,8 +84,6 @@ func AddNodeFlags(cmd *cobra.Command) { "node listen address. (0.0.0.0:0 means any interface, any port)") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.unconditional-peer-ids", - config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index a7307b38f..ef46f5428 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -226,7 +226,6 @@ func testnetFiles(cmd *cobra.Command, args []string) error { for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - config.P2P.AddrBookStrict = false config.P2P.AllowDuplicateIP = true if populatePersistentPeers { persistentPeersWithoutSelf := make([]string, 0) diff --git a/config/config.go b/config/config.go index 349e5a6f7..0e204b6e6 100644 --- a/config/config.go +++ b/config/config.go @@ -53,16 +53,14 @@ var ( defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" - defaultNodeKeyName = "node_key.json" - defaultAddrBookName = "addrbook.json" + defaultNodeKeyName = "node_key.json" defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName) defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName) - 
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) - defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) + defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) ) // Config defines the top level configuration for a Tendermint node @@ -141,9 +139,6 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.RPC.ValidateBasic(); err != nil { return fmt.Errorf("error in [rpc] section: %w", err) } - if err := cfg.P2P.ValidateBasic(); err != nil { - return fmt.Errorf("error in [p2p] section: %w", err) - } if err := cfg.Mempool.ValidateBasic(); err != nil { return fmt.Errorf("error in [mempool] section: %w", err) } @@ -646,25 +641,6 @@ type P2PConfig struct { //nolint: maligned // UPNP port forwarding UPNP bool `mapstructure:"upnp"` - // Path to address book - AddrBook string `mapstructure:"addr-book-file"` - - // Set true for strict address routability rules - // Set false for private or local networks - AddrBookStrict bool `mapstructure:"addr-book-strict"` - - // Maximum number of inbound peers - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"` - - // Maximum number of outbound peers to connect to, excluding persistent peers. - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"` - // MaxConnections defines the maximum number of connected peers (inbound and // outbound). MaxConnections uint16 `mapstructure:"max-connections"` @@ -673,24 +649,6 @@ type P2PConfig struct { //nolint: maligned // attempts per IP address. MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` - // List of node IDs, to which a connection will be (re)established ignoring any existing limits - UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"` - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"` - - // Time to wait before flushing messages out on the connection - FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` - - // Maximum size of a message packet payload, in bytes - MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` - - // Rate at which packets can be sent, in bytes/second - SendRate int64 `mapstructure:"send-rate"` - - // Rate at which packets can be received, in bytes/second - RecvRate int64 `mapstructure:"recv-rate"` - // Set true to enable the peer-exchange reactor PexReactor bool `mapstructure:"pex"` @@ -721,28 +679,14 @@ func DefaultP2PConfig() *P2PConfig { ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", UPNP: false, - AddrBook: defaultAddrBookPath, - AddrBookStrict: true, - MaxNumInboundPeers: 40, - MaxNumOutboundPeers: 10, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PersistentPeersMaxDialPeriod: 0 * time.Second, - FlushThrottleTimeout: 100 * time.Millisecond, - // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. 
- // The IP header and the TCP header take up 20 bytes each at least (unless - // optional header fields are used) and thus the max for (non-Jumbo frame) - // Ethernet is 1500 - 20 -20 = 1460 - // Source: https://stackoverflow.com/a/3074427/820520 - MaxPacketMsgPayloadSize: 1400, - SendRate: 5120000, // 5 mB/s - RecvRate: 5120000, // 5 mB/s - PexReactor: true, - AllowDuplicateIP: false, - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + PexReactor: true, + AllowDuplicateIP: false, + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + QueueType: "priority", } } @@ -750,43 +694,10 @@ func DefaultP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.FlushThrottleTimeout = 10 * time.Millisecond cfg.AllowDuplicateIP = true return cfg } -// AddrBookFile returns the full path to the address book -func (cfg *P2PConfig) AddrBookFile() string { - return rootify(cfg.AddrBook, cfg.RootDir) -} - -// ValidateBasic performs basic validation (checking param bounds, etc.) and -// returns an error if any check fails. -func (cfg *P2PConfig) ValidateBasic() error { - if cfg.MaxNumInboundPeers < 0 { - return errors.New("max-num-inbound-peers can't be negative") - } - if cfg.MaxNumOutboundPeers < 0 { - return errors.New("max-num-outbound-peers can't be negative") - } - if cfg.FlushThrottleTimeout < 0 { - return errors.New("flush-throttle-timeout can't be negative") - } - if cfg.PersistentPeersMaxDialPeriod < 0 { - return errors.New("persistent-peers-max-dial-period can't be negative") - } - if cfg.MaxPacketMsgPayloadSize < 0 { - return errors.New("max-packet-msg-payload-size can't be negative") - } - if cfg.SendRate < 0 { - return errors.New("send-rate can't be negative") - } - if cfg.RecvRate < 0 { - return errors.New("recv-rate can't be negative") - } - return nil -} - //----------------------------------------------------------------------------- // MempoolConfig diff --git a/config/config_test.go b/config/config_test.go index 79d627910..db0fb5967 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -82,26 +82,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { } } -func TestP2PConfigValidateBasic(t *testing.T) { - cfg := TestP2PConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "MaxNumInboundPeers", - "MaxNumOutboundPeers", - "FlushThrottleTimeout", - "MaxPacketMsgPayloadSize", - "SendRate", - "RecvRate", - } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) - } -} - func TestMempoolConfigValidateBasic(t *testing.T) { cfg := TestMempoolConfig() assert.NoError(t, cfg.ValidateBasic()) diff --git a/config/toml.go b/config/toml.go index ee4c30004..1159640fb 100644 --- a/config/toml.go +++ b/config/toml.go @@ -296,62 +296,12 @@ persistent-peers = "{{ .P2P.PersistentPeers }}" # UPNP port forwarding upnp = {{ .P2P.UPNP }} -# Path to address book -# TODO: Remove once p2p refactor is complete in favor of peer store. -addr-book-file = "{{ js .P2P.AddrBook }}" - -# Set true for strict address routability rules -# Set false for private or local networks -addr-book-strict = {{ .P2P.AddrBookStrict }} - -# Maximum number of inbound peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. 
-# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }} - -# Maximum number of outbound peers to connect to, excluding persistent peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. -# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }} - # Maximum number of connections (inbound and outbound). max-connections = {{ .P2P.MaxConnections }} # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -# TODO: Remove once p2p refactor is complete. -# ref: https://github.com/tendermint/tendermint/issues/5670 -unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" - -# Time to wait before flushing messages out on the connection -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" - -# Maximum size of a message packet payload, in bytes -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} - -# Rate at which packets can be sent, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -send-rate = {{ .P2P.SendRate }} - -# Rate at which packets can be received, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -recv-rate = {{ .P2P.RecvRate }} - # Set true to enable the peer-exchange reactor pex = {{ .P2P.PexReactor }} diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 339ad7469..0ab7b7546 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -64,15 +64,11 @@ initialization of the connection. There are two methods for sending messages: func (m MConnection) Send(chID byte, msgBytes []byte) bool {} - func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} `Send(chID, msgBytes)` is a blocking call that waits until `msg` is successfully queued for the channel with the given id byte `chID`, or until the request times out. The message `msg` is serialized using Protobuf. -`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the -channel's queue is full. - Inbound message bytes are handled with an onReceive callback function. */ type MConnection struct { @@ -265,43 +261,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { return false } -// FlushStop replicates the logic of OnStop. -// It additionally ensures that all successful -// .Send() calls will get flushed before closing -// the connection. -func (c *MConnection) FlushStop() { - if c.stopServices() { - return - } - - // this block is unique to FlushStop - { - // wait until the sendRoutine exits - // so we dont race on calling sendSomePacketMsgs - <-c.doneSendRoutine - - // Send and flush all pending msgs. 
- // Since sendRoutine has exited, we can call this - // safely - eof := c.sendSomePacketMsgs() - for !eof { - eof = c.sendSomePacketMsgs() - } - c.flush() - - // Now we can close the connection - } - - c.conn.Close() - - // We can't close pong safely here because - // recvRoutine may write to it after we've stopped. - // Though it doesn't need to get closed at all, - // we close it @ recvRoutine. - - // c.Stop() -} - // OnStop implements BaseService func (c *MConnection) OnStop() { if c.stopServices() { @@ -375,49 +334,6 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool { return success } -// Queues a message to be sent to channel. -// Nonblocking, returns true if successful. -func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { - if !c.IsRunning() { - return false - } - - c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", msgBytes) - - // Send message to channel. - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) - return false - } - - ok = channel.trySendBytes(msgBytes) - if ok { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } - - return ok -} - -// CanSend returns true if you can send more data onto the chID, false -// otherwise. Use only as a heuristic. -func (c *MConnection) CanSend(chID byte) bool { - if !c.IsRunning() { - return false - } - - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID)) - return false - } - return channel.canSend() -} - // sendRoutine polls for packets to send from channels. func (c *MConnection) sendRoutine() { defer c._recover() @@ -682,13 +598,6 @@ func (c *MConnection) maxPacketMsgSize() int { return len(bz) } -type ConnectionStatus struct { - Duration time.Duration - SendMonitor flow.Status - RecvMonitor flow.Status - Channels []ChannelStatus -} - type ChannelStatus struct { ID byte SendQueueCapacity int @@ -697,24 +606,6 @@ type ChannelStatus struct { RecentlySent int64 } -func (c *MConnection) Status() ConnectionStatus { - var status ConnectionStatus - status.Duration = time.Since(c.created) - status.SendMonitor = c.sendMonitor.Status() - status.RecvMonitor = c.recvMonitor.Status() - status.Channels = make([]ChannelStatus, len(c.channels)) - for i, channel := range c.channels { - status.Channels[i] = ChannelStatus{ - ID: channel.desc.ID, - SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)), - Priority: channel.desc.Priority, - RecentlySent: atomic.LoadInt64(&channel.recentlySent), - } - } - return status -} - //----------------------------------------------------------------------------- type ChannelDescriptor struct { @@ -800,30 +691,6 @@ func (ch *Channel) sendBytes(bytes []byte) bool { } } -// Queues message to send to this channel. -// Nonblocking, returns true if successful. -// Goroutine-safe -func (ch *Channel) trySendBytes(bytes []byte) bool { - select { - case ch.sendQueue <- bytes: - atomic.AddInt32(&ch.sendQueueSize, 1) - return true - default: - return false - } -} - -// Goroutine-safe -func (ch *Channel) loadSendQueueSize() (size int) { - return int(atomic.LoadInt32(&ch.sendQueueSize)) -} - -// Goroutine-safe -// Use only as a heuristic. -func (ch *Channel) canSend() bool { - return ch.loadSendQueueSize() < defaultSendQueueCapacity -} - // Returns true if any PacketMsgs are pending to be sent. 
// Call before calling nextPacketMsg() // Goroutine-safe diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 6d009f85c..6a9c0988f 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -69,9 +69,6 @@ func TestMConnectionSendFlushStop(t *testing.T) { errCh <- err }() - // stop the conn - it should flush all conns - clientConn.FlushStop() - timer := time.NewTimer(3 * time.Second) select { case <-errCh: @@ -97,16 +94,14 @@ func TestMConnectionSend(t *testing.T) { if err != nil { t.Error(err) } - assert.True(t, mconn.CanSend(0x01)) msg = []byte("Spider-Man") - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) if err != nil { t.Error(err) } - assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown") } @@ -145,20 +140,6 @@ func TestMConnectionReceive(t *testing.T) { } } -func TestMConnectionStatus(t *testing.T) { - server, client := NetPipe() - t.Cleanup(closeAll(t, client, server)) - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) - - status := mconn.Status() - assert.NotNil(t, status) - assert.Zero(t, status.Channels[0].SendQueueSize) -} - func TestMConnectionPongTimeoutResultsInError(t *testing.T) { server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) @@ -514,18 +495,15 @@ func TestMConnectionTrySend(t *testing.T) { msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) require.NoError(t, err) - assert.True(t, mconn.CanSend(0x01)) - assert.True(t, mconn.TrySend(0x01, msg)) - assert.False(t, mconn.CanSend(0x01)) + assert.True(t, mconn.Send(0x01, msg)) go func() { - mconn.TrySend(0x01, msg) + mconn.Send(0x01, msg) resultCh <- "TrySend" }() - assert.False(t, mconn.CanSend(0x01)) - assert.False(t, mconn.TrySend(0x01, msg)) + assert.False(t, mconn.Send(0x01, msg)) assert.Equal(t, "TrySend", <-resultCh) } diff --git a/internal/p2p/conn_set.go b/internal/p2p/conn_set.go deleted file mode 100644 index 987d9f968..000000000 --- a/internal/p2p/conn_set.go +++ /dev/null @@ -1,82 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// ConnSet is a lookup table for connections and all their ips. -type ConnSet interface { - Has(net.Conn) bool - HasIP(net.IP) bool - Set(net.Conn, []net.IP) - Remove(net.Conn) - RemoveAddr(net.Addr) -} - -type connSetItem struct { - conn net.Conn - ips []net.IP -} - -type connSet struct { - tmsync.RWMutex - - conns map[string]connSetItem -} - -// NewConnSet returns a ConnSet implementation. 
-func NewConnSet() ConnSet { - return &connSet{ - conns: map[string]connSetItem{}, - } -} - -func (cs *connSet) Has(c net.Conn) bool { - cs.RLock() - defer cs.RUnlock() - - _, ok := cs.conns[c.RemoteAddr().String()] - - return ok -} - -func (cs *connSet) HasIP(ip net.IP) bool { - cs.RLock() - defer cs.RUnlock() - - for _, c := range cs.conns { - for _, known := range c.ips { - if known.Equal(ip) { - return true - } - } - } - - return false -} - -func (cs *connSet) Remove(c net.Conn) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, c.RemoteAddr().String()) -} - -func (cs *connSet) RemoveAddr(addr net.Addr) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, addr.String()) -} - -func (cs *connSet) Set(c net.Conn, ips []net.IP) { - cs.Lock() - defer cs.Unlock() - - cs.conns[c.RemoteAddr().String()] = connSetItem{ - conn: c, - ips: ips, - } -} diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go index cede51768..366f1d7fd 100644 --- a/internal/p2p/mock/peer.go +++ b/internal/p2p/mock/peer.go @@ -4,7 +4,6 @@ import ( "net" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) @@ -51,10 +50,10 @@ func (mp *Peer) NodeInfo() types.NodeInfo { ListenAddr: mp.addr.DialString(), } } -func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp *Peer) ID() types.NodeID { return mp.id } -func (mp *Peer) IsOutbound() bool { return mp.Outbound } -func (mp *Peer) IsPersistent() bool { return mp.Persistent } + +func (mp *Peer) ID() types.NodeID { return mp.id } +func (mp *Peer) IsOutbound() bool { return mp.Outbound } +func (mp *Peer) IsPersistent() bool { return mp.Persistent } func (mp *Peer) Get(key string) interface{} { if value, ok := mp.kv[key]; ok { return value diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 6c6174117..03f7b5b33 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -5,11 +5,8 @@ package mocks import ( context "context" - conn "github.com/tendermint/tendermint/internal/p2p/conn" - - crypto "github.com/tendermint/tendermint/crypto" - mock "github.com/stretchr/testify/mock" + crypto "github.com/tendermint/tendermint/crypto" p2p "github.com/tendermint/tendermint/internal/p2p" @@ -35,20 +32,6 @@ func (_m *Connection) Close() error { return r0 } -// FlushClose provides a mock function with given fields: -func (_m *Connection) FlushClose() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Handshake provides a mock function with given fields: _a0, _a1, _a2 func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { ret := _m.Called(_a0, _a1, _a2) @@ -138,35 +121,14 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { } // SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { +func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) error { ret := _m.Called(_a0, _a1) - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { + var r0 error + if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) error); ok { r0 = rf(_a0, _a1) } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := 
ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Status provides a mock function with given fields: -func (_m *Connection) Status() conn.ConnectionStatus { - ret := _m.Called() - - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(conn.ConnectionStatus) + r0 = ret.Error(0) } return r0 @@ -185,24 +147,3 @@ func (_m *Connection) String() string { return r0 } - -// TrySendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go index b905c1156..3e5d20325 100644 --- a/internal/p2p/mocks/peer.go +++ b/internal/p2p/mocks/peer.go @@ -3,7 +3,6 @@ package mocks import ( - conn "github.com/tendermint/tendermint/internal/p2p/conn" log "github.com/tendermint/tendermint/libs/log" mock "github.com/stretchr/testify/mock" @@ -272,20 +271,6 @@ func (_m *Peer) Start() error { return r0 } -// Status provides a mock function with given fields: -func (_m *Peer) Status() conn.ConnectionStatus { - ret := _m.Called() - - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(conn.ConnectionStatus) - } - - return r0 -} - // Stop provides a mock function with given fields: func (_m *Peer) Stop() error { ret := _m.Called() diff --git a/internal/p2p/router.go b/internal/p2p/router.go index ef1eef564..ed6119431 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -960,8 +960,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) continue } - _, err = conn.SendMessage(envelope.channelID, bz) - if err != nil { + if err = conn.SendMessage(envelope.channelID, bz); err != nil { return err } diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go index c9cdc2756..9f82bc11a 100644 --- a/internal/p2p/shim.go +++ b/internal/p2p/shim.go @@ -4,8 +4,6 @@ import ( "sort" "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" ) @@ -74,17 +72,6 @@ func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { } } -// MConnConfig returns an MConnConfig based on the defaults, with fields updated -// from the P2PConfig. -func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = cfg.FlushThrottleTimeout - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - return mConfig -} - // GetChannels implements the legacy Reactor interface for getting a slice of all // the supported ChannelDescriptors. 
func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 0b2311fa3..8440889d4 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -7,7 +7,6 @@ import ( "net" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" ) @@ -82,19 +81,7 @@ type Connection interface { ReceiveMessage() (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - // - // FIXME: For compatibility with the legacy P2P stack, it returns an - // additional boolean false if the message timed out waiting to be accepted - // into the send buffer. This should be removed. - SendMessage(ChannelID, []byte) (bool, error) - - // TrySendMessage is a non-blocking version of SendMessage that returns - // immediately if the message buffer is full. It returns true if the message - // was accepted. - // - // FIXME: This method is here for backwards-compatibility with the legacy - // P2P stack and should be removed. - TrySendMessage(ChannelID, []byte) (bool, error) + SendMessage(ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint @@ -105,18 +92,6 @@ type Connection interface { // Close closes the connection. Close() error - // FlushClose flushes all pending sends and then closes the connection. - // - // FIXME: This only exists for backwards-compatibility with the current - // MConnection implementation. There should really be a separate Flush() - // method, but there is no easy way to synchronously flush pending data with - // the current MConnection code. - FlushClose() error - - // Status returns the current connection status. - // FIXME: Only here for compatibility with the current Peer code. - Status() conn.ConnectionStatus - // Stringer is used to display the connection, e.g. in logs. // // Without this, the logger may use reflection to access and display diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index eca261476..de643bdb9 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -377,32 +377,21 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. -func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) + return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: - return false, err + return err case <-c.closeCh: - return false, io.EOF + return io.EOF default: - return c.mconn.Send(byte(chID), msg), nil - } -} + if ok := c.mconn.Send(byte(chID), msg); !ok { + return errors.New("sending message timed out") + } -// TrySendMessage implements Connection. -func (c *mConnConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) - } - select { - case err := <-c.errorCh: - return false, err - case <-c.closeCh: - return false, io.EOF - default: - return c.mconn.TrySend(byte(chID), msg), nil + return nil } } @@ -442,14 +431,6 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint { return endpoint } -// Status implements Connection. 
-func (c *mConnConnection) Status() conn.ConnectionStatus { - if c.mconn == nil { - return conn.ConnectionStatus{} - } - return c.mconn.Status() -} - // Close implements Connection. func (c *mConnConnection) Close() error { var err error @@ -463,17 +444,3 @@ func (c *mConnConnection) Close() error { }) return err } - -// FlushClose implements Connection. -func (c *mConnConnection) FlushClose() error { - var err error - c.closeOnce.Do(func() { - if c.mconn != nil && c.mconn.IsRunning() { - c.mconn.FlushStop() - } else { - err = c.conn.Close() - } - close(c.closeCh) - }) - return err -} diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index 06cd93c0a..f4d7198ed 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -195,7 +195,6 @@ func TestMConnTransport_Listen(t *testing.T) { _ = conn.Close() <-dialedChan - time.Sleep(time.Minute) // closing the connection should not error require.NoError(t, peerConn.Close()) diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 09a387254..853288c59 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/crypto" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -262,11 +261,6 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { } } -// Status implements Connection. -func (c *MemoryConnection) Status() conn.ConnectionStatus { - return conn.ConnectionStatus{} -} - // Handshake implements Connection. func (c *MemoryConnection) Handshake( ctx context.Context, @@ -316,42 +310,21 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { case <-c.closer.Done(): - return false, io.EOF + return io.EOF default: } select { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil + return nil case <-c.closer.Done(): - return false, io.EOF - } -} - -// TrySendMessage implements Connection. -func (c *MemoryConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - // Check close first, since channels are buffered. Otherwise, below select - // may non-deterministically return non-error even when closed. - select { - case <-c.closer.Done(): - return false, io.EOF - default: - } - - select { - case c.sendCh <- memoryMessage{channelID: chID, message: msg}: - c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil - case <-c.closer.Done(): - return false, io.EOF - default: - return false, nil + return io.EOF } } @@ -366,8 +339,3 @@ func (c *MemoryConnection) Close() error { } return nil } - -// FlushClose implements Connection. 
-func (c *MemoryConnection) FlushClose() error { - return c.Close() -} diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index 1b8ab77f5..cdfb57c70 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -315,22 +315,16 @@ func TestConnection_FlushClose(t *testing.T) { b := makeTransport(t) ab, _ := dialAcceptHandshake(t, a, b) - // FIXME: FlushClose should be removed (and replaced by separate Flush - // and Close calls if necessary). We can't reliably test it, so we just - // make sure it closes both ends and that it's idempotent. - err := ab.FlushClose() + err := ab.Close() require.NoError(t, err) _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - - err = ab.FlushClose() - require.NoError(t, err) }) } @@ -355,9 +349,8 @@ func TestConnection_SendReceive(t *testing.T) { ab, ba := dialAcceptHandshake(t, a, b) // Can send and receive a to b. - ok, err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(chID, []byte("foo")) require.NoError(t, err) - require.True(t, ok) ch, msg, err := ba.ReceiveMessage() require.NoError(t, err) @@ -365,30 +358,20 @@ func TestConnection_SendReceive(t *testing.T) { require.Equal(t, chID, ch) // Can send and receive b to a. - _, err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(chID, []byte("bar")) require.NoError(t, err) _, msg, err = ab.ReceiveMessage() require.NoError(t, err) require.Equal(t, []byte("bar"), msg) - // TrySendMessage also works. - ok, err = ba.TrySendMessage(chID, []byte("try")) - require.NoError(t, err) - require.True(t, ok) - - ch, msg, err = ab.ReceiveMessage() - require.NoError(t, err) - require.Equal(t, []byte("try"), msg) - require.Equal(t, chID, ch) - // Connections should still be active after closing the transports. err = a.Close() require.NoError(t, err) err = b.Close() require.NoError(t, err) - _, err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(chID, []byte("still here")) require.NoError(t, err) ch, msg, err = ba.ReceiveMessage() require.NoError(t, err) @@ -403,34 +386,18 @@ func TestConnection_SendReceive(t *testing.T) { _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.TrySendMessage(chID, []byte("closed try")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) _, _, err = ba.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.TrySendMessage(chID, []byte("closed try")) + + err = ba.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.SendMessage(chID, []byte("closed")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - }) -} - -func TestConnection_Status(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { - a := makeTransport(t) - b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) - - // FIXME: This isn't implemented in all transports, so for now we just - // check that it doesn't panic, which isn't really much of a test. 
- ab.Status() }) } diff --git a/internal/p2p/types.go b/internal/p2p/types.go index 403f43528..388ff2253 100644 --- a/internal/p2p/types.go +++ b/internal/p2p/types.go @@ -5,4 +5,3 @@ import ( ) type ChannelDescriptor = conn.ChannelDescriptor -type ConnectionStatus = conn.ConnectionStatus diff --git a/node/node.go b/node/node.go index 891c5fce6..eeb301614 100644 --- a/node/node.go +++ b/node/node.go @@ -1043,10 +1043,6 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt QueueType: conf.P2P.QueueType, } - if conf.P2P.MaxNumInboundPeers > 0 { - opts.MaxIncomingConnectionAttempts = conf.P2P.MaxIncomingConnectionAttempts - } - if conf.FilterPeers && proxyApp != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ diff --git a/node/setup.go b/node/setup.go index 8889edc4e..893d7ccde 100644 --- a/node/setup.go +++ b/node/setup.go @@ -3,7 +3,6 @@ package node import ( "bytes" "fmt" - "math" "time" dbm "github.com/tendermint/tm-db" @@ -18,6 +17,7 @@ import ( mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -344,11 +344,9 @@ func createConsensusReactor( func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { return p2p.NewMConnTransport( - logger, p2p.MConnConfig(cfg.P2P), []*p2p.ChannelDescriptor{}, + logger, conn.DefaultMConnConfig(), []*p2p.ChannelDescriptor{}, p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(cfg.P2P.MaxNumInboundPeers + - len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")), - ), + MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), }, ) } @@ -365,20 +363,6 @@ func createPeerManager( switch { case cfg.P2P.MaxConnections > 0: maxConns = cfg.P2P.MaxConnections - - case cfg.P2P.MaxNumInboundPeers > 0 && cfg.P2P.MaxNumOutboundPeers > 0: - x := cfg.P2P.MaxNumInboundPeers + cfg.P2P.MaxNumOutboundPeers - if x > math.MaxUint16 { - return nil, fmt.Errorf( - "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)", - cfg.P2P.MaxNumInboundPeers, - cfg.P2P.MaxNumOutboundPeers, - math.MaxUint16, - ) - } - - maxConns = uint16(x) - default: maxConns = 64 } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 289d947a9..58e48dbba 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -79,7 +79,7 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( select { case event := <-eventCh: - return event.Data.(types.TMEventData), nil + return event.Data, nil case <-ctx.Done(): return nil, errors.New("timed out waiting for event") } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 3b1184e9c..9bf76c874 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -237,7 +237,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) - cfg.P2P.AddrBookStrict = false cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second From 
1f438f205a10c83a4588b4fd838b3235684b1782 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 7 Oct 2021 19:07:35 -0400 Subject: [PATCH 076/174] e2e: improve network connectivity (#7077) This tweaks the connectivity of test configurations, in hopes that more will be viable. Additionally reduces the prevalence of testing the legacy mempool. --- test/e2e/generator/generate.go | 34 ++++++++++++----------------- test/e2e/generator/generate_test.go | 4 ++-- test/e2e/pkg/testnet.go | 2 +- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 21b3ebe0a..75342f4e7 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -44,9 +44,10 @@ var ( "tcp": 20, "unix": 10, } - // FIXME: v2 disabled due to flake - nodeBlockSyncs = uniformChoice{"v0"} // "v2" - nodeMempools = uniformChoice{"v0", "v1"} + nodeMempools = weightedChoice{ + "v0": 20, + "v1": 80, + } nodeStateSyncs = weightedChoice{ e2e.StateSyncDisabled: 10, e2e.StateSyncP2P: 45, @@ -54,8 +55,12 @@ var ( } nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 5} - nodeRetainBlocks = uniformChoice{0, 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight)} - nodePerturbations = probSetChoice{ + nodeRetainBlocks = uniformChoice{ + 0, + 2 * int(e2e.EvidenceAgeHeight), + 4 * int(e2e.EvidenceAgeHeight), + } + nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, "kill": 0.1, @@ -236,13 +241,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // choose one of the seeds manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) - } else if i > 0 { + } else if i > 1 && r.Float64() >= 0.5 { peers := uniformSetChoice(peerNames[:i]) - if manifest.Nodes[name].StateSync == e2e.StateSyncP2P { - manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) - } else { - manifest.Nodes[name].PersistentPeers = peers.Choose(r) - } + manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) } } @@ -250,9 +251,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 - node := generateLightNode( - r, startAt+(5*int64(i)), lightProviders, - ) + node := generateLightNode(r, startAt+(5*int64(i)), lightProviders) manifest.Nodes[fmt.Sprintf("light%02d", i)] = node @@ -278,8 +277,7 @@ func generateNode( Database: nodeDatabases.Choose(r), ABCIProtocol: nodeABCIProtocols.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), - BlockSync: nodeBlockSyncs.Choose(r).(string), - Mempool: nodeMempools.Choose(r).(string), + Mempool: nodeMempools.Choose(r), StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -326,10 +324,6 @@ func generateNode( } } - if node.StateSync != e2e.StateSyncDisabled { - node.BlockSync = "v0" - } - return &node } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index b7f790259..79a20f27e 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -31,8 +31,8 @@ func TestGenerator(t *testing.T) { } if node.StateSync != e2e.StateSyncDisabled { require.Zero(t, node.Seeds, node.StateSync) - require.True(t, len(node.PersistentPeers) >= 2) - require.Equal(t, "v0", node.BlockSync) + require.True(t, len(node.PersistentPeers) >= 2 || 
len(node.PersistentPeers) == 0,
+ "peers: %v", node.PersistentPeers)
 }
 })
 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go
index 1daa8b87c..a6d5b2485 100644
--- a/test/e2e/pkg/testnet.go
+++ b/test/e2e/pkg/testnet.go
@@ -172,7 +172,7 @@ func LoadTestnet(file string) (*Testnet, error) {
 ABCIProtocol: ProtocolBuiltin,
 PrivvalProtocol: ProtocolFile,
 StartAt: nodeManifest.StartAt,
- BlockSync: nodeManifest.BlockSync,
+ BlockSync: "v0",
 Mempool: nodeManifest.Mempool,
 StateSync: nodeManifest.StateSync,
 PersistInterval: 1,
From 4ca130d226189361651bd91d1738c623048a6f46 Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Fri, 8 Oct 2021 09:15:13 +0200
Subject: [PATCH 077/174] cli: allow node operator to rollback last state
 (#7033)

---
 CHANGELOG_PENDING.md | 1 +
 cmd/tendermint/commands/reindex_event.go | 11 +-
 cmd/tendermint/commands/rollback.go | 46 +++++++
 cmd/tendermint/main.go | 1 +
 internal/state/rollback.go | 87 ++++++++++++++
 internal/state/rollback_test.go | 146 +++++++++++++++++++++++
 internal/state/state.go | 2 +
 test/e2e/app/app.go | 6 +
 8 files changed, 295 insertions(+), 5 deletions(-)
 create mode 100644 cmd/tendermint/commands/rollback.go
 create mode 100644 internal/state/rollback.go
 create mode 100644 internal/state/rollback_test.go

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 9d7ea96ae..fafb9cea7 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -30,6 +30,7 @@ Special thanks to external contributors on this release:
 
 ### FEATURES
 
+- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-deterministic app hash or reverting an upgrade.
 - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
 
 ### IMPROVEMENTS
 
diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go
index 9545e32d7..58f11657b 100644
--- a/cmd/tendermint/commands/reindex_event.go
+++ b/cmd/tendermint/commands/reindex_event.go
@@ -29,11 +29,12 @@ var ReIndexEventCmd = &cobra.Command{
 Use: "reindex-event",
 Short: "reindex events to the event store backends",
 Long: `
- reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
- you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
- The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
- default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
- either or both arguments.
+reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
+you can run this command when the event store backend dropped/disconnected or you want to
+replace the backend. The default start-height is 0, meaning the tooling will start
+reindex from the base block height(inclusive); and the default end-height is 0, meaning
+the tooling will reindex until the latest block height(inclusive). User can omit
+either or both arguments.
`, Example: ` tendermint reindex-event diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go new file mode 100644 index 000000000..5aff232be --- /dev/null +++ b/cmd/tendermint/commands/rollback.go @@ -0,0 +1,46 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state" +) + +var RollbackStateCmd = &cobra.Command{ + Use: "rollback", + Short: "rollback tendermint state by one height", + Long: ` +A state rollback is performed to recover from an incorrect application state transition, +when Tendermint has persisted an incorrect app hash and is thus unable to make +progress. Rollback overwrites a state at height n with the state at height n - 1. +The application should also roll back to height n - 1. No blocks are removed, so upon +restarting Tendermint the transactions in block n will be re-executed against the +application. +`, + RunE: func(cmd *cobra.Command, args []string) error { + height, hash, err := RollbackState(config) + if err != nil { + return fmt.Errorf("failed to rollback state: %w", err) + } + + fmt.Printf("Rolled back state to height %d and hash %v", height, hash) + return nil + }, +} + +// RollbackState takes the state at the current height n and overwrites it with the state +// at height n - 1. Note state here refers to tendermint state not application state. +// Returns the latest state height and app hash alongside an error if there was one. +func RollbackState(config *cfg.Config) (int64, []byte, error) { + // use the parsed config to load the block and state store + blockStore, stateStore, err := loadStateAndBlockStore(config) + if err != nil { + return -1, nil, err + } + + // rollback the last state + return state.Rollback(blockStore, stateStore) +} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 9f908a1ed..52a00e4c0 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -29,6 +29,7 @@ func main() { cmd.GenNodeKeyCmd, cmd.VersionCmd, cmd.InspectCmd, + cmd.RollbackStateCmd, cmd.MakeKeyMigrateCommand(), debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), diff --git a/internal/state/rollback.go b/internal/state/rollback.go new file mode 100644 index 000000000..6e13da0e2 --- /dev/null +++ b/internal/state/rollback.go @@ -0,0 +1,87 @@ +package state + +import ( + "errors" + "fmt" + + "github.com/tendermint/tendermint/version" +) + +// Rollback overwrites the current Tendermint state (height n) with the most +// recent previous state (height n - 1). +// Note that this function does not affect application state. 
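+//
+// The application's state must be rolled back to height n - 1 separately;
+// upon restarting Tendermint, the transactions in block n will be
+// re-executed against the application.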
+func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { + invalidState, err := ss.Load() + if err != nil { + return -1, nil, err + } + if invalidState.IsEmpty() { + return -1, nil, errors.New("no state found") + } + + rollbackHeight := invalidState.LastBlockHeight + rollbackBlock := bs.LoadBlockMeta(rollbackHeight) + if rollbackBlock == nil { + return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight) + } + + previousValidatorSet, err := ss.LoadValidators(rollbackHeight - 1) + if err != nil { + return -1, nil, err + } + + previousParams, err := ss.LoadConsensusParams(rollbackHeight) + if err != nil { + return -1, nil, err + } + + valChangeHeight := invalidState.LastHeightValidatorsChanged + // this can only happen if the validator set changed since the last block + if valChangeHeight > rollbackHeight { + valChangeHeight = rollbackHeight + } + + paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged + // this can only happen if params changed from the last block + if paramsChangeHeight > rollbackHeight { + paramsChangeHeight = rollbackHeight + } + + // build the new state from the old state and the prior block + rolledBackState := State{ + Version: Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: previousParams.Version.AppVersion, + }, + Software: version.TMVersion, + }, + // immutable fields + ChainID: invalidState.ChainID, + InitialHeight: invalidState.InitialHeight, + + LastBlockHeight: invalidState.LastBlockHeight - 1, + LastBlockID: rollbackBlock.Header.LastBlockID, + LastBlockTime: rollbackBlock.Header.Time, + + NextValidators: invalidState.Validators, + Validators: invalidState.LastValidators, + LastValidators: previousValidatorSet, + LastHeightValidatorsChanged: valChangeHeight, + + ConsensusParams: previousParams, + LastHeightConsensusParamsChanged: paramsChangeHeight, + + LastResultsHash: rollbackBlock.Header.LastResultsHash, + AppHash: rollbackBlock.Header.AppHash, + } + + // persist the new state. This overrides the invalid one. 
NOTE: this will also + // persist the validator set and consensus params over the existing structures, + // but both should be the same + if err := ss.Save(rolledBackState); err != nil { + return -1, nil, fmt.Errorf("failed to save rolled back state: %w", err) + } + + return rolledBackState.LastBlockHeight, rolledBackState.AppHash, nil +} diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go new file mode 100644 index 000000000..ae5c8ee84 --- /dev/null +++ b/internal/state/rollback_test.go @@ -0,0 +1,146 @@ +package state_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +func TestRollback(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + var ( + height int64 = 100 + appVersion uint64 = 10 + ) + + valSet, _ := factory.RandValidatorSet(5, 10) + + params := types.DefaultConsensusParams() + params.Version.AppVersion = appVersion + newParams := types.DefaultConsensusParams() + newParams.Block.MaxBytes = 10000 + + initialState := state.State{ + Version: state.Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 10, + }, + Software: version.TMVersion, + }, + ChainID: factory.DefaultTestChainID, + InitialHeight: 10, + LastBlockID: factory.MakeBlockID(), + AppHash: factory.RandomHash(), + LastResultsHash: factory.RandomHash(), + LastBlockHeight: height, + LastValidators: valSet, + Validators: valSet.CopyIncrementProposerPriority(1), + NextValidators: valSet.CopyIncrementProposerPriority(2), + LastHeightValidatorsChanged: height + 1, + ConsensusParams: *params, + LastHeightConsensusParamsChanged: height + 1, + } + require.NoError(t, stateStore.Bootstrap(initialState)) + + height++ + block := &types.BlockMeta{ + Header: types.Header{ + Height: height, + AppHash: initialState.AppHash, + LastBlockID: initialState.LastBlockID, + LastResultsHash: initialState.LastResultsHash, + }, + } + blockStore.On("LoadBlockMeta", height).Return(block) + + appVersion++ + newParams.Version.AppVersion = appVersion + nextState := initialState.Copy() + nextState.LastBlockHeight = height + nextState.Version.Consensus.App = appVersion + nextState.LastBlockID = factory.MakeBlockID() + nextState.AppHash = factory.RandomHash() + nextState.LastValidators = initialState.Validators + nextState.Validators = initialState.NextValidators + nextState.NextValidators = initialState.NextValidators.CopyIncrementProposerPriority(1) + nextState.ConsensusParams = *newParams + nextState.LastHeightConsensusParamsChanged = height + 1 + nextState.LastHeightValidatorsChanged = height + 1 + + // update the state + require.NoError(t, stateStore.Save(nextState)) + + // rollback the state + rollbackHeight, rollbackHash, err := state.Rollback(blockStore, stateStore) + require.NoError(t, err) + require.EqualValues(t, int64(100), rollbackHeight) + require.EqualValues(t, initialState.AppHash, rollbackHash) + blockStore.AssertExpectations(t) + + // assert that we've recovered the prior state + loadedState, err := stateStore.Load() + require.NoError(t, err) + require.EqualValues(t, initialState, loadedState) +} + +func TestRollbackNoState(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + + 
_, _, err := state.Rollback(blockStore, stateStore)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "no state found")
+}
+
+func TestRollbackNoBlocks(t *testing.T) {
+ stateStore := state.NewStore(dbm.NewMemDB())
+ blockStore := &mocks.BlockStore{}
+ var (
+ height int64 = 100
+ appVersion uint64 = 10
+ )
+
+ valSet, _ := factory.RandValidatorSet(5, 10)
+
+ params := types.DefaultConsensusParams()
+ params.Version.AppVersion = appVersion
+ newParams := types.DefaultConsensusParams()
+ newParams.Block.MaxBytes = 10000
+
+ initialState := state.State{
+ Version: state.Version{
+ Consensus: version.Consensus{
+ Block: version.BlockProtocol,
+ App: 10,
+ },
+ Software: version.TMVersion,
+ },
+ ChainID: factory.DefaultTestChainID,
+ InitialHeight: 10,
+ LastBlockID: factory.MakeBlockID(),
+ AppHash: factory.RandomHash(),
+ LastResultsHash: factory.RandomHash(),
+ LastBlockHeight: height,
+ LastValidators: valSet,
+ Validators: valSet.CopyIncrementProposerPriority(1),
+ NextValidators: valSet.CopyIncrementProposerPriority(2),
+ LastHeightValidatorsChanged: height + 1,
+ ConsensusParams: *params,
+ LastHeightConsensusParamsChanged: height + 1,
+ }
+ require.NoError(t, stateStore.Save(initialState))
+ blockStore.On("LoadBlockMeta", height).Return(nil)
+
+ _, _, err := state.Rollback(blockStore, stateStore)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "block at height 100 not found")
+}
diff --git a/internal/state/state.go b/internal/state/state.go
index 5862162d1..ce9990cc2 100644
--- a/internal/state/state.go
+++ b/internal/state/state.go
@@ -64,6 +64,8 @@ func VersionFromProto(v tmstate.Version) Version {
 // Instead, use state.Copy() or updateState(...).
 // NOTE: not goroutine-safe.
 type State struct {
+ // FIXME: This can be removed as TMVersion is a constant, and version.Consensus should
+ // eventually be replaced by VersionParams in ConsensusParams
 Version Version
 
 // immutable
diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go
index e7ba110e9..395c87024 100644
--- a/test/e2e/app/app.go
+++ b/test/e2e/app/app.go
@@ -11,6 +11,7 @@ import (
 "github.com/tendermint/tendermint/abci/example/code"
 abci "github.com/tendermint/tendermint/abci/types"
 "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/proto/tendermint/types"
 "github.com/tendermint/tendermint/version"
 )
 
@@ -116,6 +117,11 @@ func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitCh
 }
 resp := abci.ResponseInitChain{
 AppHash: app.state.Hash,
+ ConsensusParams: &types.ConsensusParams{
+ Version: &types.VersionParams{
+ AppVersion: 1,
+ },
+ },
 }
 if resp.Validators, err = app.validatorUpdates(0); err != nil {
 panic(err)
From 1b5bb5348f0bf660efd294ecf059458251ee0511 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Fri, 8 Oct 2021 08:49:17 -0400
Subject: [PATCH 078/174] p2p: cleanup unused arguments (#7079)

This is mostly just reading through the output of unparam, after
noticing that there were a few places where we were ignoring some
arguments.
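
For illustration, a self-contained sketch of the shape of these
cleanups; the startSeedNode signature change mirrors the diff below,
while the body and printed output are assumed:

    package main

    import "fmt"

    // Before the cleanup, the function accepted a *Config argument that it
    // never read; unparam flags exactly this pattern:
    //
    //     func startSeedNode(cfg *Config) error
    //
    // After the cleanup, the unused parameter is dropped and callers stop
    // threading the config through.
    func startSeedNode() error {
        fmt.Println("starting seed node")
        return nil
    }

    func main() {
        if err := startSeedNode(); err != nil {
            fmt.Println("seed node failed:", err)
        }
    }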
--- internal/p2p/router.go | 18 +++++++++--------- internal/statesync/block_queue.go | 2 +- internal/statesync/block_queue_test.go | 8 ++++---- internal/statesync/reactor.go | 2 +- node/node.go | 10 +++++----- node/setup.go | 3 --- test/e2e/node/main.go | 4 ++-- 7 files changed, 22 insertions(+), 25 deletions(-) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index ed6119431..8b96bc93a 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -610,7 +610,7 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { // The Router should do the handshake and have a final ack/fail // message to make sure both ends have accepted the connection, such // that it can be coordinated with the peer manager. - peerInfo, _, err := r.handshakePeer(ctx, conn, "") + peerInfo, err := r.handshakePeer(ctx, conn, "") switch { case errors.Is(err, context.Canceled): return @@ -704,7 +704,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { return } - peerInfo, _, err := r.handshakePeer(ctx, conn, address.NodeID) + peerInfo, err := r.handshakePeer(ctx, conn, address.NodeID) switch { case errors.Is(err, context.Canceled): conn.Close() @@ -799,7 +799,7 @@ func (r *Router) handshakePeer( ctx context.Context, conn Connection, expectID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { +) (types.NodeInfo, error) { if r.options.HandshakeTimeout > 0 { var cancel context.CancelFunc @@ -809,27 +809,27 @@ func (r *Router) handshakePeer( peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey) if err != nil { - return peerInfo, peerKey, err + return peerInfo, err } if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err) + return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", + return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) } if expectID != "" && expectID != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q", + return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q", expectID, peerInfo.NodeID) } if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ + return peerInfo, ErrRejected{ err: err, id: peerInfo.ID(), isIncompatible: true, } } - return peerInfo, peerKey, nil + return peerInfo, nil } func (r *Router) runWithPeerMutex(fn func() error) error { diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 56ed3c376..80b0ffbd5 100644 --- a/internal/statesync/block_queue.go +++ b/internal/statesync/block_queue.go @@ -200,7 +200,7 @@ func (q *blockQueue) retry(height int64) { // Success is called when a light block has been successfully verified and // processed -func (q *blockQueue) success(height int64) { +func (q *blockQueue) success() { q.mtx.Lock() defer q.mtx.Unlock() if q.terminal != nil && q.verifyHeight == q.terminal.Height { diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index dc5e2bc82..ad28efac9 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -58,7 +58,7 @@ loop: // assert that the queue serializes the blocks require.Equal(t, resp.block.Height, trackingHeight) 
trackingHeight-- - queue.success(resp.block.Height) + queue.success() } } @@ -105,7 +105,7 @@ func TestBlockQueueWithFailures(t *testing.T) { queue.retry(resp.block.Height) } else { trackingHeight-- - queue.success(resp.block.Height) + queue.success() } case <-queue.done(): @@ -223,7 +223,7 @@ func TestBlockQueueStopTime(t *testing.T) { // assert that the queue serializes the blocks assert.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() case <-queue.done(): wg.Wait() @@ -268,7 +268,7 @@ loop: case resp := <-queue.verifyNext(): require.GreaterOrEqual(t, resp.block.Height, initialHeight) - queue.success(resp.block.Height) + queue.success() } } } diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 70b12b7fa..98ace6e37 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -509,7 +509,7 @@ func (r *Reactor) backfill( } trustedBlockID = resp.block.LastBlockID - queue.success(resp.block.Height) + queue.success() r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) lastValidatorSet = resp.block.ValidatorSet diff --git a/node/node.go b/node/node.go index eeb301614..580121050 100644 --- a/node/node.go +++ b/node/node.go @@ -235,7 +235,7 @@ func makeNode(cfg *config.Config, p2pLogger := logger.With("module", "p2p") transport := createTransport(p2pLogger, cfg) - peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID) + peerManager, err := createPeerManager(cfg, dbProvider, nodeKey.ID) if err != nil { return nil, fmt.Errorf("failed to create peer manager: %w", err) } @@ -283,7 +283,7 @@ func makeNode(cfg *config.Config, // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. 
bcReactorShim, bcReactor, err := createBlockchainReactor( - logger, cfg, state, blockExec, blockStore, csReactor, + logger, state, blockExec, blockStore, csReactor, peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, ) if err != nil { @@ -352,7 +352,7 @@ func makeNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + pexReactor, err = createPEXReactorV2(logger, peerManager, router) if err != nil { return nil, err } @@ -446,7 +446,7 @@ func makeSeedNode(cfg *config.Config, p2pLogger := logger.With("module", "p2p") transport := createTransport(p2pLogger, cfg) - peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID) + peerManager, err := createPeerManager(cfg, dbProvider, nodeKey.ID) if err != nil { return nil, fmt.Errorf("failed to create peer manager: %w", err) } @@ -466,7 +466,7 @@ func makeSeedNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + pexReactor, err = createPEXReactorV2(logger, peerManager, router) if err != nil { return nil, err } diff --git a/node/setup.go b/node/setup.go index 893d7ccde..b7a2d19af 100644 --- a/node/setup.go +++ b/node/setup.go @@ -254,7 +254,6 @@ func createEvidenceReactor( func createBlockchainReactor( logger log.Logger, - cfg *config.Config, state sm.State, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, @@ -354,7 +353,6 @@ func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, - p2pLogger log.Logger, nodeID types.NodeID, ) (*p2p.PeerManager, error) { @@ -443,7 +441,6 @@ func createRouter( } func createPEXReactorV2( - cfg *config.Config, logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 222a6883f..b5d9debe9 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -79,7 +79,7 @@ func run(configFile string) error { case string(e2e.ModeLight): err = startLightNode(cfg) case string(e2e.ModeSeed): - err = startSeedNode(cfg) + err = startSeedNode() default: err = startNode(cfg) } @@ -140,7 +140,7 @@ func startNode(cfg *Config) error { return n.Start() } -func startSeedNode(cfg *Config) error { +func startSeedNode() error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) From f2a8f5e054cf99ebe246818bb6d71f41f9a30faa Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 8 Oct 2021 09:42:23 -0400 Subject: [PATCH 079/174] e2e: abci protocol should be consistent across networks (#7078) It seems weird in retrospect that we allow networks to contain applications that use different ABCI protocols. 
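
Concretely, the ABCI protocol is now declared once per network at the
top level of the testnet manifest rather than per node. A hypothetical
manifest sketch (node names and the database field are illustrative;
the abci_protocol key comes from the schema change below):

    abci_protocol = "builtin"

    [node.validator01]

    [node.validator02]
    database = "rocksdb"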
--- test/e2e/generator/generate.go | 3 +-- test/e2e/pkg/manifest.go | 12 ++++++------ test/e2e/pkg/testnet.go | 10 ++++++---- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 75342f4e7..61b4bf7d3 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -108,6 +108,7 @@ type Options struct { func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), + ABCIProtocol: nodeABCIProtocols.Choose(r), InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, @@ -275,7 +276,6 @@ func generateNode( Mode: string(mode), StartAt: startAt, Database: nodeDatabases.Choose(r), - ABCIProtocol: nodeABCIProtocols.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), Mempool: nodeMempools.Choose(r), StateSync: e2e.StateSyncDisabled, @@ -332,7 +332,6 @@ func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.Man Mode: string(e2e.ModeLight), StartAt: startAt, Database: nodeDatabases.Choose(r), - ABCIProtocol: "builtin", PersistInterval: ptrUint64(0), PersistentPeers: providers, } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index d5c9cb7f2..16b99cfda 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -65,6 +65,12 @@ type Manifest struct { // Number of bytes per tx. Default is 1kb (1024) TxSize int64 + + // ABCIProtocol specifies the protocol used to communicate with the ABCI + // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. + // builtin will build a complete Tendermint node into the application and + // launch it instead of launching a separate Tendermint process. + ABCIProtocol string `toml:"abci_protocol"` } // ManifestNode represents a node in a testnet manifest. @@ -87,12 +93,6 @@ type ManifestNode struct { // "rocksdb", "boltdb", or "badgerdb". Defaults to goleveldb. Database string `toml:"database"` - // ABCIProtocol specifies the protocol used to communicate with the ABCI - // application: "unix", "tcp", "grpc", or "builtin". Defaults to unix. - // builtin will build a complete Tendermint node into the application and - // launch it instead of launching a separate Tendermint process. - ABCIProtocol string `toml:"abci_protocol"` - // PrivvalProtocol specifies the protocol used to sign consensus messages: // "file", "unix", "tcp", or "grpc". Defaults to "file". For tcp and unix, the ABCI // application will launch a remote signer client in a separate goroutine. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index a6d5b2485..ee6a283aa 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -71,6 +71,7 @@ type Testnet struct { Evidence int LogLevel string TxSize int64 + ABCIProtocol string } // Node represents a Tendermint node in a testnet. @@ -140,6 +141,7 @@ func LoadTestnet(file string) (*Testnet, error) { KeyType: "ed25519", LogLevel: manifest.LogLevel, TxSize: manifest.TxSize, + ABCIProtocol: manifest.ABCIProtocol, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -150,6 +152,9 @@ func LoadTestnet(file string) (*Testnet, error) { if manifest.InitialHeight > 0 { testnet.InitialHeight = manifest.InitialHeight } + if testnet.ABCIProtocol == "" { + testnet.ABCIProtocol = string(ProtocolBuiltin) + } // Set up nodes, in alphabetical order (IPs and ports get same order). 
nodeNames := []string{} @@ -169,7 +174,7 @@ func LoadTestnet(file string) (*Testnet, error) { ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", - ABCIProtocol: ProtocolBuiltin, + ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, BlockSync: "v0", @@ -192,9 +197,6 @@ func LoadTestnet(file string) (*Testnet, error) { if nodeManifest.Database != "" { node.Database = nodeManifest.Database } - if nodeManifest.ABCIProtocol != "" { - node.ABCIProtocol = Protocol(nodeManifest.ABCIProtocol) - } if nodeManifest.PrivvalProtocol != "" { node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocol) } From 59404003eeb47797af8a9af8cb026abb7b68b0af Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 8 Oct 2021 16:53:54 +0200 Subject: [PATCH 080/174] p2p: rename pexV2 to pex (#7088) --- crypto/secp256k1/secp256k1_nocgo.go | 1 + internal/consensus/wal_fuzz.go | 1 + internal/libs/sync/deadlock.go | 1 + internal/libs/sync/sync.go | 1 + internal/p2p/conn/conn_go110.go | 1 + internal/p2p/conn/conn_notgo110.go | 1 + internal/p2p/pex/doc.go | 9 ++---- internal/p2p/pex/reactor.go | 43 ++++++++++++-------------- internal/p2p/pex/reactor_test.go | 12 +++---- node/node.go | 4 +-- node/setup.go | 4 +-- rpc/jsonrpc/client/integration_test.go | 1 + 12 files changed, 38 insertions(+), 41 deletions(-) diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index cba9bbe4c..6b52dc5d2 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -1,3 +1,4 @@ +//go:build !libsecp256k1 // +build !libsecp256k1 package secp256k1 diff --git a/internal/consensus/wal_fuzz.go b/internal/consensus/wal_fuzz.go index e15097c30..06d894a81 100644 --- a/internal/consensus/wal_fuzz.go +++ b/internal/consensus/wal_fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package consensus diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go index 637d6fbb1..21b5130ba 100644 --- a/internal/libs/sync/deadlock.go +++ b/internal/libs/sync/deadlock.go @@ -1,3 +1,4 @@ +//go:build deadlock // +build deadlock package sync diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go index a0880e7de..c6e7101c6 100644 --- a/internal/libs/sync/sync.go +++ b/internal/libs/sync/sync.go @@ -1,3 +1,4 @@ +//go:build !deadlock // +build !deadlock package sync diff --git a/internal/p2p/conn/conn_go110.go b/internal/p2p/conn/conn_go110.go index 682188101..459c3169b 100644 --- a/internal/p2p/conn/conn_go110.go +++ b/internal/p2p/conn/conn_go110.go @@ -1,3 +1,4 @@ +//go:build go1.10 // +build go1.10 package conn diff --git a/internal/p2p/conn/conn_notgo110.go b/internal/p2p/conn/conn_notgo110.go index ed642eb54..21dffad2c 100644 --- a/internal/p2p/conn/conn_notgo110.go +++ b/internal/p2p/conn/conn_notgo110.go @@ -1,3 +1,4 @@ +//go:build !go1.10 // +build !go1.10 package conn diff --git a/internal/p2p/pex/doc.go b/internal/p2p/pex/doc.go index dc4f5d37a..70a5f6174 100644 --- a/internal/p2p/pex/doc.go +++ b/internal/p2p/pex/doc.go @@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests addresses and serves addresses to other peers. There are two versions of this service aligning with the two p2p frameworks that Tendermint currently supports. 
-V1 is coupled with the Switch (which handles peer connections and routing of
-messages) and, alongside exchanging peer information in the form of port/IP
-pairs, also has the responsibility of dialing peers and ensuring that a
-node has a sufficient amount of peers connected.
-
-V2 is embedded with the new p2p stack and uses the peer manager to advertise
+The reactor is embedded with the new p2p stack and uses the peer manager to advertise
 peers as well as add new peers to the peer store. The V2 reactor passes a
 different set of proto messages which include a list of
 [urls](https://golang.org/pkg/net/url/#URL).These can be used to save a set of
 endpoints that each peer uses. The V2 reactor has backwards compatibility with
 V1. It can also handle V1 messages.
 
-The V2 reactor is able to tweak the intensity of it's search by decreasing or
+The reactor is able to tweak the intensity of its search by decreasing or
 increasing the interval between each request. It tracks connected peers via
 a linked list, sending a request to the node at the front of the list and
 adding it to the back of the list once a response is received. Using this method, a
diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go
index 300a6022d..f5c03adf8 100644
--- a/internal/p2p/pex/reactor.go
+++ b/internal/p2p/pex/reactor.go
@@ -17,7 +17,7 @@ import (
 )
 
 var (
- _ service.Service = (*ReactorV2)(nil)
+ _ service.Service = (*Reactor)(nil)
 _ p2p.Wrapper = (*protop2p.PexMessage)(nil)
 )
 
@@ -73,11 +73,6 @@ func ChannelDescriptor() conn.ChannelDescriptor {
 }
 }
 
-// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor
-// is Reactor.
-//
-// FIXME: Rename this when Reactor is removed, and consider moving to p2p/.
-//
 // The peer exchange or PEX reactor supports the peer manager by sending
 // requests to other peers for addresses that can be given to the peer manager
 // and at the same time advertises addresses to peers that need more.
@@ -86,7 +81,7 @@ func ChannelDescriptor() conn.ChannelDescriptor {
 // increasing the interval between each request. It tracks connected peers via
 // a linked list, sending a request to the node at the front of the list and
 // adding it to the back of the list once a response is received.
-type ReactorV2 struct {
+type Reactor struct {
 service.BaseService
 
 peerManager *p2p.PeerManager
@@ -125,14 +120,14 @@ type ReactorV2 struct {
 }
 
 // NewReactor returns a reference to a new reactor.
-func NewReactorV2(
+func NewReactor(
 logger log.Logger,
 peerManager *p2p.PeerManager,
 pexCh *p2p.Channel,
 peerUpdates *p2p.PeerUpdates,
-) *ReactorV2 {
+) *Reactor {
 
- r := &ReactorV2{
+ r := &Reactor{
 peerManager: peerManager,
 pexCh: pexCh,
 peerUpdates: peerUpdates,
@@ -150,7 +145,7 @@ func NewReactorV2(
 // envelopes on each. In addition, it also listens for peer updates and handles
 // messages on that p2p channel accordingly. The caller must be sure to execute
 // OnStop to ensure the outbound p2p Channels are closed.
-func (r *ReactorV2) OnStart() error {
+func (r *Reactor) OnStart() error {
 go r.processPexCh()
 go r.processPeerUpdates()
 return nil
@@ -158,7 +153,7 @@
 
 // OnStop stops the reactor by signaling to all spawned goroutines to exit and
 // blocking until they all exit.
-func (r *ReactorV2) OnStop() {
+func (r *Reactor) OnStop() {
 // Close closeCh to signal to all spawned goroutines to gracefully exit. All
 // p2p Channels should execute Close().
close(r.closeCh) @@ -172,7 +167,7 @@ func (r *ReactorV2) OnStop() { // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *ReactorV2) processPexCh() { +func (r *Reactor) processPexCh() { defer r.pexCh.Close() for { @@ -202,7 +197,7 @@ func (r *ReactorV2) processPexCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *ReactorV2) processPeerUpdates() { +func (r *Reactor) processPeerUpdates() { defer r.peerUpdates.Close() for { @@ -218,7 +213,7 @@ func (r *ReactorV2) processPeerUpdates() { } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { +func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -337,7 +332,7 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { // // FIXME: We may want to cache and parallelize this, but for now we'll just rely // on the operating system to cache it for us. -func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { +func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { limit := len(addresses) pexAddresses := make([]protop2p.PexAddress, 0, limit) @@ -380,7 +375,7 @@ func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) @@ -407,7 +402,7 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we // send a request for addresses. -func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { +func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() @@ -424,7 +419,7 @@ func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } } -func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time { +func (r *Reactor) waitUntilNextRequest() <-chan time.Time { return time.After(time.Until(r.nextRequestTime)) } @@ -432,7 +427,7 @@ func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time { // peer a request for more peer addresses. The function then moves the // peer into the requestsSent bucket and calculates when the next request // time should be -func (r *ReactorV2) sendRequestForPeers() { +func (r *Reactor) sendRequestForPeers() { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { @@ -480,7 +475,7 @@ func (r *ReactorV2) sendRequestForPeers() { // new nodes will plummet to a very small number, meaning the interval expands // to its upper bound. 
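// Illustration (ours, not part of the patch): the scheduling below is a
// simple multiplicative back-off,
//
//	r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio))
//
// so with a hypothetical baseTime of 10s and a discoveryRatio that has grown
// to 3, the next request fires roughly 30s out; a burst of newly discovered
// addresses shrinks the ratio and the interval contracts again.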
// CONTRACT: Must use a write lock as nextRequestTime is updated -func (r *ReactorV2) calculateNextRequestTime() { +func (r *Reactor) calculateNextRequestTime() { // check if the peer store is full. If so then there is no need // to send peer requests too often if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { @@ -516,7 +511,7 @@ func (r *ReactorV2) calculateNextRequestTime() { r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) } -func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { +func (r *Reactor) markPeerRequest(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { @@ -529,7 +524,7 @@ func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { return nil } -func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { +func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() // check if a request to this peer was sent @@ -546,7 +541,7 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { // all addresses must use a MCONN protocol for the peer to be considered part of the // legacy p2p pex system -func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool { +func (r *Reactor) isLegacyPeer(peer types.NodeID) bool { for _, addr := range r.peerManager.Addresses(peer) { if addr.Protocol != p2p.MConnProtocol { return false diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 63e479e6a..28476e67d 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -272,7 +272,7 @@ func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { } type singleTestReactor struct { - reactor *pex.ReactorV2 + reactor *pex.Reactor pexInCh chan p2p.Envelope pexOutCh chan p2p.Envelope pexErrCh chan p2p.PeerError @@ -301,7 +301,7 @@ func setupSingle(t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates) + reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates) require.NoError(t, reactor.Start()) t.Cleanup(func() { err := reactor.Stop() @@ -327,7 +327,7 @@ type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*pex.ReactorV2 + reactors map[types.NodeID]*pex.Reactor pexChannels map[types.NodeID]*p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate @@ -370,7 +370,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(t, networkOpts), - reactors: make(map[types.NodeID]*pex.ReactorV2, realNodes), + reactors: make(map[types.NodeID]*pex.Reactor, realNodes), pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), @@ -394,7 +394,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { if idx < opts.MockNodes { rts.mocks = append(rts.mocks, nodeID) } else { - rts.reactors[nodeID] = pex.NewReactorV2( + rts.reactors[nodeID] = pex.NewReactor( rts.logger.With("nodeID", nodeID), rts.network.Nodes[nodeID].PeerManager, rts.pexChannels[nodeID], @@ -452,7 +452,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { r.peerChans[nodeID] = 
make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) - r.reactors[nodeID] = pex.NewReactorV2( + r.reactors[nodeID] = pex.NewReactor( r.logger.With("nodeID", nodeID), r.network.Nodes[nodeID].PeerManager, r.pexChannels[nodeID], diff --git a/node/node.go b/node/node.go index 580121050..a6b8d8210 100644 --- a/node/node.go +++ b/node/node.go @@ -352,7 +352,7 @@ func makeNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactorV2(logger, peerManager, router) + pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { return nil, err } @@ -466,7 +466,7 @@ func makeSeedNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactorV2(logger, peerManager, router) + pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { return nil, err } diff --git a/node/setup.go b/node/setup.go index b7a2d19af..f6b9c028d 100644 --- a/node/setup.go +++ b/node/setup.go @@ -440,7 +440,7 @@ func createRouter( ) } -func createPEXReactorV2( +func createPEXReactor( logger log.Logger, peerManager *p2p.PeerManager, router *p2p.Router, @@ -452,7 +452,7 @@ func createPEXReactorV2( } peerUpdates := peerManager.Subscribe() - return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil + return pex.NewReactor(logger, peerManager, channel, peerUpdates), nil } func makeNodeInfo( diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go index 228bbb460..26f24d255 100644 --- a/rpc/jsonrpc/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -1,3 +1,4 @@ +//go:build release // +build release // The code in here is comprehensive as an integration From 3646b635d39cff885363b6c498b3ebe0e2f789b2 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 8 Oct 2021 12:29:20 -0400 Subject: [PATCH 081/174] p2p, types: remove legacy NetAddress type (#7084) --- internal/consensus/state_test.go | 23 +-- internal/p2p/errors.go | 10 +- internal/p2p/mock/peer.go | 69 ------- internal/p2p/mocks/peer.go | 319 ------------------------------ internal/p2p/netaddress.go | 11 -- internal/p2p/pex/errors.go | 78 -------- internal/p2p/router.go | 2 - internal/p2p/router_test.go | 6 +- internal/p2p/test_util.go | 32 --- internal/p2p/transport.go | 13 +- node/node.go | 4 +- types/netaddress.go | 329 ------------------------------- types/netaddress_test.go | 183 ----------------- types/node_id.go | 3 +- types/node_info.go | 76 +++++-- types/node_info_test.go | 77 ++++++++ 16 files changed, 168 insertions(+), 1067 deletions(-) delete mode 100644 internal/p2p/mock/peer.go delete mode 100644 internal/p2p/mocks/peer.go delete mode 100644 internal/p2p/netaddress.go delete mode 100644 internal/p2p/pex/errors.go delete mode 100644 internal/p2p/test_util.go delete mode 100644 types/netaddress.go delete mode 100644 types/netaddress_test.go diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 9b8f68a5e..a1db8276d 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/crypto/tmhash" cstypes 
"github.com/tendermint/tendermint/internal/consensus/types" - p2pmock "github.com/tendermint/tendermint/internal/p2p/mock" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -1864,7 +1863,8 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { // create dummy peer cs, _ := randState(config, 1) - peer := p2pmock.NewPeer(nil) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) // 1) new block part parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) @@ -1875,26 +1875,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{msg, "peer2"}) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) select { case <-cs.statsMsgQueue: @@ -1909,18 +1909,19 @@ func TestStateOutputVoteStats(t *testing.T) { cs, vss := randState(config, 2) // create dummy peer - peer := p2pmock.NewPeer(nil) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peer.ID()}) + cs.handleMsg(msgInfo{voteMessage, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) @@ -1929,7 +1930,7 @@ func TestStateOutputVoteStats(t *testing.T) { incrementHeight(vss[1]) vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) + cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID}) select { case <-cs.statsMsgQueue: diff --git a/internal/p2p/errors.go b/internal/p2p/errors.go index 648f2cb3a..d4df28792 100644 --- a/internal/p2p/errors.go +++ b/internal/p2p/errors.go @@ -17,7 +17,7 @@ func (e ErrFilterTimeout) Error() string { // ErrRejected indicates that a Peer was rejected carrying additional // information as to the reason. type ErrRejected struct { - addr NetAddress + addr NodeAddress conn net.Conn err error id types.NodeID @@ -30,7 +30,7 @@ type ErrRejected struct { } // Addr returns the NetAddress for the rejected Peer. -func (e ErrRejected) Addr() NetAddress { +func (e ErrRejected) Addr() NodeAddress { return e.addr } @@ -120,15 +120,15 @@ func (e ErrSwitchDuplicatePeerIP) Error() string { // ErrSwitchConnectToSelf to be raised when trying to connect to itself. 
type ErrSwitchConnectToSelf struct { - Addr *NetAddress + Addr *NodeAddress } func (e ErrSwitchConnectToSelf) Error() string { - return fmt.Sprintf("connect to self: %v", e.Addr) + return fmt.Sprintf("connect to self: %s", e.Addr) } type ErrSwitchAuthenticationFailure struct { - Dialed *NetAddress + Dialed *NodeAddress Got types.NodeID } diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go deleted file mode 100644 index 366f1d7fd..000000000 --- a/internal/p2p/mock/peer.go +++ /dev/null @@ -1,69 +0,0 @@ -package mock - -import ( - "net" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -type Peer struct { - *service.BaseService - ip net.IP - id types.NodeID - addr *p2p.NetAddress - kv map[string]interface{} - Outbound, Persistent bool -} - -// NewPeer creates and starts a new mock peer. If the ip -// is nil, random routable address is used. -func NewPeer(ip net.IP) *Peer { - var netAddr *p2p.NetAddress - if ip == nil { - _, netAddr = p2p.CreateRoutableAddr() - } else { - netAddr = types.NewNetAddressIPPort(ip, 26656) - } - nodeKey := types.GenNodeKey() - netAddr.ID = nodeKey.ID - mp := &Peer{ - ip: ip, - id: nodeKey.ID, - addr: netAddr, - kv: make(map[string]interface{}), - } - mp.BaseService = service.NewBaseService(nil, "MockPeer", mp) - if err := mp.Start(); err != nil { - panic(err) - } - return mp -} - -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: mp.addr.ID, - ListenAddr: mp.addr.DialString(), - } -} - -func (mp *Peer) ID() types.NodeID { return mp.id } -func (mp *Peer) IsOutbound() bool { return mp.Outbound } -func (mp *Peer) IsPersistent() bool { return mp.Persistent } -func (mp *Peer) Get(key string) interface{} { - if value, ok := mp.kv[key]; ok { - return value - } - return nil -} -func (mp *Peer) Set(key string, value interface{}) { - mp.kv[key] = value -} -func (mp *Peer) RemoteIP() net.IP { return mp.ip } -func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } -func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *Peer) CloseConn() error { return nil } diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go deleted file mode 100644 index 3e5d20325..000000000 --- a/internal/p2p/mocks/peer.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - log "github.com/tendermint/tendermint/libs/log" - - mock "github.com/stretchr/testify/mock" - - net "net" - - types "github.com/tendermint/tendermint/types" -) - -// Peer is an autogenerated mock type for the Peer type -type Peer struct { - mock.Mock -} - -// CloseConn provides a mock function with given fields: -func (_m *Peer) CloseConn() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FlushStop provides a mock function with given fields: -func (_m *Peer) FlushStop() { - _m.Called() -} - -// Get provides a mock function with given fields: _a0 -func (_m *Peer) Get(_a0 string) interface{} { - ret := _m.Called(_a0) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(string) interface{}); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - return r0 -} - -// ID provides a mock function with given fields: -func (_m *Peer) ID() types.NodeID { - ret := _m.Called() - - var r0 types.NodeID - if rf, ok := ret.Get(0).(func() types.NodeID); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeID) - } - - return r0 -} - -// IsOutbound provides a mock function with given fields: -func (_m *Peer) IsOutbound() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsPersistent provides a mock function with given fields: -func (_m *Peer) IsPersistent() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsRunning provides a mock function with given fields: -func (_m *Peer) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// NodeInfo provides a mock function with given fields: -func (_m *Peer) NodeInfo() types.NodeInfo { - ret := _m.Called() - - var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func() types.NodeInfo); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeInfo) - } - - return r0 -} - -// OnReset provides a mock function with given fields: -func (_m *Peer) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Peer) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Peer) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Peer) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// RemoteAddr provides a mock function with given fields: -func (_m *Peer) RemoteAddr() net.Addr { - ret := _m.Called() - - var r0 net.Addr - if rf, ok := ret.Get(0).(func() net.Addr); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.Addr) - } - } - - return r0 -} - -// RemoteIP provides a mock function with given fields: -func (_m *Peer) RemoteIP() net.IP { - 
ret := _m.Called() - - var r0 net.IP - if rf, ok := ret.Get(0).(func() net.IP); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.IP) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Peer) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Send provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Set provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Set(_a0 string, _a1 interface{}) { - _m.Called(_a0, _a1) -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Peer) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - -// SocketAddr provides a mock function with given fields: -func (_m *Peer) SocketAddr() *types.NetAddress { - ret := _m.Called() - - var r0 *types.NetAddress - if rf, ok := ret.Get(0).(func() *types.NetAddress); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.NetAddress) - } - } - - return r0 -} - -// Start provides a mock function with given fields: -func (_m *Peer) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Stop provides a mock function with given fields: -func (_m *Peer) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// String provides a mock function with given fields: -func (_m *Peer) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// TrySend provides a mock function with given fields: _a0, _a1 -func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Wait provides a mock function with given fields: -func (_m *Peer) Wait() { - _m.Called() -} diff --git a/internal/p2p/netaddress.go b/internal/p2p/netaddress.go deleted file mode 100644 index 6fce3a769..000000000 --- a/internal/p2p/netaddress.go +++ /dev/null @@ -1,11 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. 
-// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "github.com/tendermint/tendermint/types" -) - -type NetAddress = types.NetAddress diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go deleted file mode 100644 index 4d41cce07..000000000 --- a/internal/p2p/pex/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - - "github.com/tendermint/tendermint/internal/p2p" -) - -type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookNonRoutable) Error() string { - return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) -} - -type ErrAddrBookSelf struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookSelf) Error() string { - return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) -} - -type ErrAddrBookPrivate struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookPrivate) Error() string { - return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) -} - -func (err ErrAddrBookPrivate) PrivateAddr() bool { - return true -} - -type ErrAddrBookPrivateSrc struct { - Src *p2p.NetAddress -} - -func (err ErrAddrBookPrivateSrc) Error() string { - return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) -} - -func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { - return true -} - -type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress -} - -func (err ErrAddrBookNilAddr) Error() string { - return fmt.Sprintf("Cannot add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) -} - -type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress - AddrErr error -} - -func (err ErrAddrBookInvalidAddr) Error() string { - return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) -} - -// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used -type ErrAddressBanned struct { - Addr *p2p.NetAddress -} - -func (err ErrAddressBanned) Error() string { - return fmt.Sprintf("Address: %v is currently banned", err.Addr) -} - -// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. -var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 8b96bc93a..f7d012b49 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -1012,13 +1012,11 @@ func (r *Router) NodeInfo() types.NodeInfo { // OnStart implements service.Service. func (r *Router) OnStart() error { - netAddr, _ := r.nodeInfo.NetAddress() r.Logger.Info( "starting router", "node_id", r.nodeInfo.NodeID, "channels", r.nodeInfo.Channels, "listen_addr", r.nodeInfo.ListenAddr, - "net_addr", netAddr, ) go r.dialPeers() diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 436e3f004..c12f1a2f0 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -367,7 +367,7 @@ func TestRouter_AcceptPeers(t *testing.T) { mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. 
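// A note on the Once() -> Maybe() changes above: in testify, an expectation
// registered with Once() must be matched exactly once, while Maybe() marks it
// as optional, so the terminal io.EOF Accept no longer has to fire for the
// mock's expectations to hold. The motivation is our inference from the diff:
// it stops the test from failing when the router shuts down before draining
// one last Accept.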
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -755,7 +755,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -869,7 +869,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept").Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go deleted file mode 100644 index d29709a89..000000000 --- a/internal/p2p/test_util.go +++ /dev/null @@ -1,32 +0,0 @@ -package p2p - -import ( - "fmt" - mrand "math/rand" - - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -//------------------------------------------------ - -// nolint:gosec // G404: Use of weak random number generator -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", - tmrand.Bytes(20), - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256) - netAddr, err = types.NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index 8440889d4..b49b096bb 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -122,12 +122,17 @@ type Endpoint struct { } // NewEndpoint constructs an Endpoint from a types.NetAddress structure. -func NewEndpoint(na *types.NetAddress) Endpoint { +func NewEndpoint(addr string) (Endpoint, error) { + ip, port, err := types.ParseAddressString(addr) + if err != nil { + return Endpoint{}, err + } + return Endpoint{ Protocol: MConnProtocol, - IP: na.IP, - Port: na.Port, - } + IP: ip, + Port: port, + }, nil } // NodeAddress converts the endpoint into a NodeAddress for the given node ID. diff --git a/node/node.go b/node/node.go index a6b8d8210..87e9b0dbd 100644 --- a/node/node.go +++ b/node/node.go @@ -520,11 +520,11 @@ func (n *nodeImpl) OnStart() error { } // Start the transport. - addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) + ep, err := p2p.NewEndpoint(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) if err != nil { return err } - if err := n.transport.Listen(p2p.NewEndpoint(addr)); err != nil { + if err := n.transport.Listen(ep); err != nil { return err } diff --git a/types/netaddress.go b/types/netaddress.go deleted file mode 100644 index bc074dca6..000000000 --- a/types/netaddress.go +++ /dev/null @@ -1,329 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. 
-// https://github.com/conformal/btcd/blob/master/LICENSE - -package types - -import ( - "errors" - "flag" - "fmt" - "net" - "strconv" - "strings" - "time" -) - -// EmptyNetAddress defines the string representation of an empty NetAddress -const EmptyNetAddress = "" - -// NetAddress defines information about a peer on the network -// including its ID, IP address, and port. -type NetAddress struct { - ID NodeID `json:"id"` - IP net.IP `json:"ip"` - Port uint16 `json:"port"` -} - -// NewNetAddress returns a new NetAddress using the provided TCP -// address. When testing, other net.Addr (except TCP) will result in -// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will -// panic. Panics if ID is invalid. -// TODO: socks proxies? -func NewNetAddress(id NodeID, addr net.Addr) *NetAddress { - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - if flag.Lookup("test.v") == nil { // normal run - panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) - } else { // in testing - netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0) - netAddr.ID = id - return netAddr - } - } - - if err := id.Validate(); err != nil { - panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr)) - } - - ip := tcpAddr.IP - port := uint16(tcpAddr.Port) - na := NewNetAddressIPPort(ip, port) - na.ID = id - return na -} - -// NewNetAddressIPPort returns a new NetAddress using the provided IP -// and port number. -func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { - return &NetAddress{ - IP: ip, - Port: port, - } -} - -// NewNetAddressString returns a new NetAddress using the provided address in -// the form of "ID@IP:Port". -// Also resolves the host if host is not an IP. -// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup) -func NewNetAddressString(addr string) (*NetAddress, error) { - addrWithoutProtocol := removeProtocolIfDefined(addr) - spl := strings.Split(addrWithoutProtocol, "@") - if len(spl) != 2 { - return nil, ErrNetAddressNoID{addr} - } - - id, err := NewNodeID(spl[0]) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - - if err := id.Validate(); err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - - addrWithoutProtocol = spl[1] - - // get host and port - host, portStr, err := net.SplitHostPort(addrWithoutProtocol) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - if len(host) == 0 { - return nil, ErrNetAddressInvalid{ - addrWithoutProtocol, - errors.New("host is empty")} - } - - ip := net.ParseIP(host) - if ip == nil { - ips, err := net.LookupIP(host) - if err != nil { - return nil, ErrNetAddressLookup{host, err} - } - ip = ips[0] - } - - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, ErrNetAddressInvalid{portStr, err} - } - - na := NewNetAddressIPPort(ip, uint16(port)) - na.ID = id - return na, nil -} - -// Equals reports whether na and other are the same addresses, -// including their ID, IP, and Port. -func (na *NetAddress) Equals(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { - return na.String() == o.String() - } - return false -} - -// Same returns true is na has the same non-empty ID or DialString as other. 
-func (na *NetAddress) Same(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { - if na.DialString() == o.DialString() { - return true - } - if na.ID != "" && na.ID == o.ID { - return true - } - } - return false -} - -// String representation: @: -func (na *NetAddress) String() string { - if na == nil { - return EmptyNetAddress - } - - addrStr := na.DialString() - if na.ID != "" { - addrStr = na.ID.AddressString(addrStr) - } - - return addrStr -} - -func (na *NetAddress) DialString() string { - if na == nil { - return "" - } - return net.JoinHostPort( - na.IP.String(), - strconv.FormatUint(uint64(na.Port), 10), - ) -} - -// Dial calls net.Dial on the address. -func (na *NetAddress) Dial() (net.Conn, error) { - conn, err := net.Dial("tcp", na.DialString()) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeout calls net.DialTimeout on the address. -func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) { - conn, err := net.DialTimeout("tcp", na.DialString(), timeout) - if err != nil { - return nil, err - } - return conn, nil -} - -// Routable returns true if the address is routable. -func (na *NetAddress) Routable() bool { - if err := na.Valid(); err != nil { - return false - } - // TODO(oga) bitcoind doesn't include RFC3849 here, but should we? - return !(na.RFC1918() || na.RFC3927() || na.RFC4862() || - na.RFC4193() || na.RFC4843() || na.Local()) -} - -// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero -// address or one that matches the RFC3849 documentation address format. -func (na *NetAddress) Valid() error { - if err := na.ID.Validate(); err != nil { - return fmt.Errorf("invalid ID: %w", err) - } - - if na.IP == nil { - return errors.New("no IP") - } - if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) { - return errors.New("invalid IP") - } - return nil -} - -// Local returns true if it is a local address. -func (na *NetAddress) Local() bool { - return na.IP.IsLoopback() || zero4.Contains(na.IP) -} - -// ReachabilityTo checks whenever o can be reached from na. -func (na *NetAddress) ReachabilityTo(o *NetAddress) int { - const ( - Unreachable = 0 - Default = iota - Teredo - Ipv6Weak - Ipv4 - Ipv6Strong - ) - switch { - case !na.Routable(): - return Unreachable - case na.RFC4380(): - switch { - case !o.Routable(): - return Default - case o.RFC4380(): - return Teredo - case o.IP.To4() != nil: - return Ipv4 - default: // ipv6 - return Ipv6Weak - } - case na.IP.To4() != nil: - if o.Routable() && o.IP.To4() != nil { - return Ipv4 - } - return Default - default: /* ipv6 */ - var tunneled bool - // Is our v6 is tunneled? - if o.RFC3964() || o.RFC6052() || o.RFC6145() { - tunneled = true - } - switch { - case !o.Routable(): - return Default - case o.RFC4380(): - return Teredo - case o.IP.To4() != nil: - return Ipv4 - case tunneled: - // only prioritize ipv6 if we aren't tunneling it. 
- return Ipv6Weak - } - return Ipv6Strong - } -} - -// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) -// RFC3849: IPv6 Documentation address (2001:0DB8::/32) -// RFC3927: IPv4 Autoconfig (169.254.0.0/16) -// RFC3964: IPv6 6to4 (2002::/16) -// RFC4193: IPv6 unique local (FC00::/7) -// RFC4380: IPv6 Teredo tunneling (2001::/32) -// RFC4843: IPv6 ORCHID: (2001:10::/28) -// RFC4862: IPv6 Autoconfig (FE80::/64) -// RFC6052: IPv6 well known prefix (64:FF9B::/96) -// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96 -var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)} -var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)} -var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)} -var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)} -var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)} -var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)} -var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)} -var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)} -var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)} -var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)} -var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)} -var ( - // onionCatNet defines the IPv6 address block used to support Tor. - // bitcoind encodes a .onion address as a 16 byte number by decoding the - // address prior to the .onion (i.e. the key hash) base32 into a ten - // byte number. It then stores the first 6 bytes of the address as - // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43. - // - // This is the same range used by OnionCat, which is part part of the - // RFC4193 unique local IPv6 range. - // - // In summary the format is: - // { magic 6 bytes, 10 bytes base32 decode of key hash } - onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128) -) - -func (na *NetAddress) RFC1918() bool { - return rfc1918_10.Contains(na.IP) || - rfc1918_192.Contains(na.IP) || - rfc1918_172.Contains(na.IP) -} -func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } -func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } -func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } -func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } -func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } -func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } -func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } -func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } -func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } -func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } - -func removeProtocolIfDefined(addr string) string { - if strings.Contains(addr, "://") { - return strings.Split(addr, "://")[1] - } - return addr - -} - -// ipNet returns a net.IPNet struct given the passed IP address string, number -// of one bits to include at the start of the mask, and the total number of bits -// for the mask. 
-func ipNet(ip string, ones, bits int) net.IPNet { - return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)} -} diff --git a/types/netaddress_test.go b/types/netaddress_test.go deleted file mode 100644 index 393d70e0b..000000000 --- a/types/netaddress_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package types - -import ( - "net" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNetAddress_String(t *testing.T) { - tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") - require.Nil(t, err) - - netAddr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) - - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - _ = netAddr.String() - }() - } - - wg.Wait() - - s := netAddr.String() - require.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", s) -} - -func TestNewNetAddress(t *testing.T) { - tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") - require.Nil(t, err) - - assert.Panics(t, func() { - NewNetAddress("", tcpAddr) - }) - - addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) - assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String()) - - assert.NotPanics(t, func() { - NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) - }, "Calling NewNetAddress with UDPAddr should not panic in testing") -} - -func TestNewNetAddressString(t *testing.T) { - testCases := []struct { - name string - addr string - expected string - correct bool - }{ - {"no node id and no protocol", "127.0.0.1:8080", "", false}, - {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, - {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, - - { - "no protocol", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - { - "tcp input", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - { - "udp input", - "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - // {"127.0.0:8080", false}, - {"invalid host", "notahost", "", false}, - {"invalid port", "127.0.0.1:notapath", "", false}, - {"invalid host w/ port", "notahost:8080", "", false}, - {"just a port", "8082", "", false}, - {"non-existent port", "127.0.0:8080000", "", false}, - - {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, - {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, - {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, - {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - { - "correct nodeId w/tcp", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - - {"no node id", "tcp://@127.0.0.1:8080", "", false}, - {"no node id or IP", "tcp://@", "", false}, - {"tcp no host, w/ port", 
"tcp://:26656", "", false}, - {"empty", "", "", false}, - {"node id delimiter 1", "@", "", false}, - {"node id delimiter 2", " @", "", false}, - {"node id delimiter 3", " @ ", "", false}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - }) - } -} - -func TestNewNetAddressIPPort(t *testing.T) { - addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) - assert.Equal(t, "127.0.0.1:8080", addr.String()) -} - -func TestNetAddressProperties(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - valid bool - local bool - routable bool - }{ - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - err = addr.Valid() - if tc.valid { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tc.local, addr.Local()) - assert.Equal(t, tc.routable, addr.Routable()) - } -} - -func TestNetAddressReachabilityTo(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - other string - reachability int - }{ - { - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", - 0, - }, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - other, err := NewNetAddressString(tc.other) - require.Nil(t, err) - - assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) - } -} diff --git a/types/node_id.go b/types/node_id.go index c260aa117..a5db40159 100644 --- a/types/node_id.go +++ b/types/node_id.go @@ -31,8 +31,7 @@ func NewNodeID(nodeID string) (NodeID, error) { // IDAddressString returns id@hostPort. It strips the leading // protocol from protocolHostPort if it exists. func (id NodeID) AddressString(protocolHostPort string) string { - hostPort := removeProtocolIfDefined(protocolHostPort) - return fmt.Sprintf("%s@%s", id, hostPort) + return fmt.Sprintf("%s@%s", id, removeProtocolIfDefined(protocolHostPort)) } // NodeIDFromPubKey creates a node ID from a given PubKey address. diff --git a/types/node_info.go b/types/node_info.go index 9dbdbf70d..902ca759b 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -3,6 +3,9 @@ package types import ( "errors" "fmt" + "net" + "strconv" + "strings" "github.com/tendermint/tendermint/libs/bytes" tmstrings "github.com/tendermint/tendermint/libs/strings" @@ -74,17 +77,10 @@ func (info NodeInfo) ID() NodeID { // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). func (info NodeInfo) Validate() error { - - // ID is already validated. - - // Validate ListenAddr. - _, err := NewNetAddressString(info.ID().AddressString(info.ListenAddr)) - if err != nil { + if _, _, err := ParseAddressString(info.ID().AddressString(info.ListenAddr)); err != nil { return err } - // Network is validated in CompatibleWith. 
- // Validate Version if len(info.Version) > 0 && (!tmstrings.IsASCIIText(info.Version) || tmstrings.ASCIITrim(info.Version) == "") { @@ -163,15 +159,6 @@ OUTER_LOOP: return nil } -// NetAddress returns a NetAddress derived from the NodeInfo - -// it includes the authenticated peer ID and the self-reported -// ListenAddr. Note that the ListenAddr is not authenticated and -// may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() (*NetAddress, error) { - idAddr := info.ID().AddressString(info.ListenAddr) - return NewNetAddressString(idAddr) -} - // AddChannel is used by the router when a channel is opened to add it to the node info func (info *NodeInfo) AddChannel(channel uint16) { // check that the channel doesn't already exist @@ -244,3 +231,58 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) { return dni, nil } + +// ParseAddressString reads an address string, and returns the IP +// address and port information, returning an error for any validation +// errors. +func ParseAddressString(addr string) (net.IP, uint16, error) { + addrWithoutProtocol := removeProtocolIfDefined(addr) + spl := strings.Split(addrWithoutProtocol, "@") + if len(spl) != 2 { + return nil, 0, errors.New("invalid address") + } + + id, err := NewNodeID(spl[0]) + if err != nil { + return nil, 0, err + } + + if err := id.Validate(); err != nil { + return nil, 0, err + } + + addrWithoutProtocol = spl[1] + + // get host and port + host, portStr, err := net.SplitHostPort(addrWithoutProtocol) + if err != nil { + return nil, 0, err + } + if len(host) == 0 { + return nil, 0, err + } + + ip := net.ParseIP(host) + if ip == nil { + ips, err := net.LookupIP(host) + if err != nil { + return nil, 0, err + } + ip = ips[0] + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} + +func removeProtocolIfDefined(addr string) string { + if strings.Contains(addr, "://") { + return strings.Split(addr, "://")[1] + } + return addr + +} diff --git a/types/node_info_test.go b/types/node_info_test.go index 812cec184..ff30aa30a 100644 --- a/types/node_info_test.go +++ b/types/node_info_test.go @@ -173,3 +173,80 @@ func TestNodeInfoAddChannel(t *testing.T) { nodeInfo.AddChannel(2) require.Contains(t, nodeInfo.Channels, byte(0x02)) } + +func TestParseAddressString(t *testing.T) { + testCases := []struct { + name string + addr string + expected string + correct bool + }{ + {"no node id and no protocol", "127.0.0.1:8080", "", false}, + {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, + {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, + + { + "no protocol", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "tcp input", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "udp input", + "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + // {"127.0.0:8080", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", 
false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + { + "correct nodeId w/tcp", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + + {"no node id", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + {"node id delimiter 3", " @ ", "", false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + addr, port, err := ParseAddressString(tc.addr) + if tc.correct { + require.Nil(t, err, tc.addr) + assert.Contains(t, tc.expected, addr.String()) + assert.Contains(t, tc.expected, fmt.Sprint(port)) + } else { + assert.Error(t, err, "%v", tc.addr) + } + }) + } +} From befd669794af282733ecccabb50d3f18de154c61 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 9 Oct 2021 00:20:09 -0400 Subject: [PATCH 082/174] e2e: light nodes should use builtin abci app (#7095) --- test/e2e/pkg/testnet.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index ee6a283aa..d0770ce8d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -187,13 +187,15 @@ func LoadTestnet(file string) (*Testnet, error) { LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, } - if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this } if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) } + if node.Mode == ModeLight { + node.ABCIProtocol = ProtocolBuiltin + } if nodeManifest.Database != "" { node.Database = nodeManifest.Database } From ded310093e0d771c9ed27f296921cb6b23d99f29 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Sat, 9 Oct 2021 11:33:54 -0400 Subject: [PATCH 083/174] lint: fix collection of stale errors (#7090) Few things that had been annoying. 
--- .github/workflows/lint.yml | 2 +- .golangci.yml | 4 ++-- internal/consensus/msgs.go | 2 +- internal/libs/protoio/io_test.go | 2 +- internal/mempool/ids.go | 16 +++++++--------- internal/mempool/v0/reactor.go | 2 +- internal/mempool/v1/reactor.go | 2 +- internal/p2p/peermanager.go | 6 +++--- internal/state/state.go | 4 ++-- internal/state/state_test.go | 2 +- internal/state/store.go | 2 +- internal/state/validation_test.go | 4 ++-- internal/statesync/reactor.go | 2 +- internal/store/store.go | 4 ++-- libs/json/helpers_test.go | 1 - node/node_test.go | 2 +- types/validation.go | 10 +++++----- types/validator_set_test.go | 2 +- 18 files changed, 33 insertions(+), 36 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 79cb3685b..3e257e47c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -23,7 +23,7 @@ jobs: - uses: golangci/golangci-lint-action@v2.5.2 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.38 + version: v1.42.1 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.golangci.yml b/.golangci.yml index b62f926e2..e0f3fe163 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,12 +13,12 @@ linters: # - gochecknoinits # - gocognit - goconst - - gocritic + # - gocritic # - gocyclo # - godox - gofmt - goimports - - golint + - revive - gosec - gosimple - govet diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 17aef9aa2..052b8f556 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -77,7 +77,7 @@ func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error { m.LastCommitRound, initialHeight) } if m.Height > initialHeight && m.LastCommitRound < 0 { - return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint + return fmt.Errorf("LastCommitRound can only be negative for initial height %v", initialHeight) } return nil diff --git a/internal/libs/protoio/io_test.go b/internal/libs/protoio/io_test.go index 2f1437c68..a84b34c00 100644 --- a/internal/libs/protoio/io_test.go +++ b/internal/libs/protoio/io_test.go @@ -71,7 +71,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { return err } if n != len(bz)+visize { - return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) // nolint + return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) } lens[i] = n } diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 49a9ac607..656f5b74c 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -7,17 +7,15 @@ import ( "github.com/tendermint/tendermint/types" ) -// nolint: golint -// TODO: Rename type. -type MempoolIDs struct { +type IDs struct { mtx tmsync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } -func NewMempoolIDs() *MempoolIDs { - return &MempoolIDs{ +func NewMempoolIDs() *IDs { + return &IDs{ peerMap: make(map[types.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx @@ -28,7 +26,7 @@ func NewMempoolIDs() *MempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. 
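// A brief usage sketch of the renamed type (caller code is hypothetical; note
// the constructor keeps its NewMempoolIDs name):
//
//	ids := mempool.NewMempoolIDs() // now returns *IDs
//	ids.ReserveForPeer(peerID)     // reserve the next free uint16 ID
//	id := ids.GetForPeer(peerID)   // look it up later
//	ids.Reclaim(peerID)            // return it to the unused pool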
-func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { +func (ids *IDs) ReserveForPeer(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -38,7 +36,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { +func (ids *IDs) Reclaim(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -50,7 +48,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { } // GetForPeer returns an ID reserved for the peer. -func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { +func (ids *IDs) GetForPeer(peerID types.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() @@ -59,7 +57,7 @@ func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { // nextPeerID returns the next unused peer ID to use. We assume that the mutex // is already held. -func (ids *MempoolIDs) nextPeerID() uint16 { +func (ids *IDs) nextPeerID() uint16 { if len(ids.activeIDs) == MaxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs)) } diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 80362a04f..86392e96f 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -39,7 +39,7 @@ type Reactor struct { cfg *config.MempoolConfig mempool *CListMempool - ids *mempool.MempoolIDs + ids *mempool.IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index eff5b7ec0..f07808387 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -39,7 +39,7 @@ type Reactor struct { cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.MempoolIDs + ids *mempool.IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. 
diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 1e9afb38b..7ccc0d59c 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -180,7 +180,7 @@ func (o *PeerManagerOptions) Validate() error { if o.MaxPeers > 0 { if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers { - return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", // nolint + return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers) } } @@ -190,7 +190,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTime without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTime { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", // nolint + return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", o.MinRetryTime, o.MaxRetryTime) } } @@ -200,7 +200,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTimePersistent without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTimePersistent { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", // nolint + return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", o.MinRetryTime, o.MaxRetryTimePersistent) } } diff --git a/internal/state/state.go b/internal/state/state.go index ce9990cc2..1b3c8f16e 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -195,8 +195,8 @@ func (state *State) ToProto() (*tmstate.State, error) { return sm, nil } -// StateFromProto takes a state proto message & returns the local state type -func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint +// FromProto takes a state proto message & returns the local state type +func FromProto(pb *tmstate.State) (*State, error) { if pb == nil { return nil, errors.New("nil State") } diff --git a/internal/state/state_test.go b/internal/state/state_test.go index ab73bc90c..8c0144abd 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -1079,7 +1079,7 @@ func TestStateProto(t *testing.T) { assert.NoError(t, err, tt.testName) } - smt, err := sm.StateFromProto(pbs) + smt, err := sm.FromProto(pbs) if tt.expPass2 { require.NoError(t, err, tt.testName) require.Equal(t, tt.state, smt, tt.testName) diff --git a/internal/state/store.go b/internal/state/store.go index 787b5c8ad..aff165aa1 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -130,7 +130,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) { %v\n`, err)) } - sm, err := StateFromProto(sp) + sm, err := FromProto(sp) if err != nil { return state, err } diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 08c54f139..eb0cebbb7 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -270,7 +270,7 @@ func TestValidateBlockEvidence(t *testing.T) { A block with too much evidence fails */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), @@ -290,7 +290,7 @@ func TestValidateBlockEvidence(t *testing.T) { A good block with several pieces of good evidence passes */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // precisely the amount of allowed 
evidence for { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 98ace6e37..74803b3e2 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -734,7 +734,7 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { } case *ssproto.LightBlockResponse: - var height int64 = 0 + var height int64 if msg.LightBlock != nil { height = msg.LightBlock.SignedHeader.Header.Height } diff --git a/internal/store/store.go b/internal/store/store.go index 8848b76d9..c978241ff 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -345,7 +345,7 @@ func (bs *BlockStore) pruneRange( var ( err error pruned uint64 - totalPruned uint64 = 0 + totalPruned uint64 ) batch := bs.db.NewBatch() @@ -392,7 +392,7 @@ func (bs *BlockStore) batchDelete( start, end []byte, preDeletionHook func(key, value []byte, batch dbm.Batch) error, ) (uint64, []byte, error) { - var pruned uint64 = 0 + var pruned uint64 iter, err := bs.db.Iterator(start, end) if err != nil { return pruned, start, err diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index a87bc51f1..ccb3c0038 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -61,7 +61,6 @@ func (c CustomValue) MarshalJSON() ([]byte, error) { } func (c CustomValue) UnmarshalJSON(bz []byte) error { - c.Value = "custom" return nil } diff --git a/node/node_test.go b/node/node_test.go index 30e7a8f13..61fd3fa21 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -249,7 +249,7 @@ func TestCreateProposalBlock(t *testing.T) { // fill the evidence pool with more evidence // than can fit in a block - var currentBytes int64 = 0 + var currentBytes int64 for currentBytes <= maxEvidenceBytes { ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") currentBytes += int64(len(ev.Bytes())) diff --git a/types/validation.go b/types/validation.go index 1bf0265db..e8f53f2a0 100644 --- a/types/validation.go +++ b/types/validation.go @@ -162,9 +162,9 @@ func verifyCommitBatch( var ( val *Validator valIdx int32 - seenVals = make(map[int32]int, len(commit.Signatures)) - batchSigIdxs = make([]int, 0, len(commit.Signatures)) - talliedVotingPower int64 = 0 + talliedVotingPower int64 + seenVals = make(map[int32]int, len(commit.Signatures)) + batchSigIdxs = make([]int, 0, len(commit.Signatures)) ) // attempt to create a batch verifier bv, ok := batch.CreateBatchVerifier(vals.GetProposer().PubKey) @@ -275,9 +275,9 @@ func verifyCommitSingle( var ( val *Validator valIdx int32 - seenVals = make(map[int32]int, len(commit.Signatures)) - talliedVotingPower int64 = 0 + talliedVotingPower int64 voteSignBytes []byte + seenVals = make(map[int32]int, len(commit.Signatures)) ) for idx, commitSig := range commit.Signatures { if ignoreSig(commitSig) { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index a69121344..87008bb1c 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -508,7 +508,7 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) { {Address: []byte("c"), ProposerPriority: 1}}}, // this should average twice but the average should be 0 after the first iteration // (voting power is 0 -> no changes) - 11, 1 / 3}, + 11, 0}, 2: {ValidatorSet{ Validators: []*Validator{ {Address: []byte("a"), ProposerPriority: 100}, From 48295955edfd50b24fef42f1e96f7c54333c1ca8 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 11 Oct 2021 16:21:45 -0700 Subject: [PATCH 084/174] light: Update links in package docs. (#7099) Fixes #7098. The light client documentation moved to the spec repository. I was not able to figure out what happened to light-client-protocol.md, it was removed in #5252 but no corresponding file exists in the spec repository. Since the spec also discusses the protocol, this change simply links to the spec and removes the non-functional reference. Alternatively we could link to the top-level [light client doc](https://docs.tendermint.com/master/tendermint-core/light-client.html) if you think that's better. --- light/doc.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/light/doc.go b/light/doc.go index 700bbeb6c..c30c68eb0 100644 --- a/light/doc.go +++ b/light/doc.go @@ -94,7 +94,7 @@ Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. See -https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md +https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md for details. There are two methods of verification: sequential and bisection @@ -118,10 +118,7 @@ as a wrapper, which verifies all the headers, using a light client connected to some other node. See -https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html -for usage example. -Or see -https://github.com/tendermint/spec/tree/master/spec/consensus/light-client -for the full spec +https://github.com/tendermint/spec/tree/master/spec/light-client +for the light client specification. */ package light From 34a3fcd8fcfef1118b2f12113fb04e6d50d34f15 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 12 Oct 2021 11:29:31 -0400 Subject: [PATCH 085/174] Revert "abci: change client to use multi-reader mutexes (#6306)" (#7106) This reverts commit 1c4dbe30d40bb4c1dc59a2247d5113b78702a014. --- abci/client/client.go | 8 ++++---- abci/client/creators.go | 2 +- abci/client/grpc_client.go | 8 ++++---- abci/client/local_client.go | 26 ++++++++++++-------------- abci/client/socket_client.go | 8 ++++---- internal/consensus/byzantine_test.go | 2 +- internal/consensus/common_test.go | 2 +- internal/consensus/reactor_test.go | 2 +- 8 files changed, 28 insertions(+), 30 deletions(-) diff --git a/abci/client/client.go b/abci/client/client.go index 93f8b9293..a38c7f81b 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -87,7 +87,7 @@ type ReqRes struct { *sync.WaitGroup *types.Response // Not set atomically, so be sure to use WaitGroup. - mtx tmsync.RWMutex + mtx tmsync.Mutex done bool // Gets set to true once *after* WaitGroup.Done(). cb func(*types.Response) // A single callback that may be set. } @@ -137,16 +137,16 @@ func (r *ReqRes) InvokeCallback() { // // ref: https://github.com/tendermint/tendermint/issues/5439 func (r *ReqRes) GetCallback() func(*types.Response) { - r.mtx.RLock() - defer r.mtx.RUnlock() + r.mtx.Lock() + defer r.mtx.Unlock() return r.cb } // SetDone marks the ReqRes object as done. 
func (r *ReqRes) SetDone() { r.mtx.Lock() - defer r.mtx.Unlock() r.done = true + r.mtx.Unlock() } func waitGroup1() (wg *sync.WaitGroup) { diff --git a/abci/client/creators.go b/abci/client/creators.go index 5a71becaf..e17b15eca 100644 --- a/abci/client/creators.go +++ b/abci/client/creators.go @@ -13,7 +13,7 @@ type Creator func() (Client, error) // NewLocalCreator returns a Creator for the given app, // which will be running locally. func NewLocalCreator(app types.Application) Creator { - mtx := new(tmsync.RWMutex) + mtx := new(tmsync.Mutex) return func() (Client, error) { return NewLocalClient(mtx, app), nil diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 2bfa047bd..f1123fab5 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -24,7 +24,7 @@ type grpcClient struct { conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx tmsync.RWMutex + mtx tmsync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks @@ -149,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) { } func (cli *grpcClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -158,8 +158,8 @@ func (cli *grpcClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *grpcClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() - defer cli.mtx.Unlock() cli.resCb = resCb + cli.mtx.Unlock() } //---------------------------------------- diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 06ff8171c..701108a3c 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -15,7 +15,7 @@ import ( type localClient struct { service.BaseService - mtx *tmsync.RWMutex + mtx *tmsync.Mutex types.Application Callback } @@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. -func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client { +func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { if mtx == nil { - mtx = &tmsync.RWMutex{} + mtx = new(tmsync.Mutex) } - cli := &localClient{ mtx: mtx, Application: app, } - cli.BaseService = *service.NewBaseService(nil, "localClient", cli) return cli } func (app *localClient) SetResponseCallback(cb Callback) { app.mtx.Lock() - defer app.mtx.Unlock() app.Callback = cb + app.mtx.Unlock() } // TODO: change types.Application to include Error()? 
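Every hunk in this revert has the same shape: read-only accessors that had been downgraded to a shared RLock/RUnlock go back to the exclusive Lock/Unlock. A self-contained sketch of the restored pattern, using the standard library's sync.Mutex as a stand-in for tmsync.Mutex (an assumption made for portability; the tmsync type mirrors the standard API):

package main

import (
	"fmt"
	"sync"
)

// guarded is a toy stand-in for the grpc/local/socket clients in this patch.
type guarded struct {
	mtx sync.Mutex // the reverted code used an RWMutex here
	err error
}

// Error is a read-only accessor. The RWMutex version took mtx.RLock(), so
// concurrent readers did not serialize; after the revert, readers and writers
// all contend for the single exclusive lock, trading some read throughput for
// the simpler locking discipline the revert restores.
func (g *guarded) Error() error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	return g.err
}

func main() {
	fmt.Println(new(guarded).Error()) // prints <nil>
}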
@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err } func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return app.callback( @@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck } func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return app.callback( @@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon } func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return &res, nil @@ -249,8 +247,8 @@ func (app *localClient) QuerySync( ctx context.Context, req types.RequestQuery, ) (*types.ResponseQuery, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return &res, nil diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 8a4598fe5..726c554d4 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -39,7 +39,7 @@ type socketClient struct { reqQueue chan *reqResWithContext - mtx tmsync.RWMutex + mtx tmsync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. @@ -102,8 +102,8 @@ func (cli *socketClient) OnStop() { // Error returns an error if the client was stopped abruptly. func (cli *socketClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -113,8 +113,8 @@ func (cli *socketClient) Error() error { // NOTE: callback may get internally generated flush responses. 
func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() - defer cli.mtx.Unlock() cli.resCb = resCb + cli.mtx.Unlock() } //---------------------------------------- diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 81a4f8be8..a826ef79b 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -62,7 +62,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.RWMutex) + mtx := new(tmsync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 5251b0fdb..8b54f6026 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -407,7 +407,7 @@ func newStateWithConfigAndBlockStore( blockStore *store.BlockStore, ) *State { // one for mempool, one for consensus - mtx := new(tmsync.RWMutex) + mtx := new(tmsync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index baffbac30..e536906a9 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -346,7 +346,7 @@ func TestReactorWithEvidence(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.RWMutex) + mtx := new(tmsync.Mutex) proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) From d837432681eb835cee35b2aa477b244eb5254666 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 12 Oct 2021 12:28:10 -0400 Subject: [PATCH 086/174] ci: use run-multiple.sh for e2e pr tests (#7111) * ci: use run-multiple.sh for e2e pr tests * fix labeling * Update .github/workflows/e2e-nightly-35x.yml Co-authored-by: M. J. Fromberger Co-authored-by: M. J. 
Fromberger --- .github/workflows/e2e-nightly-35x.yml | 6 +++--- .github/workflows/e2e-nightly-master.yml | 6 +++--- .github/workflows/e2e.yml | 6 +----- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index 13d4b3dc8..b3acdf62b 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -10,7 +10,7 @@ on: - cron: '0 2 * * *' jobs: - e2e-nightly-test-2: + e2e-nightly-test: # Run parallel jobs for the listed testnet groups (must match the # ./build/generator -g flag) strategy: @@ -44,7 +44,7 @@ jobs: run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml e2e-nightly-fail-2: - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ failure() }} runs-on: ubuntu-latest steps: @@ -60,7 +60,7 @@ jobs: SLACK_FOOTER: '' e2e-nightly-success: # may turn this off once they seem to pass consistently - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 28f8b3e4b..da8b07d70 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -10,7 +10,7 @@ on: - cron: '0 2 * * *' jobs: - e2e-nightly-test-2: + e2e-nightly-test: # Run parallel jobs for the listed testnet groups (must match the # ./build/generator -g flag) strategy: @@ -41,7 +41,7 @@ jobs: run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml e2e-nightly-fail-2: - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ failure() }} runs-on: ubuntu-latest steps: @@ -57,7 +57,7 @@ jobs: SLACK_FOOTER: '' e2e-nightly-success: # may turn this off once they seem to pass consistently - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index a0f18cd1b..134ae979c 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -33,10 +33,6 @@ jobs: - name: Run CI testnet working-directory: test/e2e - run: ./build/runner -f networks/ci.toml + run: ./run-multiple.sh networks/ci.toml if: "env.GIT_DIFF != ''" - - name: Emit logs on failure - if: ${{ failure() }} - working-directory: test/e2e - run: ./build/runner -f networks/ci.toml logs From 05245586967e3064fffcecc75d4035e4249d792e Mon Sep 17 00:00:00 2001 From: lklimek <842586+lklimek@users.noreply.github.com> Date: Tue, 12 Oct 2021 22:22:57 +0200 Subject: [PATCH 087/174] refactor: assignment copies lock value (#7108) Co-authored-by: M. J. 
Fromberger --- CHANGELOG_PENDING.md | 2 ++ libs/bits/bit_array.go | 30 ++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index fafb9cea7..0f8ba5ea7 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -36,3 +36,5 @@ Special thanks to external contributors on this release: ### IMPROVEMENTS ### BUG FIXES + +- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek) diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index b78fafddd..a0258521c 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -30,9 +30,21 @@ func NewBitArray(bits int) *BitArray { if bits <= 0 { return nil } - return &BitArray{ - Bits: bits, - Elems: make([]uint64, numElems(bits)), + bA := &BitArray{} + bA.reset(bits) + return bA +} + +// reset changes size of BitArray to `bits` and re-allocates (zeroed) data buffer +func (bA *BitArray) reset(bits int) { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bA.Bits = bits + if bits == 0 { + bA.Elems = nil + } else { + bA.Elems = make([]uint64, numElems(bits)) } } @@ -399,8 +411,7 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { if b == "null" { // This is required e.g. for encoding/json when decoding // into a pointer with pre-allocated BitArray. - bA.Bits = 0 - bA.Elems = nil + bA.reset(0) return nil } @@ -410,16 +421,15 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) } bits := match[1] - - // Construct new BitArray and copy over. numBits := len(bits) - bA2 := NewBitArray(numBits) + + bA.reset(numBits) for i := 0; i < numBits; i++ { if bits[i] == 'x' { - bA2.SetIndex(i, true) + bA.SetIndex(i, true) } } - *bA = *bA2 //nolint:govet + return nil } From 52ed9944162012a5ad44fa611f263545757d6021 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 12 Oct 2021 16:49:45 -0400 Subject: [PATCH 088/174] test: cleanup rpc/client and node test fixtures (#7112) --- node/node.go | 14 +- node/node_test.go | 94 +- rpc/client/event_test.go | 196 +--- rpc/client/examples_test.go | 110 +- rpc/client/http/http.go | 11 +- rpc/client/main_test.go | 7 +- rpc/client/rpc_test.go | 1359 ++++++++++++------------ rpc/jsonrpc/client/http_json_client.go | 3 +- 8 files changed, 845 insertions(+), 949 deletions(-) diff --git a/node/node.go b/node/node.go index 87e9b0dbd..6092f4c43 100644 --- a/node/node.go +++ b/node/node.go @@ -641,12 +641,16 @@ func (n *nodeImpl) OnStop() { n.Logger.Info("Stopping Node") - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) + if n.eventBus != nil { + // first stop the non-reactor services + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) + if n.indexerService != nil { + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } } if n.config.Mode != config.ModeSeed { diff --git a/node/node_test.go b/node/node_test.go index 61fd3fa21..d5ea39aa6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -31,6 +31,7 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + 
"github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -46,11 +47,21 @@ func TestNodeStartStop(t *testing.T) { require.NoError(t, err) require.NoError(t, ns.Start()) + t.Cleanup(func() { + if ns.IsRunning() { + assert.NoError(t, ns.Stop()) + ns.Wait() + } + }) + n, ok := ns.(*nodeImpl) require.True(t, ok) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // wait for the node to produce a block - blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) + blocksSub, err := n.EventBus().Subscribe(ctx, "node_test", types.EventQueryNewBlock) require.NoError(t, err) select { case <-blocksSub.Out(): @@ -87,6 +98,14 @@ func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl n, ok := ns.(*nodeImpl) require.True(t, ok) + + t.Cleanup(func() { + if ns.IsRunning() { + assert.NoError(t, ns.Stop()) + ns.Wait() + } + }) + return n } @@ -100,7 +119,6 @@ func TestNodeDelayedStart(t *testing.T) { n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) require.NoError(t, n.Start()) - defer n.Stop() //nolint:errcheck // ignore for tests startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) @@ -165,8 +183,13 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix - _, err := newDefaultNode(cfg, log.TestingLogger()) + n, err := newDefaultNode(cfg, log.TestingLogger()) assert.Error(t, err) + + if n != nil && n.IsRunning() { + assert.NoError(t, n.Stop()) + n.Wait() + } } func TestNodeSetPrivValIPC(t *testing.T) { @@ -211,6 +234,9 @@ func testFreeAddr(t *testing.T) string { // create a proposal block using real and full // mempool and evidence pool and validate it. 
func TestCreateProposalBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -222,7 +248,7 @@ func TestCreateProposalBlock(t *testing.T) { logger := log.TestingLogger() const height int64 = 1 - state, stateDB, privVals := state(1, height) + state, stateDB, privVals := state(t, 1, height) stateStore := sm.NewStore(stateDB) maxBytes := 16384 const partSize uint32 = 256 @@ -266,7 +292,7 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -303,6 +329,9 @@ func TestCreateProposalBlock(t *testing.T) { } func TestMaxTxsProposalBlockSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -314,7 +343,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { logger := log.TestingLogger() const height int64 = 1 - state, stateDB, _ := state(1, height) + state, stateDB, _ := state(t, 1, height) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 16384 @@ -336,7 +365,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) tx := tmrand.Bytes(txLength - 4) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) blockExec := sm.NewBlockExecutor( @@ -365,6 +394,9 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { } func TestMaxProposalBlockSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) @@ -375,7 +407,7 @@ func TestMaxProposalBlockSize(t *testing.T) { logger := log.TestingLogger() - state, stateDB, _ := state(types.MaxVotesCount, int64(1)) + state, stateDB, _ := state(t, types.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 @@ -402,7 +434,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // At the end of the test, only the single big tx should be added for i := 0; i < 10; i++ { tx := tmrand.Bytes(10) - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -493,14 +525,17 @@ func TestNodeNewSeedNode(t *testing.T) { defaultGenesisDocProviderFunc(cfg), log.TestingLogger(), ) + require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) err = n.Start() require.NoError(t, err) - assert.True(t, n.pexReactor.IsRunning()) + + require.NoError(t, n.Stop()) + } func TestNodeSetEventSink(t *testing.T) { @@ -511,7 +546,7 @@ func TestNodeSetEventSink(t *testing.T) { setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { eventBus, err := createAndStartEventBus(logger) require.NoError(t, err) - + t.Cleanup(func() { require.NoError(t, 
eventBus.Stop()) }) genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) @@ -521,6 +556,22 @@ func TestNodeSetEventSink(t *testing.T) { t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) return eventSinks } + cleanup := func(ns service.Service) func() { + return func() { + n, ok := ns.(*nodeImpl) + if !ok { + return + } + if n == nil { + return + } + if !n.IsRunning() { + return + } + assert.NoError(t, n.Stop()) + n.Wait() + } + } eventSinks := setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) @@ -542,6 +593,7 @@ func TestNodeSetEventSink(t *testing.T) { ns, err := newDefaultNode(cfg, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("unsupported event sink type"), err) + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{} eventSinks = setupTest(t, cfg) @@ -553,6 +605,7 @@ func TestNodeSetEventSink(t *testing.T) { ns, err = newDefaultNode(cfg, logger) assert.Nil(t, ns) assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) + t.Cleanup(cleanup(ns)) var psqlConn = "test" @@ -591,18 +644,21 @@ func TestNodeSetEventSink(t *testing.T) { var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"} cfg.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} cfg.TxIndex.PsqlConn = psqlConn - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(cfg, logger) require.Error(t, err) assert.Equal(t, e, err) + t.Cleanup(cleanup(ns)) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { +func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { + t.Helper() privVals := make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { @@ -623,17 +679,15 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() + t.Cleanup(func() { require.NoError(t, stateDB.Close()) }) + stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privVals } diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 54d19a201..3b91de107 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "reflect" "testing" "time" @@ -17,7 +16,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var waitForEventTimeout = 8 * time.Second +const waitForEventTimeout = 2 * time.Second // MakeTxKV returns a text transaction, allong with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { @@ -26,164 +25,41 @@ func MakeTxKV() ([]byte, []byte, []byte) { return k, v, append(k, append([]byte("="), v...)...) 
} -func TestHeaderEvents(t *testing.T) { - n, conf := NodeSuite(t) +func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) { + // make the tx + _, _, tx := MakeTxKV() - for i, c := range GetClients(t, n, conf) { - i, c := i, c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } + // send + done := make(chan struct{}) + go func() { + defer close(done) + var ( + txres *coretypes.ResultBroadcastTx + err error + ) + switch broadcastMethod { + case "async": + txres, err = c.BroadcastTxAsync(ctx, tx) + case "sync": + txres, err = c.BroadcastTxSync(ctx, tx) + default: + panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + } + if assert.NoError(t, err) { + assert.Equal(t, txres.Code, abci.CodeTypeOK) + } + }() - evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - _, ok := evt.(types.EventDataNewBlockHeader) - require.True(t, ok, "%d: %#v", i, evt) - // TODO: more checks... - }) - } -} - -// subscribe to new blocks and make sure height increments by 1 -func TestBlockEvents(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - const subscriber = "TestBlockEvents" - - eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) - require.NoError(t, err) - t.Cleanup(func() { - if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil { - t.Error(err) - } - }) - - var firstBlockHeight int64 - for i := int64(0); i < 3; i++ { - event := <-eventCh - blockEvent, ok := event.Data.(types.EventDataNewBlock) - require.True(t, ok) - - block := blockEvent.Block - - if firstBlockHeight == 0 { - firstBlockHeight = block.Header.Height - } - - require.Equal(t, firstBlockHeight+i, block.Header.Height) - } - }) - } -} - -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } - -func testTxEventsSent(t *testing.T, broadcastMethod string) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - // make the tx - _, _, tx := MakeTxKV() - - // send - go func() { - var ( - txres *coretypes.ResultBroadcastTx - err error - ctx = context.Background() - ) - switch broadcastMethod { - case "async": - txres, err = c.BroadcastTxAsync(ctx, tx) - case "sync": - txres, err = c.BroadcastTxSync(ctx, tx) - default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) - } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) - } - }() - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) - - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok) - - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -// Test HTTPClient resubscribes upon disconnect && subscription error. -// Test Local client resubscribes upon subscription error. -func TestClientsResubscribe(t *testing.T) { - // TODO(melekes) -} - -func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // on Subscribe - _, err := c.Subscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on Unsubscribe - err = c.Unsubscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on UnsubscribeAll - err = c.UnsubscribeAll(ctx, "TestHeaderEvents") - assert.Error(t, err) + // and wait for confirmation + evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) + require.Nil(t, err) + + // and make sure it has the proper info + txe, ok := evt.(types.EventDataTx) + require.True(t, ok) + + // make sure this is the proper tx + require.EqualValues(t, tx, txe.Tx) + require.True(t, txe.Result.IsOK()) + <-done } diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 1acc29c11..38fb4fcf7 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,16 +3,20 @@ package client_test import ( "bytes" "context" - "fmt" "log" + "net/http" + "testing" + "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func ExampleHTTP_simple() { +func TestHTTPSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -29,9 +33,7 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Create a transaction k := []byte("name") @@ -41,6 +43,7 @@ func ExampleHTTP_simple() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). 
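// (Why the advice above holds: BroadcastTxCommit subscribes for the tx event
// and blocks until the transaction is both checked and committed in a block,
// while BroadcastTxSync returns as soon as CheckTx passes, which is much
// cheaper for the node; the sync/async event tests earlier in this patch
// exercise exactly that difference.)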
bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(t, err) if err != nil { log.Fatal(err) } @@ -50,30 +53,19 @@ func ExampleHTTP_simple() { // Now try to fetch the value for the key qres, err := c.ABCIQuery(context.Background(), "/key", k) - if err != nil { - log.Fatal(err) - } - if qres.Response.IsErr() { - log.Fatal("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - log.Fatal("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - log.Fatal("returned value does not match sent value") - } + require.NoError(t, err) + require.False(t, qres.Response.IsErr(), "ABCIQuery failed") + require.True(t, bytes.Equal(qres.Response.Key, k), + "returned key does not match queried key") + require.True(t, bytes.Equal(qres.Response.Value, v), + "returned value does not match sent value [%s]", string(v)) - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi + assert.Equal(t, "name=satoshi", string(tx), "sent tx") + assert.Equal(t, "name", string(qres.Response.Key), "queried for") + assert.Equal(t, "satoshi", string(qres.Response.Value), "got value") } -func ExampleHTTP_batching() { +func TestHTTPBatching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -88,10 +80,8 @@ func ExampleHTTP_batching() { defer func() { _ = closer(ctx) }() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) + require.NoError(t, err) // Create our two transactions k1 := []byte("firstName") @@ -111,41 +101,51 @@ func ExampleHTTP_batching() { for _, tx := range txs { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { - log.Fatal(err) - } + _, err := batch.BroadcastTxSync(ctx, tx) + require.NoError(t, err) } // Send the batch of 2 transactions - if _, err := batch.Send(context.Background()); err != nil { - log.Fatal(err) - } + _, err = batch.Send(ctx) + require.NoError(t, err) - // Now let's query for the original results as a batch - keys := [][]byte{k1, k2} - for _, key := range keys { - if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil { - log.Fatal(err) - } - } + // wait for the transaction to land, we could poll more for + // the transactions to land definitively. 
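// (Aside on the polling primitive used below, assuming testify's require
// package: require.Eventually(t, cond, waitFor, tick) re-invokes cond every
// tick until it returns true or waitFor elapses, failing the test on timeout.
// Because cond can run many times it should be cheap and side-effect free,
// e.g.
//
//	require.Eventually(t, func() bool { return ready() }, 10*time.Second, time.Second)
//
// where ready() is a hypothetical idempotent probe.)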
+ require.Eventually(t, + func() bool { + // Now let's query for the original results as a batch + exists := 0 + for _, key := range [][]byte{k1, k2} { + _, err := batch.ABCIQuery(context.Background(), "/key", key) + if err == nil { + exists++ + + } + } + return exists == 2 + }, + 10*time.Second, + time.Second, + ) // Send the 2 queries and keep the results - results, err := batch.Send(context.Background()) - if err != nil { - log.Fatal(err) - } + results, err := batch.Send(ctx) + require.NoError(t, err) + require.Len(t, results, 2) // Each result in the returned list is the deserialized result of each // respective ABCIQuery response for _, result := range results { qr, ok := result.(*coretypes.ResultABCIQuery) - if !ok { - log.Fatal("invalid result type from ABCIQuery request") - } - fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) - } + require.True(t, ok, "invalid result type from ABCIQuery request") - // Output: - // firstName = satoshi - // lastName = nakamoto + switch string(qr.Response.Key) { + case "firstName": + require.Equal(t, "satoshi", string(qr.Response.Value)) + case "lastName": + require.Equal(t, "nakamoto", string(qr.Response.Value)) + default: + t.Fatalf("encountered unknown key %q", string(qr.Response.Key)) + } + } } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index d0c1d5621..5bd7b398a 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -2,6 +2,7 @@ package http import ( "context" + "errors" "net/http" "time" @@ -120,20 +121,20 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { } // NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function panics when client is nil. +// on invalid remote. The function returns an error when client is nil +// or an invalid remote. func NewWithClient(remote string, c *http.Client) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) } // NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote. The function -// panics when client is nil. +// WebSocket options. An error is returned on invalid remote or nil client. 
func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } rpc, err := jsonrpcclient.NewWithHTTPClient(remote, c) if err != nil { diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index 4e2c0405c..c2e0dc3cd 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -7,6 +7,7 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" @@ -30,9 +31,11 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { - _ = closer(ctx) cancel() - app.Close() + assert.NoError(t, node.Stop()) + assert.NoError(t, closer(ctx)) + assert.NoError(t, app.Close()) + node.Wait() _ = os.RemoveAll(dir) }) return node, conf diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 8a6845831..d7554d174 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -33,10 +33,16 @@ func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) + return c } @@ -44,10 +50,18 @@ func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Du t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.NewWithTimeout(rpcAddr, timeout) + + http.DefaultClient.Timeout = timeout + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + http.DefaultClient.Timeout = 0 + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) return c } @@ -63,382 +77,433 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. 
require.NoError(t, err) return []client.Client{ - getHTTPClient(t, conf), ncl, + getHTTPClient(t, conf), } } -func TestNilCustomHTTPClient(t *testing.T) { - require.Panics(t, func() { - _, _ = rpchttp.NewWithClient("http://example.com", nil) - }) - require.Panics(t, func() { - _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) - }) -} - -func TestParseInvalidAddress(t *testing.T) { - _, conf := NodeSuite(t) - // should remove trailing / - invalidRemote := conf.RPC.ListenAddress + "/" - _, err := rpchttp.New(invalidRemote) - require.NoError(t, err) -} - -func TestCustomHTTPClient(t *testing.T) { - _, conf := NodeSuite(t) - remote := conf.RPC.ListenAddress - c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) - status, err := c.Status(context.Background()) - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - _, conf := NodeSuite(t) - origin := conf.RPC.CORSAllowedOrigins[0] - remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { +func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - moniker := conf.Moniker + _, conf := NodeSuite(t) + + t.Run("NilCustomHTTPClient", func(t *testing.T) { + _, err := rpchttp.NewWithClient("http://example.com", nil) + require.Error(t, err) + + _, err = rpcclient.NewWithHTTPClient("http://example.com", nil) + require.Error(t, err) + }) + t.Run("ParseInvalidAddress", func(t *testing.T) { + // should remove trailing / + invalidRemote := conf.RPC.ListenAddress + "/" + _, err := rpchttp.New(invalidRemote) + require.NoError(t, err) + }) + t.Run("CustomHTTPClient", func(t *testing.T) { + remote := conf.RPC.ListenAddress + c, err := rpchttp.NewWithClient(remote, http.DefaultClient) + require.Nil(t, err) status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } + require.NoError(t, err) + require.NotNil(t, status) + }) + t.Run("CorsEnabled", func(t *testing.T) { + origin := conf.RPC.CORSAllowedOrigins[0] + remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") + + req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) + require.Nil(t, err, "%+v", err) + req.Header.Set("Origin", origin) + resp, err := http.DefaultClient.Do(req) + require.Nil(t, err, "%+v", err) + defer resp.Body.Close() + + assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) + }) + t.Run("Batching", func(t *testing.T) { + t.Run("JSONRPCCalls", func(t *testing.T) { + c := getHTTPClient(t, conf) + testBatchedJSONRPCCalls(ctx, t, c) + }) + t.Run("JSONRPCCallsCancellation", func(t *testing.T) { + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(ctx, tx1) + require.NoError(t, err) + _, err = batch.BroadcastTxCommit(ctx, tx2) + require.NoError(t, err) + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + 
require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) + }) + t.Run("SendingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.Send(ctx) + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") + }) + t.Run("ClearingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") + }) + t.Run("ConcurrentJSONRPC", func(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient(t, conf) + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(ctx, t, c) + }() + } + wg.Wait() + }) + }) + t.Run("HTTPReturnsErrorIfClientIsNotRunning", func(t *testing.T) { + c := getHTTPClientWithTimeout(t, conf, 100*time.Millisecond) + + // on Subscribe + _, err := c.Subscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on UnsubscribeAll + err = c.UnsubscribeAll(ctx, "TestHeaderEvents") + assert.Error(t, err) + }) } // Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { +func TestClientMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) - } -} - -func TestNetInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} - -func TestConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // make sure this is the right genesis file - gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - h := int64(1) - vals, err := c.Validators(ctx, &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - require.Equal(t, 1, vals.Count) - require.Equal(t, 1, vals.Total) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestGenesisChunked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for _, c := range GetClients(t, n, conf) { - first, err := c.GenesisChunked(ctx, 0) - require.NoError(t, err) - - decoded := make([]string, 0, first.TotalChunks) - for i := 0; i < first.TotalChunks; i++ { - chunk, err := c.GenesisChunked(ctx, uint(i)) - require.NoError(t, err) - data, err := base64.StdEncoding.DecodeString(chunk.Data) - require.NoError(t, err) - decoded = append(decoded, string(data)) - - } - doc := []byte(strings.Join(decoded, "")) - - var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), - "first: %+v, doc: %s", first, string(doc)) - } -} - -func TestABCIQuery(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf 
:= NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - res, err := c.ABCIQuery(ctx, "/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } -} - -// Make some app checks -func TestAppCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - - // get an offset of height to avoid racing and guessing - s, err := c.Status(ctx) - require.NoError(t, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 20 - _, err = c.Block(ctx, &h) - require.Error(t, err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - - _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) - require.NoError(t, err) - qres := _qres.Response - if assert.True(t, qres.IsOK()) { - assert.Equal(t, k, qres.Key) - assert.EqualValues(t, v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(ctx, bres.Hash, true) - require.NoError(t, err) - assert.EqualValues(t, txh, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(ctx, &apph) - require.NoError(t, err) - appHash := block.Block.Header.AppHash - assert.True(t, len(appHash) > 0) - assert.EqualValues(t, apph, block.Block.Header.Height) - - blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, block, blockByHash) - - // now check the results - blockResults, err := c.BlockResults(ctx, &txh) - require.NoError(t, err, "%d: %+v", i, err) - assert.Equal(t, txh, blockResults.Height) - if assert.Equal(t, 1, len(blockResults.TxsResults)) { - // check success code - assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(ctx, apph, apph) - require.NoError(t, err) - assert.True(t, info.LastHeight >= apph) - if assert.Equal(t, 1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(t, apph, lastMeta.Header.Height) - blockData := block.Block - assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(t, block.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(ctx, &apph) - require.NoError(t, err) - cappHash := commit.Header.AppHash - assert.Equal(t, appHash, cappHash) - assert.NotNil(t, commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(ctx, &h) - require.NoError(t, err) - assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) - - // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) - require.NoError(t, err) - pres := _pres.Response - assert.True(t, pres.IsOK()) - - // XXX Test proof - } -} - -func TestBlockchainInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - err := client.WaitForHeight(c, 10, nil) - require.NoError(t, err) - - res, err := c.BlockchainInfo(ctx, 0, 0) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) > 0) - - res, err = c.BlockchainInfo(ctx, 1, 1) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) == 1) - - res, err = c.BlockchainInfo(ctx, 1, 10000) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) < 100) - for _, m := range res.BlockMetas { - assert.NotNil(t, m) - } - - res, err = c.BlockchainInfo(ctx, 10000, 1) - require.NotNil(t, err) - assert.Nil(t, res) - assert.Contains(t, err.Error(), "can't be greater than max") - } -} - -func TestBroadcastTxSync(t *testing.T) { - n, conf := NodeSuite(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // TODO (melekes): use mempool which is set on RPC rather than getting it from node pool := getMempool(t, n) - initMempoolSize := pool.Size() for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + t.Run("Status", func(t *testing.T) { + status, err := c.Status(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) + }) + t.Run("Info", func(t *testing.T) { + info, err := c.ABCIInfo(ctx) + require.NoError(t, err) - require.Equal(t, initMempoolSize+1, pool.Size()) + status, err := c.Status(ctx) + require.NoError(t, err) - txs := pool.ReapMaxTxs(len(tx)) - require.EqualValues(t, tx, txs[0]) - pool.Flush() + assert.GreaterOrEqual(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + }) + t.Run("NetInfo", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + }) + t.Run("DumpConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + }) + t.Run("ConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + }) + t.Run("Health", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health(ctx) + require.Nil(t, err, "%d: %+v", i, err) + }) + t.Run("GenesisAndValidators", func(t *testing.T) { + // make sure this is the right genesis file + gen, err := 
c.Genesis(ctx) + require.Nil(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := c.Validators(ctx, &h, nil, nil) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + t.Run("GenesisChunked", func(t *testing.T) { + first, err := c.GenesisChunked(ctx, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := c.GenesisChunked(ctx, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) + + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) + t.Run("ABCIQuery", func(t *testing.T) { + // write something + k, v, tx := MakeTxKV() + status, err := c.Status(ctx) + require.NoError(t, err) + _, err = c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(ctx, "/key", k) + qres := res.Response + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + }) + t.Run("AppCalls", func(t *testing.T) { + // get an offset of height to avoid racing and guessing + s, err := c.Status(ctx) + require.NoError(t, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = c.Block(ctx, &h) + require.Error(t, err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + require.True(t, bres.DeliverTx.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + + _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(t, err) + qres := _qres.Response + if assert.True(t, qres.IsOK()) { + assert.Equal(t, k, qres.Key) + assert.EqualValues(t, v, qres.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := c.Tx(ctx, bres.Hash, true) + require.NoError(t, err) + assert.EqualValues(t, txh, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + + // and we can even check the block is added + block, err := c.Block(ctx, &apph) + require.NoError(t, err) + appHash := block.Block.Header.AppHash + assert.True(t, len(appHash) > 0) + assert.EqualValues(t, apph, block.Block.Header.Height) + + blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, block, blockByHash) + + // now check the results + blockResults, err := c.BlockResults(ctx, &txh) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, txh, blockResults.Height) + if assert.Equal(t, 1, len(blockResults.TxsResults)) { + // check success code + 
assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(ctx, apph, apph) + require.NoError(t, err) + assert.True(t, info.LastHeight >= apph) + if assert.Equal(t, 1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(t, apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(t, block.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := c.Commit(ctx, &apph) + require.NoError(t, err) + cappHash := commit.Header.AppHash + assert.Equal(t, appHash, cappHash) + assert.NotNil(t, commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(ctx, &h) + require.NoError(t, err) + assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! + _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) + require.NoError(t, err) + pres := _pres.Response + assert.True(t, pres.IsOK()) + + // XXX Test proof + }) + t.Run("BlockchainInfo", func(t *testing.T) { + err := client.WaitForHeight(c, 10, nil) + require.NoError(t, err) + + res, err := c.BlockchainInfo(ctx, 0, 0) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) > 0) + + res, err = c.BlockchainInfo(ctx, 1, 1) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) == 1) + + res, err = c.BlockchainInfo(ctx, 1, 10000) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) < 100) + for _, m := range res.BlockMetas { + assert.NotNil(t, m) + } + + res, err = c.BlockchainInfo(ctx, 10000, 1) + require.NotNil(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "can't be greater than max") + }) + t.Run("BroadcastTxCommit", func(t *testing.T) { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.True(t, bres.CheckTx.IsOK()) + require.True(t, bres.DeliverTx.IsOK()) + + require.Equal(t, 0, pool.Size()) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + _, _, tx := MakeTxKV() + initMempoolSize := pool.Size() + bres, err := c.BroadcastTxSync(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + + require.Equal(t, initMempoolSize+1, pool.Size()) + + txs := pool.ReapMaxTxs(len(tx)) + require.EqualValues(t, tx, txs[0]) + pool.Flush() + }) + t.Run("CheckTx", func(t *testing.T) { + _, _, tx := MakeTxKV() + + res, err := c.CheckTx(ctx, tx) + require.NoError(t, err) + assert.Equal(t, abci.CodeTypeOK, res.Code) + + assert.Equal(t, 0, pool.Size(), "mempool must be empty") + }) + t.Run("Events", func(t *testing.T) { + // start for this test it if it wasn't already running + if !c.IsRunning() { + // if so, then we start it, listen, and stop it. + err := c.Start() + require.Nil(t, err) + t.Cleanup(func() { + if err := c.Stop(); err != nil { + t.Error(err) + } + }) + } + + t.Run("Header", func(t *testing.T) { + evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) + require.Nil(t, err, "%d: %+v", i, err) + _, ok := evt.(types.EventDataNewBlockHeader) + require.True(t, ok, "%d: %#v", i, evt) + // TODO: more checks... 
+ }) + t.Run("Block", func(t *testing.T) { + const subscriber = "TestBlockEvents" + + eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) + require.NoError(t, err) + t.Cleanup(func() { + if err := c.UnsubscribeAll(ctx, subscriber); err != nil { + t.Error(err) + } + }) + + var firstBlockHeight int64 + for i := int64(0); i < 3; i++ { + event := <-eventCh + blockEvent, ok := event.Data.(types.EventDataNewBlock) + require.True(t, ok) + + block := blockEvent.Block + + if firstBlockHeight == 0 { + firstBlockHeight = block.Header.Height + } + + require.Equal(t, firstBlockHeight+i, block.Header.Height) + } + }) + t.Run("BroadcastTxAsync", func(t *testing.T) { + testTxEventsSent(ctx, t, "async", c) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + testTxEventsSent(ctx, t, "sync", c) + }) + }) + }) } } @@ -451,319 +516,268 @@ func getMempool(t *testing.T, srv service.Service) mempool.Mempool { return n.Mempool() } -func TestBroadcastTxCommit(t *testing.T) { +// these cases are roughly the same as the TestClientMethodCalls, but +// they have to loop over their clients in the individual test cases, +// so making a separate suite makes more sense, though isn't strictly +// speaking desirable. +func TestClientMethodCallsAdvanced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) - pool := getMempool(t, n) - for i, c := range GetClients(t, n, conf) { + + t.Run("UnconfirmedTxs", func(t *testing.T) { + _, _, tx := MakeTxKV() + ch := make(chan struct{}) + + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) + require.NoError(t, err) + + // wait for tx to arrive in mempoool. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + + for _, c := range GetClients(t, n, conf) { + mc := c.(client.MempoolClient) + limit := 1 + res, err := mc.UnconfirmedTxs(ctx, &limit) + require.NoError(t, err) + + assert.Equal(t, 1, res.Count) + assert.Equal(t, 1, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) + } + + pool.Flush() + }) + t.Run("NumUnconfirmedTxs", func(t *testing.T) { + ch := make(chan struct{}) + + pool := getMempool(t, n) + + _, _, tx := MakeTxKV() + + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) + require.NoError(t, err) + + // wait for tx to arrive in mempoool. 
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + + mempoolSize := pool.Size() + for i, c := range GetClients(t, n, conf) { + mc, ok := c.(client.MempoolClient) + require.True(t, ok, "%d", i) + res, err := mc.NumUnconfirmedTxs(ctx) + require.Nil(t, err, "%d: %+v", i, err) + + assert.Equal(t, mempoolSize, res.Count) + assert.Equal(t, mempoolSize, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + } + + pool.Flush() + }) + t.Run("Tx", func(t *testing.T) { + c := getHTTPClient(t, conf) + + // first we broadcast a tx _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.True(t, bres.CheckTx.IsOK()) - require.True(t, bres.DeliverTx.IsOK()) + require.Nil(t, err, "%+v", err) - require.Equal(t, 0, pool.Size()) - } -} + txHeight := bres.Height + txHash := bres.Hash -func TestUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + anotherTxHash := types.Tx("a different tx").Hash() - _, _, tx := MakeTxKV() - ch := make(chan *abci.Response, 1) + cases := []struct { + valid bool + prove bool + hash []byte + }{ + // only valid if correct hash provided + {true, false, txHash}, + {true, true, txHash}, + {false, false, anotherTxHash}, + {false, true, anotherTxHash}, + {false, false, nil}, + {false, true, nil}, + } - n, conf := NodeSuite(t) - pool := getMempool(t, n) - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + for j, tc := range cases { + t.Run(fmt.Sprintf("Case%d", j), func(t *testing.T) { + // now we query for the tx. + // since there's only one tx, we know index=0. + ptx, err := c.Tx(ctx, tc.hash, tc.prove) - require.NoError(t, err) + if !tc.valid { + require.NotNil(t, err) + } else { + require.Nil(t, err, "%+v", err) + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) - require.NoError(t, err) - - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - pool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - - n, conf := NodeSuite(t) - ch := make(chan *abci.Response, 1) - pool := getMempool(t, n) - - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) - require.NoError(t, err) - - // wait for tx to arrive in mempoool. 
- select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - mempoolSize := pool.Size() - for i, c := range GetClients(t, n, conf) { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - } - - pool.Flush() -} - -func TestCheckTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - pool := getMempool(t, n) - - for _, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - - res, err := c.CheckTx(ctx, tx) - require.NoError(t, err) - assert.Equal(t, abci.CodeTypeOK, res.Code) - - assert.Equal(t, 0, pool.Size(), "mempool must be empty") - } -} - -func TestTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // first we broadcast a tx - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - prove bool - hash []byte - }{ - // only valid if correct hash provided - {true, false, txHash}, - {true, true, txHash}, - {false, false, anotherTxHash}, - {false, true, anotherTxHash}, - {false, false, nil}, - {false, true, nil}, - } - - for i, c := range GetClients(t, n, conf) { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) - - // now we query for the tx. - // since there's only one tx, we know index=0. 
- ptx, err := c.Tx(ctx, tc.hash, tc.prove) - - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + } + } + }) } - } + }) } - } -} + }) + t.Run("TxSearchWithTimeout", func(t *testing.T) { + timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) -func TestTxSearchWithTimeout(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) - - _, _, tx := MakeTxKV() - _, err := timeoutClient.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - - // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") -} - -func TestTxSearch(t *testing.T) { - n, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - - // first we broadcast a few txs - for i := 0; i < 10; i++ { _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(context.Background(), tx) + _, err := timeoutClient.BroadcastTxCommit(ctx, tx) require.NoError(t, err) - } - - // since we're not using an isolated test server, we'll have lingering transactions - // from other tests as well - result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") - require.NoError(t, err) - txCount := len(result.Txs) - - // pick out the last tx to have something to search for in tests - find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() - - for i, c := range GetClients(t, n, conf) { - t.Logf("client %d", i) - - // now we query for the tx. 
- result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - require.Equal(t, find.Hash, result.Txs[0].Hash) - - ptx := result.Txs[0] - assert.EqualValues(t, find.Height, ptx.Height) - assert.EqualValues(t, find.Tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, find.Hash, ptx.Hash) - - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) - } - - // query by height - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - - // query for non existing tx - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + }) + t.Run("TxSearch", func(t *testing.T) { + t.Skip("Test Asserts Non-Deterministic Results") + c := getHTTPClient(t, conf) - // query using an index key - result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using an noindex key - result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch(context.Background(), - "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query a non existing tx with page 1 and txsPerPage 1 - perPage := 1 - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) - - // check sorting - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - // check pagination - perPage = 3 - var ( - seen = map[int64]bool{} - maxHeight int64 - pages = int(math.Ceil(float64(txCount) / float64(perPage))) - ) - - for page := 1; page <= pages; page++ { - page := page - result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") + // first we broadcast a few txs + for i := 0; i < 10; i++ { + _, _, tx := MakeTxKV() + _, err := 
c.BroadcastTxSync(ctx, tx) require.NoError(t, err) - if page < pages { - require.Len(t, result.Txs, perPage) - } else { - require.LessOrEqual(t, len(result.Txs), perPage) - } - require.Equal(t, txCount, result.TotalCount) - for _, tx := range result.Txs { - require.False(t, seen[tx.Height], - "Found duplicate height %v in page %v", tx.Height, page) - require.Greater(t, tx.Height, maxHeight, - "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) - seen[tx.Height] = true - maxHeight = tx.Height - } } - require.Len(t, seen, txCount) - } -} -func TestBatchedJSONRPCCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // since we're not using an isolated test server, we'll have lingering transactions + // from other tests as well + result, err := c.TxSearch(ctx, "tx.height >= 0", true, nil, nil, "asc") + require.NoError(t, err) + txCount := len(result.Txs) - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - testBatchedJSONRPCCalls(ctx, t, c) + // pick out the last tx to have something to search for in tests + find := result.Txs[len(result.Txs)-1] + anotherTxHash := types.Tx("a different tx").Hash() + + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + + // now we query for the tx. + result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + require.Equal(t, find.Hash, result.Txs[0].Hash) + + ptx := result.Txs[0] + assert.EqualValues(t, find.Height, ptx.Height) + assert.EqualValues(t, find.Tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, find.Hash, ptx.Hash) + + // time to verify the proof + if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { + assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + } + + // query by height + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + + // query for non existing tx + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // query using a compositeKey (see kvstore application) + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an index key + result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an noindex key + result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using a compositeKey (see kvstore application) and height + result, err = c.TxSearch(ctx, + "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query a non existing tx with page 1 and txsPerPage 1 + perPage := 1 + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // check sorting + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, 
nil, "asc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + // check pagination + perPage = 3 + var ( + seen = map[int64]bool{} + maxHeight int64 + pages = int(math.Ceil(float64(txCount) / float64(perPage))) + ) + + for page := 1; page <= pages; page++ { + page := page + result, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc") + require.NoError(t, err) + if page < pages { + require.Len(t, result.Txs, perPage) + } else { + require.LessOrEqual(t, len(result.Txs), perPage) + } + require.Equal(t, txCount, result.TotalCount) + for _, tx := range result.Txs { + require.False(t, seen[tx.Height], + "Found duplicate height %v in page %v", tx.Height, page) + require.Greater(t, tx.Height, maxHeight, + "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) + seen[tx.Height] = true + maxHeight = tx.Height + } + } + require.Len(t, seen, txCount) + }) + } + }) } func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) { @@ -814,60 +828,3 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, qresult1.Response.Value, v1) require.Equal(t, qresult2.Response.Value, v2) } - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(ctx, tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(ctx, tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyRequestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - _, err := batch.Send(ctx) - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyRequestBatch(t *testing.T) { - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func TestConcurrentJSONRPCBatching(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - var wg sync.WaitGroup - c := getHTTPClient(t, conf) - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - testBatchedJSONRPCCalls(ctx, t, c) - }() - } - wg.Wait() -} diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 9c73b8a8c..7733eb00c 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io/ioutil" "net" 
@@ -155,7 +156,7 @@ func New(remote string) (*Client, error) {
 // panics when client is nil.
 func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) {
 	if c == nil {
-		panic("nil http.Client")
+		return nil, errors.New("nil client")
 	}
 
 	parsedURL, err := newParsedURL(remote)

From 4781d04d1808cc428e259915d5f820ef785f1d24 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 12 Oct 2021 17:40:59 -0400
Subject: [PATCH 089/174] node: always close database engine (#7113)

---
 internal/state/indexer/sink/kv/kv.go |  12 +--
 node/node.go                         | 114 ++++++++++++++++++++-------
 node/node_test.go                    |   8 +-
 node/setup.go                        |  75 ++++++++++++++----
 4 files changed, 155 insertions(+), 54 deletions(-)

diff --git a/internal/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go
index 4c471b4d3..fe7068a1b 100644
--- a/internal/state/indexer/sink/kv/kv.go
+++ b/internal/state/indexer/sink/kv/kv.go
@@ -18,14 +18,16 @@ var _ indexer.EventSink = (*EventSink)(nil)
 // The EventSink is an aggregator for redirecting the call path of the tx/block kvIndexer.
 // For the implementation details please see the kv.go in the indexer/block and indexer/tx folder.
 type EventSink struct {
-	txi *kvt.TxIndex
-	bi  *kvb.BlockerIndexer
+	txi   *kvt.TxIndex
+	bi    *kvb.BlockerIndexer
+	store dbm.DB
 }
 
 func NewEventSink(store dbm.DB) indexer.EventSink {
 	return &EventSink{
-		txi: kvt.NewTxIndex(store),
-		bi:  kvb.New(store),
+		txi:   kvt.NewTxIndex(store),
+		bi:    kvb.New(store),
+		store: store,
 	}
 }
 
@@ -58,5 +60,5 @@ func (kves *EventSink) HasBlock(h int64) (bool, error) {
 }
 
 func (kves *EventSink) Stop() error {
-	return nil
+	return kves.store.Close()
 }
diff --git a/node/node.go b/node/node.go
index 6092f4c43..6ee0d30f9 100644
--- a/node/node.go
+++ b/node/node.go
@@ -23,6 +23,7 @@ import (
 	"github.com/tendermint/tendermint/internal/proxy"
 	rpccore "github.com/tendermint/tendermint/internal/rpc/core"
 	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/state/indexer"
 	"github.com/tendermint/tendermint/internal/statesync"
 	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/libs/log"
@@ -62,6 +63,7 @@ type nodeImpl struct {
 
 	// services
 	eventBus        *types.EventBus // pub/sub for services
+	eventSinks      []indexer.EventSink
 	stateStore      sm.Store
 	blockStore      *store.BlockStore // store the blockchain to disk
 	bcReactor       service.Service   // for block-syncing
@@ -73,6 +75,7 @@ type nodeImpl struct {
 	pexReactor      service.Service // for exchanging peer addresses
 	evidenceReactor service.Service
 	rpcListeners    []net.Listener // rpc servers
+	shutdownOps     closer
 	indexerService  service.Service
 	rpcEnv          *rpccore.Environment
 	prometheusSrv   *http.Server
@@ -106,6 +109,7 @@ func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, err
 	}
 
 	appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
+
 	return makeNode(cfg,
 		pval,
 		nodeKey,
@@ -123,33 +127,41 @@ func makeNode(cfg *config.Config,
 	clientCreator abciclient.Creator,
 	genesisDocProvider genesisDocProvider,
 	dbProvider config.DBProvider,
-	logger log.Logger) (service.Service, error) {
+	logger log.Logger,
+) (service.Service, error) {
+	closers := []closer{}
 
-	blockStore, stateDB, err := initDBs(cfg, dbProvider)
+	blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, dbCloser)
 	}
+	closers = append(closers, dbCloser)
+
 	stateStore := sm.NewStore(stateDB)
 
 	genDoc, err := genesisDocProvider()
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
 	}
 
 	err = genDoc.ValidateAndComplete()
 	if err != nil {
-		return nil, fmt.Errorf("error in genesis doc: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("error in genesis doc: %w", err),
+			makeCloser(closers))
 	}
 
 	state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
 	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
 	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
 	// EventBus and IndexerService must be started before the handshake because
@@ -158,12 +170,13 @@ func makeNode(cfg *config.Config,
 	// but before it indexed the txs, or, endblocker panicked)
 	eventBus, err := createAndStartEventBus(logger)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
 	indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
 	}
 
 	// If an address is provided, listen on the socket for a connection from an
@@ -175,12 +188,16 @@ func makeNode(cfg *config.Config,
 		case "grpc":
 			privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger)
 			if err != nil {
-				return nil, fmt.Errorf("error with private validator grpc client: %w", err)
+				return nil, combineCloseError(
+					fmt.Errorf("error with private validator grpc client: %w", err),
+					makeCloser(closers))
 			}
 		default:
 			privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger)
 			if err != nil {
-				return nil, fmt.Errorf("error with private validator socket client: %w", err)
+				return nil, combineCloseError(
+					fmt.Errorf("error with private validator socket client: %w", err),
+					makeCloser(closers))
 			}
 		}
 	}
@@ -188,10 +205,14 @@ func makeNode(cfg *config.Config,
 	if cfg.Mode == config.ModeValidator {
 		pubKey, err = privValidator.GetPubKey(context.TODO())
 		if err != nil {
-			return nil, fmt.Errorf("can't get pubkey: %w", err)
+			return nil, combineCloseError(fmt.Errorf("can't get pubkey: %w", err),
+				makeCloser(closers))
+
 		}
 		if pubKey == nil {
-			return nil, errors.New("could not retrieve public key from private validator")
+			return nil, combineCloseError(
+				errors.New("could not retrieve public key from private validator"),
+				makeCloser(closers))
 		}
 	}
 
@@ -207,7 +228,8 @@ func makeNode(cfg *config.Config,
 	consensusLogger := logger.With("module", "consensus")
 	if !stateSync {
 		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
-			return nil, err
+			return nil, combineCloseError(err, makeCloser(closers))
+
 		}
 
 		// Reload the state. It will have the Version.Consensus.App set by the
@@ -215,7 +237,9 @@ func makeNode(cfg *config.Config,
 		// what happened during block replay).
 		state, err = stateStore.Load()
 		if err != nil {
-			return nil, fmt.Errorf("cannot load state: %w", err)
+			return nil, combineCloseError(
+				fmt.Errorf("cannot load state: %w", err),
+				makeCloser(closers))
 		}
 	}
 
@@ -229,38 +253,45 @@ func makeNode(cfg *config.Config,
 
 	// TODO: Use a persistent peer database.
 	nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
 	p2pLogger := logger.With("module", "p2p")
 	transport := createTransport(p2pLogger, cfg)
 
-	peerManager, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
+	peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
+	closers = append(closers, peerCloser)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create peer manager: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("failed to create peer manager: %w", err),
+			makeCloser(closers))
 	}
 
-	nodeMetrics :=
-		defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID)
+	nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID)
 
 	router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey,
 		peerManager, transport, getRouterConfig(cfg, proxyApp))
 	if err != nil {
-		return nil, fmt.Errorf("failed to create router: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("failed to create router: %w", err),
+			makeCloser(closers))
 	}
 
 	mpReactorShim, mpReactor, mp, err := createMempoolReactor(
 		cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger,
 	)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
	evReactorShim, evReactor, evPool, err := createEvidenceReactor(
 		cfg, dbProvider, stateDB, blockStore, peerManager, router, logger,
 	)
 	if err != nil {
-		return nil, err
+		return nil, combineCloseError(err, makeCloser(closers))
+
 	}
 
 	// make block executor for consensus and blockchain reactors to execute blocks
@@ -287,7 +318,9 @@ func makeNode(cfg *config.Config,
 		peerManager, router, blockSync && !stateSync, nodeMetrics.consensus,
 	)
 	if err != nil {
-		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("could not create blockchain reactor: %w", err),
+			makeCloser(closers))
 	}
 
 	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
@@ -305,6 +338,7 @@ func makeNode(cfg *config.Config,
 		ssLogger := logger.With("module", "statesync")
 		ssReactorShim := p2p.NewReactorShim(ssLogger, "StateSyncShim", statesync.ChannelShims)
 		channels := makeChannelsFromShims(router, statesync.ChannelShims)
+
 		peerUpdates := peerManager.Subscribe()
 		stateSyncReactor := statesync.NewReactor(
 			genDoc.ChainID,
@@ -354,7 +388,8 @@ func makeNode(cfg *config.Config,
 
 		pexReactor, err = createPEXReactor(logger, peerManager, router)
 		if err != nil {
-			return nil, err
+			return nil, combineCloseError(err, makeCloser(closers))
+
 		}
 
 	if cfg.RPC.PprofListenAddress != "" {
@@ -387,6 +422,9 @@ func makeNode(cfg *config.Config,
 		evidenceReactor: evReactor,
 		indexerService:  indexerService,
 		eventBus:        eventBus,
+		eventSinks:      eventSinks,
+
+		shutdownOps: makeCloser(closers),
 
 		rpcEnv: &rpccore.Environment{
 			ProxyAppQuery: proxyApp.Query(),
@@ -434,6 +472,7 @@ func makeSeedNode(cfg *config.Config,
 	state, err := sm.MakeGenesisState(genDoc)
 	if err != nil {
 		return nil, err
+
 	}
 
 	nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state)
@@ -446,15 +485,19 @@ func makeSeedNode(cfg *config.Config,
 
 	p2pLogger := logger.With("module", "p2p")
 	transport := createTransport(p2pLogger, cfg)
-	peerManager, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
+	peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create peer manager: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("failed to create peer manager: %w", err),
+			closer)
 	}
 
 	router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey,
 		peerManager, transport, getRouterConfig(cfg, nil))
 	if err != nil {
-		return nil, fmt.Errorf("failed to create router: %w", err)
+		return nil, combineCloseError(
+			fmt.Errorf("failed to create router: %w", err),
+			closer)
 	}
 
 	var pexReactor service.Service
@@ -468,7 +511,8 @@ func makeSeedNode(cfg *config.Config,
 
 		pexReactor, err = createPEXReactor(logger, peerManager, router)
 		if err != nil {
-			return nil, err
+			return nil, combineCloseError(err, closer)
+
 		}
 
 	if cfg.RPC.PprofListenAddress != "" {
@@ -488,6 +532,8 @@ func makeSeedNode(cfg *config.Config,
 		peerManager: peerManager,
 		router:      router,
 
+		shutdownOps: closer,
+
 		pexReactor: pexReactor,
 	}
 	node.BaseService = *service.NewBaseService(logger, "SeedNode", node)
@@ -653,6 +699,12 @@ func (n *nodeImpl) OnStop() {
 		}
 	}
 
+	for _, es := range n.eventSinks {
+		if err := es.Stop(); err != nil {
+			n.Logger.Error("failed to stop event sink", "err", err)
+		}
+	}
+
 	if n.config.Mode != config.ModeSeed {
 		// now stop the reactors
 		if n.config.BlockSync.Enable {
@@ -716,6 +768,10 @@ func (n *nodeImpl) OnStop() {
 			// Error from closing listeners, or context timeout:
 			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
 		}
+
+	}
+	if err := n.shutdownOps(); err != nil {
+		n.Logger.Error("problem shutting down additional services", "err", err)
 	}
 }
 
diff --git a/node/node_test.go b/node/node_test.go
index d5ea39aa6..7039b09e1 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -592,7 +592,7 @@ func TestNodeSetEventSink(t *testing.T) {
 	cfg.TxIndex.Indexer = []string{"kvv"}
 	ns, err := newDefaultNode(cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("unsupported event sink type"), err)
+	assert.Contains(t, err.Error(), "unsupported event sink type")
 	t.Cleanup(cleanup(ns))
 
 	cfg.TxIndex.Indexer = []string{}
@@ -604,7 +604,7 @@ func TestNodeSetEventSink(t *testing.T) {
 	cfg.TxIndex.Indexer = []string{"psql"}
 	ns, err = newDefaultNode(cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
+	assert.Contains(t, err.Error(), "the psql connection settings cannot be empty")
 	t.Cleanup(cleanup(ns))
 
 	var psqlConn = "test"
@@ -646,14 +646,14 @@ func TestNodeSetEventSink(t *testing.T) {
 	cfg.TxIndex.PsqlConn = psqlConn
 	ns, err = newDefaultNode(cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
 	t.Cleanup(cleanup(ns))
 
 	cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
 	cfg.TxIndex.PsqlConn = psqlConn
 	ns, err = newDefaultNode(cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
 	t.Cleanup(cleanup(ns))
 }
 
diff --git a/node/setup.go b/node/setup.go
index f6b9c028d..ac36b0858 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -2,7 +2,9 @@ package node
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	"strings"
 	"time"
 
 	dbm "github.com/tendermint/tm-db"
@@ -35,16 +37,57 @@ import (
 	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
 )
 
-func initDBs(cfg *config.Config, dbProvider config.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { //nolint:lll
-	var blockStoreDB dbm.DB
-	blockStoreDB, err = dbProvider(&config.DBContext{ID: "blockstore", Config: cfg})
-	if err != nil {
-		return
-	}
-	blockStore = store.NewBlockStore(blockStoreDB)
+type closer func() error
 
-	stateDB, err = dbProvider(&config.DBContext{ID: "state", Config: cfg})
-	return
+func makeCloser(cs []closer) closer {
+	return func() error {
+		errs := make([]string, 0, len(cs))
+		for _, cl := range cs {
+			if err := cl(); err != nil {
+				errs = append(errs, err.Error())
+			}
+		}
+		if len(errs) > 0 {
+			return errors.New(strings.Join(errs, "; "))
+		}
+		return nil
+	}
+}
+
+func combineCloseError(err error, cl closer) error {
+	if err == nil {
+		return cl()
+	}
+
+	clerr := cl()
+	if clerr == nil {
+		return err
+	}
+
+	return fmt.Errorf("error=%q closerError=%q", err.Error(), clerr.Error())
+}
+
+func initDBs(
+	cfg *config.Config,
+	dbProvider config.DBProvider,
+) (*store.BlockStore, dbm.DB, closer, error) {
+
+	blockStoreDB, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg})
+	if err != nil {
+		return nil, nil, func() error { return nil }, err
+	}
+	closers := []closer{}
+	blockStore := store.NewBlockStore(blockStoreDB)
+	closers = append(closers, blockStoreDB.Close)
+
+	stateDB, err := dbProvider(&config.DBContext{ID: "state", Config: cfg})
+	if err != nil {
+		return nil, nil, makeCloser(closers), err
+	}
+
+	closers = append(closers, stateDB.Close)
+
+	return blockStore, stateDB, makeCloser(closers), nil
 }
 
 func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger) (proxy.AppConns, error) {
@@ -354,7 +397,7 @@ func createPeerManager(
 	cfg *config.Config,
 	dbProvider config.DBProvider,
 	nodeID types.NodeID,
-) (*p2p.PeerManager, error) {
+) (*p2p.PeerManager, closer, error) {
 
 	var maxConns uint16
 
@@ -385,7 +428,7 @@ func createPeerManager(
 	for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") {
 		address, err := p2p.ParseNodeAddress(p)
 		if err != nil {
-			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
+			return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err)
 		}
 
 		peers = append(peers, address)
@@ -395,28 +438,28 @@ func createPeerManager(
 	for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") {
 		address, err := p2p.ParseNodeAddress(p)
 		if err != nil {
-			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
+			return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err)
 		}
 
 		peers = append(peers, address)
 	}
 
 	peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg})
 	if err != nil {
-		return nil, err
+		return nil, func() error { return nil }, err
 	}
 
 	peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create peer manager: %w", err)
+		return nil, peerDB.Close, fmt.Errorf("failed to create peer manager: %w", err)
 	}
 
 	for _, peer := range peers {
 		if _, err := peerManager.Add(peer); err != nil {
-			return nil, fmt.Errorf("failed to add peer %q: %w", peer, err)
+			return nil, peerDB.Close, fmt.Errorf("failed to add peer %q: %w", peer, err)
 		}
 	}
 
-	return peerManager, nil
+	return peerManager, peerDB.Close, nil
 }
 
 func createRouter(

From 6538776e6a5c9b9a378fd1424062bf854b8e2f3d Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Tue, 12 Oct 2021 15:29:06 -0700
Subject: [PATCH 090/174] build: Fix build-docker to include the full context. (#7114)

Fixes #7068. The build-docker rule relies on being able to run make
build-linux, but did not pull the Makefile into the build context.
There are various ways to fix this, but this was probably the smallest.
---
 Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index c9b6951f4..7f5dc677c 100644
--- a/Makefile
+++ b/Makefile
@@ -227,13 +227,13 @@ build-docs:
 
 build-docker: build-linux
 	cp $(BUILDDIR)/tendermint DOCKER/tendermint
-	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
+	docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
	rm -rf DOCKER/tendermint
 .PHONY: build-docker
 
 ###############################################################################
-###                                Mocks                                    ###
+###                                  Mocks                                  ###
 ###############################################################################
 
 mockery:
From 4fe0f262d4a110505cfbfde02c7d413a084d6661 Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Wed, 13 Oct 2021 14:28:43 +0200
Subject: [PATCH 091/174] changelog: add 0.34.14 updates (#7117)

This PR adds the 0.34.14 changes to the changelog in master
---
 CHANGELOG.md | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2eda330f2..e1ef2201a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -178,6 +178,24 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad,
 - [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
 - [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
 
+## v0.34.14
+
+This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.
+
+### FEATURES
+
+- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters).
+- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. @cmwaters
+
+### IMPROVEMENTS
+
+- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)
+
+### BUG FIXES
+
+- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
+- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
+
 ## v0.34.13
 
 *September 6, 2021*
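For operators, the backported command described in the changelog above is a single invocation. A minimal sketch, where the home path is illustrative and `--home` is Tendermint's standard home-directory override:

```
# Roll the tendermint state back one height, e.g. after a non-deterministic
# app hash; the application's own state must be rolled back separately.
tendermint rollback --home "$HOME/.tendermint"
```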
c.BroadcastEvidence(context.Background(), nil) - assert.Error(t, err) - } -} - func waitForBlock(ctx context.Context, t *testing.T, c client.Client, height int64) { timer := time.NewTimer(0 * time.Millisecond) defer timer.Stop() diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index d7554d174..38766e047 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,6 +1,7 @@ package client_test import ( + "bytes" "context" "encoding/base64" "fmt" @@ -16,11 +17,14 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/mempool" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" @@ -193,8 +197,13 @@ func TestClientMethodCalls(t *testing.T) { defer cancel() n, conf := NodeSuite(t) + // for broadcast tx tests pool := getMempool(t, n) + // for evidence tests + pv, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + for i, c := range GetClients(t, n, conf) { t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { t.Run("Status", func(t *testing.T) { @@ -503,6 +512,52 @@ func TestClientMethodCalls(t *testing.T) { testTxEventsSent(ctx, t, "sync", c) }) }) + t.Run("Evidence", func(t *testing.T) { + t.Run("BraodcastDuplicateVote", func(t *testing.T) { + chainID := conf.ChainID() + + correct, fakes := makeEvidences(t, pv, chainID) + + // make sure that the node has produced enough blocks + waitForBlock(ctx, t, c, 2) + + result, err := c.BroadcastEvidence(ctx, correct) + require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) + assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") + + status, err := c.Status(ctx) + require.NoError(t, err) + err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) + require.NoError(t, err) + + ed25519pub := pv.Key.PubKey.(ed25519.PubKey) + rawpub := ed25519pub.Bytes() + result2, err := c.ABCIQuery(ctx, "/val", rawpub) + require.NoError(t, err) + qres := result2.Response + require.True(t, qres.IsOK()) + + var v abci.ValidatorUpdate + err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) + require.NoError(t, err, "Error reading query result, value %v", qres.Value) + + pk, err := encoding.PubKeyFromProto(v.PubKey) + require.NoError(t, err) + + require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) + require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) + + for _, fake := range fakes { + _, err := c.BroadcastEvidence(ctx, fake) + require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) + } + + }) + t.Run("BroadcastEmpty", func(t *testing.T) { + _, err := c.BroadcastEvidence(ctx, nil) + assert.Error(t, err) + }) + }) }) } } diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 01aa83b4f..e2f47839c 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -73,6 +73,7 @@ func CreateConfig(testName string) *config.Config { tm, rpc, grpc := 
makeAddrs() c.P2P.ListenAddress = tm c.RPC.ListenAddress = rpc + c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} c.RPC.GRPCListenAddress = grpc return c From 36a1acff520b9f30925de9ad8f27189653709ea4 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Wed, 13 Oct 2021 16:52:25 -0400 Subject: [PATCH 093/174] internal/proxy: add initial set of abci metrics (#7115) This PR adds an initial set of metrics for use ABCI. The initial metrics enable the calculation of timing histograms and call counts for each of the ABCI methods. The metrics are also labeled as either 'sync' or 'async' to determine if the method call was performed using ABCI's `*Async` methods. An example of these metrics is included here for reference: ``` tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.0001"} 0 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.0004"} 5 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.002"} 12 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.009"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.02"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.1"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="0.65"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="2"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="6"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="25"} 13 tendermint_abci_connection_method_timing_bucket{chain_id="ci",method="commit",type="sync",le="+Inf"} 13 tendermint_abci_connection_method_timing_sum{chain_id="ci",method="commit",type="sync"} 0.007802058000000001 tendermint_abci_connection_method_timing_count{chain_id="ci",method="commit",type="sync"} 13 ``` These metrics can easily be graphed using prometheus's `histogram_quantile(...)` method to pick out a particular quantile to graph or examine. I chose buckets that were somewhat of an estimate of expected range of times for ABCI operations. They start at .0001 seconds and range to 25 seconds. The hope is that this range captures enough possible times to be useful for us and operators. 
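Each timing above is produced by the small defer-based helper added in this
patch (`addTimeSample`, in the diff below). For illustration, here is a
minimal, self-contained sketch of that pattern; `timeSample` and `commitSync`
are illustrative stand-ins, with a plain callback in place of the go-kit
histogram the real code observes into:

```go
package main

import (
	"fmt"
	"time"
)

// timeSample returns a function that, when invoked (typically via defer),
// reports the number of seconds elapsed since timeSample itself was called.
func timeSample(observe func(seconds float64)) func() {
	start := time.Now()
	return func() { observe(time.Since(start).Seconds()) }
}

func commitSync() {
	// The deferred call fires when commitSync returns, so the observation
	// covers the full wall-clock duration of the method.
	defer timeSample(func(s float64) { fmt.Printf("commit took %.6fs\n", s) })()
	time.Sleep(10 * time.Millisecond) // stand-in for the real ABCI round trip
}

func main() { commitSync() }
```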
---
 internal/blocksync/v0/reactor_test.go |  2 +-
 internal/consensus/metrics.go         |  9 +++++
 internal/consensus/replay_file.go     |  2 +-
 internal/consensus/replay_stubs.go    |  2 +-
 internal/consensus/replay_test.go     | 12 +++----
 internal/consensus/wal_generator.go   |  2 +-
 internal/proxy/app_conn.go            | 43 +++++++++++++++++++++---
 internal/proxy/metrics.go             | 47 +++++++++++++++++++++++++++
 internal/proxy/multi_app_conn.go      | 16 +++++----
 internal/proxy/multi_app_conn_test.go |  4 +--
 internal/state/execution_test.go      | 10 +++---
 internal/state/helpers_test.go        |  2 +-
 node/node.go                          |  9 +++--
 node/node_test.go                     |  6 ++--
 node/setup.go                         |  5 +--
 15 files changed, 134 insertions(+), 37 deletions(-)
 create mode 100644 internal/proxy/metrics.go

diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go
index e947581aa..312b1cb39 100644
--- a/internal/blocksync/v0/reactor_test.go
+++ b/internal/blocksync/v0/reactor_test.go
@@ -98,7 +98,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
 	t.Helper()

 	rts.nodes = append(rts.nodes, nodeID)
-	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}))
+	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics())
 	require.NoError(t, rts.app[nodeID].Start())

 	blockDB := dbm.NewMemDB()
diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go
index bceac4942..a75f1505c 100644
--- a/internal/consensus/metrics.go
+++ b/internal/consensus/metrics.go
@@ -61,6 +61,9 @@ type Metrics struct {

 	// Number of blockparts transmitted by peer.
 	BlockParts metrics.Counter
+
+	// Histogram of the time taken per step, annotated with the reason that the step proceeded.
+	StepTime metrics.Histogram
 }

 // PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -187,6 +190,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "block_parts", Help: "Number of blockparts transmitted by peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + StepTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "step_time", + Help: "Time spent per step.", + }, append(labels, "step", "reason")).With(labelsAndValues...), } } diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index d009533ba..f60dff531 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -312,7 +312,7 @@ func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.Consensu // Create proxyAppConn connection (consensus, mempool, query) clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err = proxyApp.Start() if err != nil { tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index bc8c11cd9..1235baccb 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -64,7 +64,7 @@ func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy if err != nil { panic(err) } - return proxy.NewAppConnConsensus(cli) + return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) } type mockProxyApp struct { diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 97017985d..0d0ae36e8 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -745,7 +745,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod if nBlocks > 0 { // run nBlocks against a new client to build up the app state. 
// use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2) + proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) @@ -765,7 +765,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2) + proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } @@ -893,7 +893,7 @@ func buildTMStateFromChain( defer kvstoreApp.Close() clientCreator := abciclient.NewLocalCreator(kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { panic(err) } @@ -960,7 +960,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, allHashesAreWrong: true} clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { @@ -984,7 +984,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { @@ -1243,7 +1243,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 7e31188fa..4b4375498 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -65,7 +65,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app)) + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics()) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go index ca2c7c109..803875646 100644 --- a/internal/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -2,7 +2,9 @@ package proxy import ( "context" + "time" + "github.com/go-kit/kit/metrics" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" ) @@ -56,11 +58,13 @@ type AppConnSnapshot interface { // Implements AppConnConsensus (subset of abciclient.Client) type appConnConsensus struct { + metrics *Metrics appConn abciclient.Client } -func NewAppConnConsensus(appConn abciclient.Client) AppConnConsensus { +func 
NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus {
 	return &appConnConsensus{
+		metrics: metrics,
 		appConn: appConn,
 	}
 }
@@ -77,6 +81,7 @@ func (app *appConnConsensus) InitChainSync(
 	ctx context.Context,
 	req types.RequestInitChain,
 ) (*types.ResponseInitChain, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))()
 	return app.appConn.InitChainSync(ctx, req)
 }

@@ -84,6 +89,7 @@ func (app *appConnConsensus) BeginBlockSync(
 	ctx context.Context,
 	req types.RequestBeginBlock,
 ) (*types.ResponseBeginBlock, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))()
 	return app.appConn.BeginBlockSync(ctx, req)
 }

@@ -91,6 +97,7 @@ func (app *appConnConsensus) DeliverTxAsync(
 	ctx context.Context,
 	req types.RequestDeliverTx,
 ) (*abciclient.ReqRes, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "async"))()
 	return app.appConn.DeliverTxAsync(ctx, req)
 }

@@ -98,10 +105,12 @@ func (app *appConnConsensus) EndBlockSync(
 	ctx context.Context,
 	req types.RequestEndBlock,
 ) (*types.ResponseEndBlock, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "end_block", "type", "sync"))()
 	return app.appConn.EndBlockSync(ctx, req)
 }

 func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))()
 	return app.appConn.CommitSync(ctx)
 }

@@ -109,11 +118,13 @@ func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCom
 // Implements AppConnMempool (subset of abciclient.Client)

 type appConnMempool struct {
+	metrics *Metrics
 	appConn abciclient.Client
 }

-func NewAppConnMempool(appConn abciclient.Client) AppConnMempool {
+func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool {
 	return &appConnMempool{
+		metrics: metrics,
 		appConn: appConn,
 	}
 }
@@ -127,18 +138,22 @@ func (app *appConnMempool) Error() error {
 }

 func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "async"))()
 	return app.appConn.FlushAsync(ctx)
 }

 func (app *appConnMempool) FlushSync(ctx context.Context) error {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))()
 	return app.appConn.FlushSync(ctx)
 }

 func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "async"))()
 	return app.appConn.CheckTxAsync(ctx, req)
 }

 func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))()
 	return app.appConn.CheckTxSync(ctx, req)
 }

@@ -146,11 +161,13 @@ func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestChe
 // Implements AppConnQuery (subset of abciclient.Client)

 type appConnQuery struct {
+	metrics *Metrics
 	appConn abciclient.Client
 }

-func NewAppConnQuery(appConn abciclient.Client) AppConnQuery {
+func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery {
 	return &appConnQuery{
+		metrics: metrics,
 		appConn: appConn,
 	}
 }
@@ -160,14 +177,17 @@ func (app *appConnQuery) Error() error {
 }

 func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))()
 	return app.appConn.EchoSync(ctx, msg)
 }

 func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))()
 	return app.appConn.InfoSync(ctx, req)
 }

 func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))()
 	return app.appConn.QuerySync(ctx, reqQuery)
 }

@@ -175,11 +195,13 @@ func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQu
 // Implements AppConnSnapshot (subset of abciclient.Client)

 type appConnSnapshot struct {
+	metrics *Metrics
 	appConn abciclient.Client
 }

-func NewAppConnSnapshot(appConn abciclient.Client) AppConnSnapshot {
+func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot {
 	return &appConnSnapshot{
+		metrics: metrics,
 		appConn: appConn,
 	}
 }
@@ -192,6 +214,7 @@ func (app *appConnSnapshot) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,
 ) (*types.ResponseListSnapshots, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))()
 	return app.appConn.ListSnapshotsSync(ctx, req)
 }

@@ -199,17 +222,29 @@ func (app *appConnSnapshot) OfferSnapshotSync(
 	ctx context.Context,
 	req types.RequestOfferSnapshot,
 ) (*types.ResponseOfferSnapshot, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))()
 	return app.appConn.OfferSnapshotSync(ctx, req)
 }

 func (app *appConnSnapshot) LoadSnapshotChunkSync(
 	ctx context.Context,
 	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))()
 	return app.appConn.LoadSnapshotChunkSync(ctx, req)
 }

 func (app *appConnSnapshot) ApplySnapshotChunkSync(
 	ctx context.Context,
 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))()
 	return app.appConn.ApplySnapshotChunkSync(ctx, req)
 }
+
+// addTimeSample returns a function that, when called, adds an observation to m.
+// The observation added to m is the number of seconds elapsed since addTimeSample
+// was initially called. addTimeSample is meant to be called in a defer to calculate
+// the amount of time a function takes to complete.
+func addTimeSample(m metrics.Histogram) func() {
+	start := time.Now()
+	return func() { m.Observe(time.Since(start).Seconds()) }
+}
diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go
new file mode 100644
index 000000000..99bd7d7b0
--- /dev/null
+++ b/internal/proxy/metrics.go
@@ -0,0 +1,47 @@
+package proxy
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+	"github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
+	// package.
+	MetricsSubsystem = "abci_connection"
+)
+
+// Metrics contains the prometheus metrics exposed by the proxy package.
+type Metrics struct { + MethodTiming metrics.Histogram +} + +// PrometheusMetrics constructs a Metrics instance that collects metrics samples. +// The resulting metrics will be prefixed with namespace and labeled with the +// defaultLabelsAndValues. defaultLabelsAndValues must be a list of string pairs +// where the first of each pair is the label and the second is the value. +func PrometheusMetrics(namespace string, defaultLabelsAndValues ...string) *Metrics { + defaultLabels := []string{} + for i := 0; i < len(defaultLabelsAndValues); i += 2 { + defaultLabels = append(defaultLabels, defaultLabelsAndValues[i]) + } + return &Metrics{ + MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "method_timing", + Help: "ABCI Method Timing", + Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, + }, append(defaultLabels, []string{"method", "type"}...)).With(defaultLabelsAndValues...), + } +} + +// NopMetrics constructs a Metrics instance that discards all samples and is suitable +// for testing. +func NopMetrics() *Metrics { + return &Metrics{ + MethodTiming: discard.NewHistogram(), + } +} diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go index df49df287..0bcc64af6 100644 --- a/internal/proxy/multi_app_conn.go +++ b/internal/proxy/multi_app_conn.go @@ -33,8 +33,8 @@ type AppConns interface { } // NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator abciclient.Creator) AppConns { - return NewMultiAppConn(clientCreator) +func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { + return NewMultiAppConn(clientCreator, metrics) } // multiAppConn implements AppConns. @@ -45,6 +45,7 @@ func NewAppConns(clientCreator abciclient.Creator) AppConns { type multiAppConn struct { service.BaseService + metrics *Metrics consensusConn AppConnConsensus mempoolConn AppConnMempool queryConn AppConnQuery @@ -59,8 +60,9 @@ type multiAppConn struct { } // NewMultiAppConn makes all necessary abci connections to the application. -func NewMultiAppConn(clientCreator abciclient.Creator) AppConns { +func NewMultiAppConn(clientCreator abciclient.Creator, metrics *Metrics) AppConns { multiAppConn := &multiAppConn{ + metrics: metrics, clientCreator: clientCreator, } multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) @@ -89,7 +91,7 @@ func (app *multiAppConn) OnStart() error { return err } app.queryConnClient = c - app.queryConn = NewAppConnQuery(c) + app.queryConn = NewAppConnQuery(c, app.metrics) c, err = app.abciClientFor(connSnapshot) if err != nil { @@ -97,7 +99,7 @@ func (app *multiAppConn) OnStart() error { return err } app.snapshotConnClient = c - app.snapshotConn = NewAppConnSnapshot(c) + app.snapshotConn = NewAppConnSnapshot(c, app.metrics) c, err = app.abciClientFor(connMempool) if err != nil { @@ -105,7 +107,7 @@ func (app *multiAppConn) OnStart() error { return err } app.mempoolConnClient = c - app.mempoolConn = NewAppConnMempool(c) + app.mempoolConn = NewAppConnMempool(c, app.metrics) c, err = app.abciClientFor(connConsensus) if err != nil { @@ -113,7 +115,7 @@ func (app *multiAppConn) OnStart() error { return err } app.consensusConnClient = c - app.consensusConn = NewAppConnConsensus(c) + app.consensusConn = NewAppConnConsensus(c, app.metrics) // Kill Tendermint if the ABCI application crashes. 
go app.killTMOnClientError() diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go index 55bcc7524..25ed692ab 100644 --- a/internal/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -31,7 +31,7 @@ func TestAppConns_Start_Stop(t *testing.T) { return clientMock, nil } - appConns := NewAppConns(creator) + appConns := NewAppConns(creator, NopMetrics()) err := appConns.Start() require.NoError(t, err) @@ -72,7 +72,7 @@ func TestAppConns_Failure(t *testing.T) { return clientMock, nil } - appConns := NewAppConns(creator) + appConns := NewAppConns(creator, NopMetrics()) err := appConns.Start() require.NoError(t, err) diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 8eff0430d..a66b677f9 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -36,7 +36,7 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -61,7 +61,7 @@ func TestApplyBlock(t *testing.T) { func TestBeginBlockValidators(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // no need to check error again @@ -124,7 +124,7 @@ func TestBeginBlockValidators(t *testing.T) { func TestBeginBlockByzantineValidators(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -349,7 +349,7 @@ func TestUpdateValidators(t *testing.T) { func TestEndBlockValidatorUpdates(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -422,7 +422,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index f8ed77b32..0cedebb00 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -31,7 +31,7 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} cc := abciclient.NewLocalCreator(app) - return proxy.NewAppConns(cc) + return proxy.NewAppConns(cc, proxy.NopMetrics()) } func makeAndCommitGoodBlock( diff --git a/node/node.go b/node/node.go index 6ee0d30f9..e15bde553 100644 --- a/node/node.go +++ b/node/node.go @@ -157,8 +157,10 @@ func makeNode(cfg *config.Config, } + nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 
- proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) + proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -268,8 +270,6 @@ func makeNode(cfg *config.Config, makeCloser(closers)) } - nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) - router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, peerManager, transport, getRouterConfig(cfg, proxyApp)) if err != nil { @@ -988,6 +988,7 @@ type nodeMetrics struct { mempool *mempool.Metrics state *sm.Metrics statesync *statesync.Metrics + proxy *proxy.Metrics } // metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. @@ -1004,6 +1005,7 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), } } return &nodeMetrics{ @@ -1012,6 +1014,7 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { mempool.NopMetrics(), sm.NopMetrics(), statesync.NopMetrics(), + proxy.NopMetrics(), } } } diff --git a/node/node_test.go b/node/node_test.go index 7039b09e1..19f27a640 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -240,7 +240,7 @@ func TestCreateProposalBlock(t *testing.T) { cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -335,7 +335,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -400,7 +400,7 @@ func TestMaxProposalBlockSize(t *testing.T) { cfg := config.ResetTestRoot("node_create_proposal") defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests diff --git a/node/setup.go b/node/setup.go index ac36b0858..ebaa576c2 100644 --- a/node/setup.go +++ b/node/setup.go @@ -90,8 +90,9 @@ func initDBs( return blockStore, stateDB, makeCloser(closers), nil } -func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator) +// nolint:lll +func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { + proxyApp := proxy.NewAppConns(clientCreator, metrics) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { return nil, fmt.Errorf("error starting proxy app connections: %v", err) From ff7b0e638eca88077a6eacbd33974abd8a6d92c9 Mon Sep 17 00:00:00 2001 From: William Banfield 
<4561443+williambanfield@users.noreply.github.com>
Date: Wed, 13 Oct 2021 17:18:44 -0400
Subject: [PATCH 094/174] p2p: fix priority queue bytes pending calculation (#7120)

This metric describes itself as 'pending' but never actually decrements when
messages are removed from the queue. This change fixes that by decrementing
the metric when the data is removed from the queue.
---
 internal/p2p/pqueue.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go
index e4560c7bd..b19436e15 100644
--- a/internal/p2p/pqueue.go
+++ b/internal/p2p/pqueue.go
@@ -167,13 +167,12 @@ func (s *pqScheduler) process() {
 				timestamp: time.Now().UTC(),
 			}

-			s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
-
 			// enqueue

 			// Check if we have sufficient capacity to simply enqueue the incoming
 			// Envelope.
 			if s.size+pqEnv.size <= s.capacity {
+				s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
 				// enqueue the incoming Envelope
 				s.push(pqEnv)
 			} else {
@@ -213,6 +212,8 @@ func (s *pqScheduler) process() {
 					"capacity", s.capacity,
 				)

+				s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size))
+
 				// dequeue/drop from the priority queue
 				heap.Remove(s.pq, pqEnvTmp.index)

@@ -257,6 +258,8 @@ func (s *pqScheduler) process() {
 			s.metrics.PeerSendBytesTotal.With(
 				"chID", chIDStr,
 				"peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
+			s.metrics.PeerPendingSendBytes.With(
+				"peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size))
 			select {
 			case s.dequeueCh <- pqEnv.envelope:
 			case <-s.closer.Done():

From 86f00135dd5d14a382a033c705062839d75e66cf Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Wed, 13 Oct 2021 15:01:01 -0700
Subject: [PATCH 095/174] rpc: Remove the deprecated gRPC interface to the RPC service (#7121)

This change removes the partial gRPC interface to the RPC service, which
was deprecated in resolution of #6718.

Details:
- rpc: Remove the client and server interfaces and proto definitions.
- Remove the gRPC settings from the config library.
- Remove gRPC setup for the RPC service in the node startup.
- Fix various test helpers to remove gRPC bits.
- Remove the --rpc.grpc-laddr flag from the CLI.

Note that to satisfy the protobuf interface check, this change also includes
a temporary edit to buf.yaml, which I will revert after this is merged.
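For callers migrating off the removed endpoint, the same broadcast can be
performed through the standard RPC client. A rough sketch follows; the
single-argument `rpchttp.New` constructor and the default listen address are
assumptions about the caller's setup, not part of this change:

```go
package main

import (
	"context"
	"log"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Assumed node address; substitute the node's configured rpc.laddr.
	c, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		log.Fatal(err)
	}
	// BroadcastTxCommit covers the removed BroadcastAPI.BroadcastTx use case:
	// it submits the transaction and waits until it is committed in a block.
	res, err := c.BroadcastTxCommit(context.Background(), []byte("this is a tx"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("check code=%d, deliver code=%d", res.CheckTx.Code, res.DeliverTx.Code)
}
```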
--- CHANGELOG_PENDING.md | 2 + Makefile | 6 +- buf.yaml | 3 + cmd/tendermint/commands/run_node.go | 4 - config/config.go | 30 +- config/config_test.go | 1 - config/toml.go | 20 +- internal/consensus/wal_generator.go | 18 +- node/node.go | 29 - proto/tendermint/rpc/grpc/types.proto | 32 - rpc/grpc/api.go | 41 -- rpc/grpc/client_server.go | 40 -- rpc/grpc/grpc_test.go | 45 -- rpc/grpc/types.pb.go | 924 -------------------------- rpc/test/helpers.go | 36 +- test/app/grpc_client.go | 42 -- 16 files changed, 33 insertions(+), 1240 deletions(-) delete mode 100644 proto/tendermint/rpc/grpc/types.proto delete mode 100644 rpc/grpc/api.go delete mode 100644 rpc/grpc/client_server.go delete mode 100644 rpc/grpc/grpc_test.go delete mode 100644 rpc/grpc/types.pb.go delete mode 100644 test/app/grpc_client.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0f8ba5ea7..9d8148783 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -12,6 +12,8 @@ Special thanks to external contributors on this release: - CLI/RPC/Config + - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair). + - Apps - P2P Protocol diff --git a/Makefile b/Makefile index 7f5dc677c..2bac7f5bf 100644 --- a/Makefile +++ b/Makefile @@ -89,7 +89,7 @@ proto-gen: .PHONY: proto-gen proto-lint: - @$(DOCKER_BUF) check lint --error-format=json + @$(DOCKER_BUF) lint --error-format=json .PHONY: proto-lint proto-format: @@ -98,11 +98,11 @@ proto-format: .PHONY: proto-format proto-check-breaking: - @$(DOCKER_BUF) check breaking --against-input .git#branch=master + @$(DOCKER_BUF) breaking --against .git#branch=master .PHONY: proto-check-breaking proto-check-breaking-ci: - @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master + @$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master .PHONY: proto-check-breaking-ci ############################################################################### diff --git a/buf.yaml b/buf.yaml index cc4aced57..98feb708e 100644 --- a/buf.yaml +++ b/buf.yaml @@ -14,3 +14,6 @@ lint: breaking: use: - FILE + ignore: + # TODO(#7075): Remove after file deletion is merged. + - tendermint/rpc diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 0c6d3271e..a5fa72ed5 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -70,10 +70,6 @@ func AddNodeFlags(cmd *cobra.Command) { // rpc flags cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") - cmd.Flags().String( - "rpc.grpc-laddr", - config.RPC.GRPCListenAddress, - "GRPC listen address (BroadcastTx only). Port required") cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods") cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") diff --git a/config/config.go b/config/config.go index 0e204b6e6..a9b2576fd 100644 --- a/config/config.go +++ b/config/config.go @@ -455,24 +455,10 @@ type RPCConfig struct { // A list of non simple headers the client is allowed to use with cross-domain requests. CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"` - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCListenAddress string `mapstructure:"grpc-laddr"` - - // Maximum number of simultaneous connections. 
- // Does not include RPC (HTTP&WebSocket) connections. See max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"` - // Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool Unsafe bool `mapstructure:"unsafe"` // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. @@ -486,7 +472,7 @@ type RPCConfig struct { MaxSubscriptionClients int `mapstructure:"max-subscription-clients"` // Maximum number of unique queries a given client can /subscribe to - // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set + // If you're using a Local RPC client and /broadcast_tx_commit, set this // to the estimated maximum number of broadcast_tx_commit calls per block. MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"` @@ -527,12 +513,10 @@ type RPCConfig struct { // DefaultRPCConfig returns a default configuration for the RPC server func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ - ListenAddress: "tcp://127.0.0.1:26657", - CORSAllowedOrigins: []string{}, - CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, - CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, - GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, + ListenAddress: "tcp://127.0.0.1:26657", + CORSAllowedOrigins: []string{}, + CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, + CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, Unsafe: false, MaxOpenConnections: 900, @@ -553,7 +537,6 @@ func DefaultRPCConfig() *RPCConfig { func TestRPCConfig() *RPCConfig { cfg := DefaultRPCConfig() cfg.ListenAddress = "tcp://127.0.0.1:36657" - cfg.GRPCListenAddress = "tcp://127.0.0.1:36658" cfg.Unsafe = true return cfg } @@ -561,9 +544,6 @@ func TestRPCConfig() *RPCConfig { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *RPCConfig) ValidateBasic() error { - if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc-max-open-connections can't be negative") - } if cfg.MaxOpenConnections < 0 { return errors.New("max-open-connections can't be negative") } diff --git a/config/config_test.go b/config/config_test.go index db0fb5967..181314492 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -66,7 +66,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { assert.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ - "GRPCMaxOpenConnections", "MaxOpenConnections", "MaxSubscriptionClients", "MaxSubscriptionsPerClient", diff --git a/config/toml.go b/config/toml.go index 1159640fb..3be385060 100644 --- a/config/toml.go +++ b/config/toml.go @@ -193,26 +193,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }} # A list of non simple headers the client is allowed to use with cross-domain requests cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . 
}}{{end}}] -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-laddr = "{{ .RPC.GRPCListenAddress }}" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }} - # Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = {{ .RPC.Unsafe }} # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. @@ -226,8 +210,8 @@ max-open-connections = {{ .RPC.MaxOpenConnections }} max-subscription-clients = {{ .RPC.MaxSubscriptionClients }} # Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. +# If you're using a Local RPC client and /broadcast_tx_commit, set this +# to the estimated maximum number of broadcast_tx_commit calls per block. max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }} # How long to wait for a tx to be committed during /broadcast_tx_commit. diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 4b4375498..f81234f97 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -145,22 +145,22 @@ func randPort() int { return base + mrand.Intn(spread) } -func makeAddrs() (string, string, string) { +// makeAddrs constructs local TCP addresses for node services. +// It uses consecutive ports from a random starting point, so that concurrent +// instances are less likely to collide. 
+func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" start := randPort() - return fmt.Sprintf("tcp://127.0.0.1:%d", start), - fmt.Sprintf("tcp://127.0.0.1:%d", start+1), - fmt.Sprintf("tcp://127.0.0.1:%d", start+2) + return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1) } // getConfig returns a config for test cases func getConfig(t *testing.T) *config.Config { c := config.ResetTestRoot(t.Name()) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr return c } diff --git a/node/node.go b/node/node.go index e15bde553..b6c47a9ce 100644 --- a/node/node.go +++ b/node/node.go @@ -34,7 +34,6 @@ import ( tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" - grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" @@ -869,35 +868,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listeners[i] = listener } - // we expose a simplified api over grpc for convenience to app devs - grpcListenAddr := n.config.RPC.GRPCListenAddress - if grpcListenAddr != "" { - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections - cfg.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. 
- // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - listener, err := rpcserver.Listen(grpcListenAddr, cfg.MaxOpenConnections) - if err != nil { - return nil, err - } - go func() { - if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil { - n.Logger.Error("Error starting gRPC server", "err", err) - } - }() - listeners = append(listeners, listener) - - } - return listeners, nil - } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto deleted file mode 100644 index ee948a406..000000000 --- a/proto/tendermint/rpc/grpc/types.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; -package tendermint.rpc.grpc; -option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc"; - -import "tendermint/abci/types.proto"; - -//---------------------------------------- -// Request types - -message RequestPing {} - -message RequestBroadcastTx { - bytes tx = 1; -} - -//---------------------------------------- -// Response types - -message ResponsePing {} - -message ResponseBroadcastTx { - tendermint.abci.ResponseCheckTx check_tx = 1; - tendermint.abci.ResponseDeliverTx deliver_tx = 2; -} - -//---------------------------------------- -// Service Definition - -service BroadcastAPI { - rpc Ping(RequestPing) returns (ResponsePing); - rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); -} diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go deleted file mode 100644 index 4a4a13687..000000000 --- a/rpc/grpc/api.go +++ /dev/null @@ -1,41 +0,0 @@ -package coregrpc - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/rpc/core" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -type broadcastAPI struct { - env *core.Environment -} - -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - // kvstore so we can check if the server is up - return &ResponsePing{}, nil -} - -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. -func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - // NOTE: there's no way to get client's remote address - // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go - res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) - if err != nil { - return nil, err - } - - return &ResponseBroadcastTx{ - CheckTx: &abci.ResponseCheckTx{ - Code: res.CheckTx.Code, - Data: res.CheckTx.Data, - Log: res.CheckTx.Log, - }, - DeliverTx: &abci.ResponseDeliverTx{ - Code: res.DeliverTx.Code, - Data: res.DeliverTx.Data, - Log: res.DeliverTx.Log, - }, - }, nil -} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go deleted file mode 100644 index e5164dc64..000000000 --- a/rpc/grpc/client_server.go +++ /dev/null @@ -1,40 +0,0 @@ -package coregrpc - -import ( - "context" - "net" - - "google.golang.org/grpc" - - "github.com/tendermint/tendermint/internal/rpc/core" - tmnet "github.com/tendermint/tendermint/libs/net" -) - -// Config is an gRPC server configuration. -type Config struct { - MaxOpenConnections int -} - -// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given -// net.Listener. 
-// NOTE: This function blocks - you may want to call it in a go-routine. -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36 -func StartGRPCServer(env *core.Environment, ln net.Listener) error { - grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) - return grpcServer.Serve(ln) -} - -// StartGRPCClient dials the gRPC server using protoAddr and returns a new -// BroadcastAPIClient. -func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) - if err != nil { - panic(err) - } - return NewBroadcastAPIClient(conn) -} - -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { - return tmnet.Connect(addr) -} diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go deleted file mode 100644 index e680d195e..000000000 --- a/rpc/grpc/grpc_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package coregrpc_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/service" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -func NodeSuite(t *testing.T) (service.Service, *config.Config) { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - - conf := rpctest.CreateConfig(t.Name()) - - // start a tendermint node in the background to test against - app := kvstore.NewApplication() - - node, closer, err := rpctest.StartTendermint(ctx, conf, app) - require.NoError(t, err) - t.Cleanup(func() { - _ = closer(ctx) - cancel() - }) - return node, conf -} - -func TestBroadcastTx(t *testing.T) { - _, conf := NodeSuite(t) - - res, err := rpctest.GetGRPCClient(conf).BroadcastTx( - context.Background(), - &coregrpc.RequestBroadcastTx{Tx: []byte("this is a tx")}, - ) - require.NoError(t, err) - require.EqualValues(t, 0, res.CheckTx.Code) - require.EqualValues(t, 0, res.DeliverTx.Code) -} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go deleted file mode 100644 index b9cbee03f..000000000 --- a/rpc/grpc/types.pb.go +++ /dev/null @@ -1,924 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/rpc/grpc/types.proto - -package coregrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RequestPing struct { -} - -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{0} -} -func (m *RequestPing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestPing) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPing.Merge(m, src) -} -func (m *RequestPing) XXX_Size() int { - return m.Size() -} -func (m *RequestPing) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPing.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestPing proto.InternalMessageInfo - -type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{1} -} -func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBroadcastTx.Merge(m, src) -} -func (m *RequestBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *RequestBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo - -func (m *RequestBroadcastTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type ResponsePing struct { -} - -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{2} -} -func (m *ResponsePing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponsePing) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePing.Merge(m, src) -} -func (m *ResponsePing) XXX_Size() int { - return m.Size() -} -func (m *ResponsePing) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePing.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponsePing proto.InternalMessageInfo - -type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" 
json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` -} - -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{3} -} -func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) -} -func (m *ResponseBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo - -func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { - if m != nil { - return m.CheckTx - } - return nil -} - -func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { - if m != nil { - return m.DeliverTx - } - return nil -} - -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") -} - -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, - 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, - 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, - 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, - 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, - 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, - 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, - 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, - 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, - 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, - 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, - 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, - 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, - 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, - 0x5e, 
0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, - 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) -} - -type broadcastAPIClient struct { - cc *grpc.ClientConn -} - -func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} -} - -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) -} - -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
-type UnimplementedBroadcastAPIServer struct { -} - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") -} -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") -} - -func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) -} - -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) - } - return interceptor(ctx, in, info, handler) -} - -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) - } - return interceptor(ctx, in, info, handler) -} - -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.rpc.grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/rpc/grpc/types.proto", -} - -func (m *RequestPing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResponsePing) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RequestPing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *RequestBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *ResponsePing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ResponseBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestPing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponsePing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckTx == nil { - m.CheckTx = &types.ResponseCheckTx{} - } - if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeliverTx == nil { - m.DeliverTx = &types.ResponseDeliverTx{} - } - if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index e2f47839c..189289b0a 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/coretypes" - coregrpc 
"github.com/tendermint/tendermint/rpc/grpc" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -24,6 +23,7 @@ type Options struct { suppressStdout bool } +// waitForRPC connects to the RPC service and blocks until a /status call succeeds. func waitForRPC(ctx context.Context, conf *config.Config) { laddr := conf.RPC.ListenAddress client, err := rpcclient.New(laddr) @@ -42,16 +42,6 @@ func waitForRPC(ctx context.Context, conf *config.Config) { } } -func waitForGRPC(ctx context.Context, conf *config.Config) { - client := GetGRPCClient(conf) - for { - _, err := client.Ping(ctx, &coregrpc.RequestPing{}) - if err == nil { - return - } - } -} - func randPort() int { port, err := tmnet.GetFreePort() if err != nil { @@ -60,30 +50,24 @@ func randPort() int { return port } -func makeAddrs() (string, string, string) { - return fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()) +// makeAddrs constructs local listener addresses for node services. This +// implementation uses random ports so test instances can run concurrently. +func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" + return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } func CreateConfig(testName string) *config.Config { c := config.ResetTestRoot(testName) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - c.RPC.GRPCListenAddress = grpc return c } -func GetGRPCClient(conf *config.Config) coregrpc.BroadcastAPIClient { - grpcAddr := conf.RPC.GRPCListenAddress - return coregrpc.StartGRPCClient(grpcAddr) -} - type ServiceCloser func(context.Context) error func StartTendermint(ctx context.Context, @@ -112,9 +96,7 @@ func StartTendermint(ctx context.Context, return nil, func(_ context.Context) error { return nil }, err } - // wait for rpc waitForRPC(ctx, conf) - waitForGRPC(ctx, conf) if !nodeOpts.suppressStdout { fmt.Println("Tendermint running!") diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go deleted file mode 100644 index 73022aaf8..000000000 --- a/test/app/grpc_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "context" - - tmjson "github.com/tendermint/tendermint/libs/json" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" -) - -var grpcAddr = "tcp://localhost:36656" - -func main() { - args := os.Args - if len(args) == 1 { - fmt.Println("Must enter a transaction to send (hex)") - os.Exit(1) - } - tx := args[1] - txBytes, err := hex.DecodeString(tx) - if err != nil { - fmt.Println("Invalid hex", err) - os.Exit(1) - } - - clientGRPC := coregrpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - bz, err := tmjson.Marshal(res) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println(string(bz)) -} From bc1a20dbb86e4fa2120b2c8a9de88814471f4a2c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 13 Oct 2021 16:37:50 -0700 Subject: [PATCH 096/174] Revert temporary patch to buf.yaml. 
(#7122) This patch was needed to pass the buf breakage check for the proto file removed in #7121, but now that master contains the change we no longer need the patch. --- buf.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/buf.yaml b/buf.yaml index 98feb708e..cc4aced57 100644 --- a/buf.yaml +++ b/buf.yaml @@ -14,6 +14,3 @@ lint: breaking: use: - FILE - ignore: - # TODO(#7075): Remove after file deletion is merged. - - tendermint/rpc From b95c26198163335999e746c8cacbe29cab6b01a1 Mon Sep 17 00:00:00 2001 From: Jared Zhou Date: Thu, 14 Oct 2021 16:34:25 +0800 Subject: [PATCH 097/174] rpc: fix typo in broadcast commit (#7124) --- internal/rpc/core/mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index c20f02032..5b12a6c2b 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -78,7 +78,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* CheckTx: *r, Hash: tx.Hash(), }, - errors.New("cannot wait for commit because kvEventSync is not enabled") + errors.New("cannot confirm transaction because kvEventSink is not enabled") } startAt := time.Now() From c3dc7d20df6d32c8087f2cf01f7817d47273bd19 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 15 Oct 2021 10:35:36 +0200 Subject: [PATCH 098/174] docs: add roadmap to repo (#7107) --- README.md | 4 + docs/.vuepress/config.js | 4 - docs/app-dev/readme.md | 5 +- docs/networks/README.md | 16 --- docs/nodes/README.md | 2 +- docs/roadmap/roadmap.md | 100 ++++++++++++++++++ docs/tendermint-core/README.md | 5 +- docs/tools/README.md | 8 ++ docs/{networks => tools}/docker-compose.md | 0 .../terraform-and-ansible.md | 0 docs/tutorials/readme.md | 2 +- 11 files changed, 119 insertions(+), 27 deletions(-) delete mode 100644 docs/networks/README.md create mode 100644 docs/roadmap/roadmap.md rename docs/{networks => tools}/docker-compose.md (100%) rename docs/{networks => tools}/terraform-and-ansible.md (100%) diff --git a/README.md b/README.md index 3157a148e..f0da8f484 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,8 @@ See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, we're happy to help. You can contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork). +More on how releases are conducted can be found [here](./RELEASES.md). + ## Security To report a security vulnerability, see our [bug bounty @@ -112,6 +114,8 @@ in [UPGRADING.md](./UPGRADING.md). ### Tendermint Core +We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md) + For details about the blockchain data structures and the p2p protocols, see the [Tendermint specification](https://docs.tendermint.com/master/spec/). 
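The error-message fix in #7124 above is easy to misread in hunk form. What follows is a minimal, self-contained sketch of the guard it touches; the types and the kvSinkEnabled flag are simplified stand-ins, not the actual signatures in internal/rpc/core/mempool.go:

package main

import (
	"errors"
	"fmt"
)

// resultBroadcastTxCommit is a simplified stand-in for the real
// coretypes.ResultBroadcastTxCommit returned by the RPC layer.
type resultBroadcastTxCommit struct {
	Hash []byte
}

// broadcastTxCommit sketches the early-return path touched by #7124:
// without a kv event sink the node cannot subscribe for the commit
// event, so it hands back the CheckTx result with the corrected error.
func broadcastTxCommit(kvSinkEnabled bool, txHash []byte) (*resultBroadcastTxCommit, error) {
	if !kvSinkEnabled {
		return &resultBroadcastTxCommit{Hash: txHash},
			errors.New("cannot confirm transaction because kvEventSink is not enabled")
	}
	// ... otherwise: subscribe to tx events and wait for the commit ...
	return &resultBroadcastTxCommit{Hash: txHash}, nil
}

func main() {
	if _, err := broadcastTxCommit(false, []byte{0xab}); err != nil {
		fmt.Println(err) // cannot confirm transaction because kvEventSink is not enabled
	}
}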
diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index f453dc1ef..4653e2c5b 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -48,10 +48,6 @@ module.exports = { { title: 'Resources', children: [ - { - title: 'Developer Sessions', - path: '/DEV_SESSIONS.html' - }, { title: 'RPC', path: 'https://docs.tendermint.com/master/rpc/', diff --git a/docs/app-dev/readme.md b/docs/app-dev/readme.md index 51e88fc34..46ce06ca0 100644 --- a/docs/app-dev/readme.md +++ b/docs/app-dev/readme.md @@ -1,7 +1,6 @@ --- order: false parent: + title: "Building Applications" order: 3 ---- - -# Apps +--- \ No newline at end of file diff --git a/docs/networks/README.md b/docs/networks/README.md deleted file mode 100644 index 0b14e391b..000000000 --- a/docs/networks/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -order: 1 -parent: - title: Networks - order: 1 ---- - -# Overview - -Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your -local machine. - -Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint -testnets to the cloud. - -See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/nodes/README.md b/docs/nodes/README.md index 9be6febf0..fd9056e0d 100644 --- a/docs/nodes/README.md +++ b/docs/nodes/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: Nodes + title: Node Operators order: 4 --- diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md new file mode 100644 index 000000000..e6e5a32fe --- /dev/null +++ b/docs/roadmap/roadmap.md @@ -0,0 +1,100 @@ +--- +order: false +parent: + title: Roadmap + order: 7 +--- + +# Tendermint Roadmap + +*Last Updated: Friday 8 October 2021* + +This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams. + +Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). + +This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status. + +The upgrades are split into two components: **Epics**, the features that define a release and in large part dictate the timing of releases; and **minors**, features of smaller scale and lower priority, that could land in neighboring releases. + +## V0.35 (completed Q3 2021) + +### Prioritized Mempool + +Transactions were previously added to blocks in the order in which they arrived at the mempool. Adding a priority field via `CheckTx` gives applications more control over which transactions make it into a block. This is important in the presence of transaction fees. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md) + +### Refactor of the P2P Framework + +The Tendermint P2P system is undergoing a large redesign to improve its performance and reliability. The first phase of this redesign is included in 0.35.
This phase cleans and decouples abstractions, improves peer lifecycle management, peer address handling and enables pluggable transports. It is implemented to be protocol-compatible with the previous implementation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-062-p2p-architecture.md) + +### State Sync Improvements + +Following the initial version of state sync, several improvements have been made. These include the addition of [Reverse Sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-068-reverse-sync.md) needed for evidence handling, the introduction of a [P2P State Provider](https://github.com/tendermint/tendermint/pull/6807) as an alternative to RPC endpoints, new configuration parameters to adjust throughput, and several bug fixes. + +### Custom event indexing + PSQL Indexer + +Added a new `EventSink` interface to allow alternatives to Tendermint's proprietary transaction indexer. We also added a PostgreSQL Indexer implementation, allowing rich SQL-based index queries. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md) + +### Minor Works + +- Several Go packages were reorganized to make the distinction between public APIs and implementation details more clear. +- Block indexer to index begin-block and end-block events. [More](https://github.com/tendermint/tendermint/pull/6226) +- Block, state, evidence, and light storage keys were reworked to preserve lexicographic order. This change requires a database migration. [More](https://github.com/tendermint/tendermint/pull/5771) +- Introduction of Tendermint modes. Part of this change includes the possibility to run a separate seed node that runs the PEX reactor only. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) + +## V0.36 (expected Q1 2022) + +### ABCI++ + +An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md) + +### Proposer-Based Timestamps + +Proposer-based timestamps are a replacement for [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md) + +### Soft Upgrades + +We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222) + +### Minor Works + +- Remove the "legacy" P2P framework, and clean up the P2P package. [More](https://github.com/tendermint/tendermint/issues/5670) +- Remove the global mutex from the local ABCI client to enable application-controlled concurrency.
[More](https://github.com/tendermint/tendermint/issues/7073) +- Enable P2P support for light clients +- Node orchestration of services + Node initialization and composability +- Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer. +- Improve node visibility by introducing more metrics + +## V0.37 (expected Q3 2022) + +### Complete P2P Refactor + +Finish the final phase of the P2P system. Ongoing research and planning are taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC) and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/). Research into more advanced gossiping techniques. + +### Streamline Storage Engine + +Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897) + +### Evaluate Interprocess Communication + +Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, events as examples). Many of these have multiple implementations where a single one would suffice. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md) + +### Minor Works + +- Amnesia attack handling. [More](https://github.com/tendermint/tendermint/issues/5270) +- Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397) +- Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319) +- Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446) + +## V1.0 (expected Q4 2022) + +Has the same feature set as V0.37 but with a focus on testing, protocol correctness and minor tweaks to ensure a stable product. Such work might include extending the [consensus testing framework](https://github.com/tendermint/tendermint/issues/5920), the use of canary/long-lived testnets and greater integration tests. + +## Post 1.0 Work + +- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347) +- Consensus engine refactor +- Bidirectional ABCI +- Randomized Leader Election +- ZK proofs / other cryptographic primitives +- Multichain Tendermint diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index fb359a08b..f83349db2 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: System + title: Understanding Tendermint order: 5 --- @@ -10,7 +10,6 @@ parent: This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) -- [Running in Production](./running-in-production.md) - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) @@ -18,3 +17,5 @@ This section dives into the internals of Go-Tendermint. - [State Sync](./state-sync.md) - [Mempool](./mempool.md) - [Light Client](./light-client.md) + +For full specifications refer to the [spec repo](https://github.com/tendermint/spec).
\ No newline at end of file diff --git a/docs/tools/README.md b/docs/tools/README.md index 3e87a2ea1..5d778f470 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -27,3 +27,11 @@ testing Tendermint networks. This repository contains various different configurations of test networks for, and relating to, Tendermint. + +Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your +local machine. + +Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint +testnets to the cloud. + +See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/networks/docker-compose.md b/docs/tools/docker-compose.md similarity index 100% rename from docs/networks/docker-compose.md rename to docs/tools/docker-compose.md diff --git a/docs/networks/terraform-and-ansible.md b/docs/tools/terraform-and-ansible.md similarity index 100% rename from docs/networks/terraform-and-ansible.md rename to docs/tools/terraform-and-ansible.md diff --git a/docs/tutorials/readme.md b/docs/tutorials/readme.md index a60fba349..0216df800 100644 --- a/docs/tutorials/readme.md +++ b/docs/tutorials/readme.md @@ -4,4 +4,4 @@ parent: order: 2 --- -# Guides +# Tutorials From 006e6108a121c50e482c8d2da551162d36b844a1 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 15 Oct 2021 05:18:53 -0700 Subject: [PATCH 099/174] Fix the protobuf generation script. (#7127) This should have been part of #7121, but I missed it. --- scripts/protocgen.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 51b1cc6d3..1b84c2c56 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -5,5 +5,3 @@ set -eo pipefail buf generate --path proto/tendermint mv ./proto/tendermint/abci/types.pb.go ./abci/types - -mv ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc From 66a11fe52795eaba6aacae3ca25206c380e5283a Mon Sep 17 00:00:00 2001 From: Marko Date: Fri, 15 Oct 2021 13:03:53 +0000 Subject: [PATCH 100/174] blocksync: remove v0 folder structure (#7128) Remove v0 blocksync folder structure. 
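The move is mechanical for callers: the package simply loses its v0 path segment. Below is a minimal sketch of a post-patch call site; the blockSyncChannelID helper is hypothetical, while the import path and the BlockSyncChannel identifier are taken from the node/setup.go hunk in this diff:

package node

import (
	// Pre-patch form (removed by this commit):
	//   bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
	"github.com/tendermint/tendermint/internal/blocksync"
)

// blockSyncChannelID is a hypothetical helper showing the renamed package
// in use; node/setup.go below makes the equivalent change in place.
func blockSyncChannelID() byte {
	return byte(blocksync.BlockSyncChannel)
}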
--- internal/blocksync/{v0 => }/pool.go | 2 +- internal/blocksync/{v0 => }/pool_test.go | 2 +- internal/blocksync/{v0 => }/reactor.go | 5 ++--- internal/blocksync/{v0 => }/reactor_test.go | 2 +- node/setup.go | 12 ++++++------ 5 files changed, 11 insertions(+), 12 deletions(-) rename internal/blocksync/{v0 => }/pool.go (99%) rename internal/blocksync/{v0 => }/pool_test.go (99%) rename internal/blocksync/{v0 => }/reactor.go (99%) rename internal/blocksync/{v0 => }/reactor_test.go (99%) diff --git a/internal/blocksync/v0/pool.go b/internal/blocksync/pool.go similarity index 99% rename from internal/blocksync/v0/pool.go rename to internal/blocksync/pool.go index b3704f333..d10a32b0d 100644 --- a/internal/blocksync/v0/pool.go +++ b/internal/blocksync/pool.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "errors" diff --git a/internal/blocksync/v0/pool_test.go b/internal/blocksync/pool_test.go similarity index 99% rename from internal/blocksync/v0/pool_test.go rename to internal/blocksync/pool_test.go index 67617d2b7..cbe19acbe 100644 --- a/internal/blocksync/v0/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "fmt" diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/reactor.go similarity index 99% rename from internal/blocksync/v0/reactor.go rename to internal/blocksync/reactor.go index 552dcbda5..0c193577e 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "fmt" @@ -6,7 +6,6 @@ import ( "sync" "time" - "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" @@ -36,7 +35,7 @@ var ( Priority: 5, SendQueueCapacity: 1000, RecvBufferCapacity: 1024, - RecvMessageCapacity: blocksync.MaxMsgSize, + RecvMessageCapacity: MaxMsgSize, MaxSendBytes: 100, }, }, diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/reactor_test.go similarity index 99% rename from internal/blocksync/v0/reactor_test.go rename to internal/blocksync/reactor_test.go index 312b1cb39..3824f1460 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "os" diff --git a/node/setup.go b/node/setup.go index ebaa576c2..a3491c995 100644 --- a/node/setup.go +++ b/node/setup.go @@ -12,7 +12,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" @@ -310,13 +310,13 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") - reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - channels := makeChannelsFromShims(router, bcv0.ChannelShims) + reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", blocksync.ChannelShims) + channels := makeChannelsFromShims(router, blocksync.ChannelShims) peerUpdates := peerManager.Subscribe() - reactor, err := bcv0.NewReactor( + reactor, err := blocksync.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, - channels[bcv0.BlockSyncChannel], 
peerUpdates, blockSync, + channels[blocksync.BlockSyncChannel], peerUpdates, blockSync, metrics, ) if err != nil { @@ -512,7 +512,7 @@ func makeNodeInfo( txIndexerStatus = "on" } - bcChannel := byte(bcv0.BlockSyncChannel) + bcChannel := byte(blocksync.BlockSyncChannel) nodeInfo := types.NodeInfo{ ProtocolVersion: types.ProtocolVersion{ From f4a56f4034e04775b4157413c3a5bbf71eef3409 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 15 Oct 2021 11:45:12 -0400 Subject: [PATCH 101/174] p2p: refactor channel description (#7130) This is another small sliver of #7075, with the intention of removing the legacy shim layer related to channel registration. --- internal/blocksync/reactor.go | 3 +-- internal/consensus/reactor.go | 12 ++++-------- internal/evidence/reactor.go | 3 +-- internal/mempool/v0/reactor.go | 3 +-- internal/mempool/v1/reactor.go | 3 +-- internal/p2p/conn/connection.go | 6 ++---- internal/p2p/p2p_test.go | 1 - internal/p2p/p2ptest/network.go | 1 - internal/p2p/pex/reactor.go | 1 - internal/p2p/pqueue_test.go | 2 +- internal/p2p/shim.go | 4 +--- internal/statesync/reactor.go | 12 ++++-------- node/node.go | 2 +- 13 files changed, 17 insertions(+), 36 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 0c193577e..5f4ce3029 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -29,14 +29,13 @@ var ( // ref: https://github.com/tendermint/tendermint/issues/5670 ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ BlockSyncChannel: { - MsgType: new(bcproto.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(BlockSyncChannel), + MessageType: new(bcproto.Message), Priority: 5, SendQueueCapacity: 1000, RecvBufferCapacity: 1024, RecvMessageCapacity: MaxMsgSize, - MaxSendBytes: 100, }, }, } diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 22bd7576e..27ecb4602 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -31,50 +31,46 @@ var ( // ref: https://github.com/tendermint/tendermint/issues/5670 ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ StateChannel: { - MsgType: new(tmcons.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(StateChannel), + MessageType: new(tmcons.Message), Priority: 8, SendQueueCapacity: 64, RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 128, - MaxSendBytes: 12000, }, }, DataChannel: { - MsgType: new(tmcons.Message), Descriptor: &p2p.ChannelDescriptor{ // TODO: Consider a split between gossiping current block and catchup // stuff. Once we gossip the whole block there is nothing left to send // until next height or round. 
ID: byte(DataChannel), + MessageType: new(tmcons.Message), Priority: 12, SendQueueCapacity: 64, RecvBufferCapacity: 512, RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 40000, }, }, VoteChannel: { - MsgType: new(tmcons.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(VoteChannel), + MessageType: new(tmcons.Message), Priority: 10, SendQueueCapacity: 64, RecvBufferCapacity: 128, RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 150, }, }, VoteSetBitsChannel: { - MsgType: new(tmcons.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(VoteSetBitsChannel), + MessageType: new(tmcons.Message), Priority: 5, SendQueueCapacity: 8, RecvBufferCapacity: 128, RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 50, }, }, } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index a454038fd..0090bd32f 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -27,13 +27,12 @@ var ( // ref: https://github.com/tendermint/tendermint/issues/5670 ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ EvidenceChannel: { - MsgType: new(tmproto.EvidenceList), Descriptor: &p2p.ChannelDescriptor{ ID: byte(EvidenceChannel), + MessageType: new(tmproto.EvidenceList), Priority: 6, RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 32, - MaxSendBytes: 400, }, }, } diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 86392e96f..1587c0778 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -100,13 +100,12 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ mempool.MempoolChannel: { - MsgType: new(protomem.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(mempool.MempoolChannel), + MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), RecvBufferCapacity: 128, - MaxSendBytes: 5000, }, }, } diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index f07808387..e31977cc9 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -107,13 +107,12 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ mempool.MempoolChannel: { - MsgType: new(protomem.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(mempool.MempoolChannel), + MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), RecvBufferCapacity: 128, - MaxSendBytes: 5000, }, }, } diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 0ab7b7546..c6a9b206c 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -612,6 +612,8 @@ type ChannelDescriptor struct { ID byte Priority int + MessageType proto.Message + // TODO: Remove once p2p refactor is complete. SendQueueCapacity int RecvMessageCapacity int @@ -619,10 +621,6 @@ type ChannelDescriptor struct { // RecvBufferCapacity defines the max buffer size of inbound messages for a // given p2p Channel queue. RecvBufferCapacity int - - // MaxSendBytes defines the maximum number of bytes that can be sent at any - // given moment from a Channel to a peer. 
- MaxSendBytes uint } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 6e524d492..91e1c0824 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -19,7 +19,6 @@ var ( Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, - MaxSendBytes: 1000, } selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd}) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 1daba3f14..21aa1a0ea 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -339,6 +339,5 @@ func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, - MaxSendBytes: 1000, } } diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index f5c03adf8..f313279c3 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -69,7 +69,6 @@ func ChannelDescriptor() conn.ChannelDescriptor { SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 32, - MaxSendBytes: 200, } } diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index c038408ef..18f0c02e3 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -13,7 +13,7 @@ type testMessage = gogotypes.StringValue func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, + {ID: 0x01, Priority: 1}, } pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go index 9f82bc11a..f57ce18b9 100644 --- a/internal/p2p/shim.go +++ b/internal/p2p/shim.go @@ -3,7 +3,6 @@ package p2p import ( "sort" - "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/libs/log" ) @@ -12,7 +11,6 @@ import ( // A ChannelDescriptorShim is not contained in ReactorShim, but is rather // used to construct a ReactorShim. 
type ChannelDescriptorShim struct { - MsgType proto.Message Descriptor *ChannelDescriptor } @@ -61,7 +59,7 @@ func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { Descriptor: cds.Descriptor, Channel: NewChannel( ChannelID(cds.Descriptor.ID), - cds.MsgType, + cds.Descriptor.MessageType, inCh, outCh, errCh, diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 74803b3e2..3757d028d 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -38,47 +38,43 @@ var ( // ref: https://github.com/tendermint/tendermint/issues/5670 ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ SnapshotChannel: { - MsgType: new(ssproto.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(SnapshotChannel), + MessageType: new(ssproto.Message), Priority: 6, SendQueueCapacity: 10, RecvMessageCapacity: snapshotMsgSize, RecvBufferCapacity: 128, - MaxSendBytes: 400, }, }, ChunkChannel: { - MsgType: new(ssproto.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(ChunkChannel), Priority: 3, + MessageType: new(ssproto.Message), SendQueueCapacity: 4, RecvMessageCapacity: chunkMsgSize, RecvBufferCapacity: 128, - MaxSendBytes: 400, }, }, LightBlockChannel: { - MsgType: new(ssproto.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(LightBlockChannel), + MessageType: new(ssproto.Message), Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: lightBlockMsgSize, RecvBufferCapacity: 128, - MaxSendBytes: 400, }, }, ParamsChannel: { - MsgType: new(ssproto.Message), Descriptor: &p2p.ChannelDescriptor{ ID: byte(ParamsChannel), + MessageType: new(ssproto.Message), Priority: 2, SendQueueCapacity: 10, RecvMessageCapacity: paramMsgSize, RecvBufferCapacity: 128, - MaxSendBytes: 400, }, }, } diff --git a/node/node.go b/node/node.go index b6c47a9ce..6a66f4224 100644 --- a/node/node.go +++ b/node/node.go @@ -1119,7 +1119,7 @@ func makeChannelsFromShims( channels := map[p2p.ChannelID]*p2p.Channel{} for chID, chShim := range chShims { - ch, err := router.OpenChannel(*chShim.Descriptor, chShim.MsgType, chShim.Descriptor.RecvBufferCapacity) + ch, err := router.OpenChannel(*chShim.Descriptor, chShim.Descriptor.MessageType, chShim.Descriptor.RecvBufferCapacity) if err != nil { panic(fmt.Sprintf("failed to open channel %v: %v", chID, err)) } From 0900ea8396359cdf72be555cddcf597349e39af1 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 15 Oct 2021 12:31:33 -0400 Subject: [PATCH 102/174] p2p: channel shim cleanup (#7129) --- internal/blocksync/reactor.go | 2 +- internal/blocksync/reactor_test.go | 2 +- internal/consensus/reactor.go | 8 +-- internal/consensus/reactor_test.go | 2 +- internal/evidence/reactor.go | 2 +- internal/evidence/reactor_test.go | 2 +- internal/mempool/v0/reactor.go | 2 +- internal/mempool/v0/reactor_test.go | 2 +- internal/mempool/v1/reactor.go | 2 +- internal/mempool/v1/reactor_test.go | 2 +- internal/p2p/conn/connection.go | 14 +++-- internal/p2p/conn/connection_test.go | 22 +++---- internal/p2p/mocks/connection.go | 17 +++--- internal/p2p/mocks/transport.go | 8 +++ internal/p2p/p2p_test.go | 2 +- internal/p2p/p2ptest/network.go | 4 +- internal/p2p/pex/reactor.go | 2 +- internal/p2p/pqueue.go | 2 +- internal/p2p/router.go | 9 +-- internal/p2p/router_test.go | 7 ++- internal/p2p/shim.go | 91 ---------------------------- internal/p2p/transport.go | 4 ++ internal/p2p/transport_mconn.go | 6 +- internal/p2p/transport_mconn_test.go | 8 +-- internal/p2p/transport_memory.go | 2 + internal/p2p/types.go | 1 + internal/statesync/reactor.go | 8 
+-- node/node.go | 30 ++------- node/setup.go | 31 ++++------ 29 files changed, 99 insertions(+), 195 deletions(-) diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 5f4ce3029..86aaf79d3 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -30,7 +30,7 @@ var ( ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ BlockSyncChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(BlockSyncChannel), + ID: BlockSyncChannel, MessageType: new(bcproto.Message), Priority: 5, SendQueueCapacity: 1000, diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 3824f1460..59889eec4 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -65,7 +65,7 @@ func setup( blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: byte(BlockSyncChannel)} + chDesc := p2p.ChannelDescriptor{ID: BlockSyncChannel} rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) i := 0 diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 27ecb4602..dde36306c 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -32,7 +32,7 @@ var ( ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ StateChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(StateChannel), + ID: StateChannel, MessageType: new(tmcons.Message), Priority: 8, SendQueueCapacity: 64, @@ -45,7 +45,7 @@ var ( // TODO: Consider a split between gossiping current block and catchup // stuff. Once we gossip the whole block there is nothing left to send // until next height or round. - ID: byte(DataChannel), + ID: DataChannel, MessageType: new(tmcons.Message), Priority: 12, SendQueueCapacity: 64, @@ -55,7 +55,7 @@ var ( }, VoteChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteChannel), + ID: VoteChannel, MessageType: new(tmcons.Message), Priority: 10, SendQueueCapacity: 64, @@ -65,7 +65,7 @@ var ( }, VoteSetBitsChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteSetBitsChannel), + ID: VoteSetBitsChannel, MessageType: new(tmcons.Message), Priority: 5, SendQueueCapacity: 8, diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index e536906a9..04bc3708e 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -52,7 +52,7 @@ type reactorTestSuite struct { func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { return p2p.ChannelDescriptor{ - ID: byte(chID), + ID: chID, } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 0090bd32f..0cf261441 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -28,7 +28,7 @@ var ( ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ EvidenceChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(EvidenceChannel), + ID: EvidenceChannel, MessageType: new(tmproto.EvidenceList), Priority: 6, RecvMessageCapacity: maxMsgSize, diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index b8055263d..7963ba959 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -62,7 +62,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)} + chDesc := p2p.ChannelDescriptor{ID: evidence.EvidenceChannel} rts.evidenceChannels = 
rts.network.MakeChannelsNoCleanup(t, chDesc, new(tmproto.EvidenceList), diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 1587c0778..010f98f5d 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -101,7 +101,7 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ mempool.MempoolChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), + ID: mempool.MempoolChannel, MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index b0462a249..4ae2523a1 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -50,7 +50,7 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} + chDesc := p2p.ChannelDescriptor{ID: mempool.MempoolChannel} rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) for nodeID := range rts.network.Nodes { diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index e31977cc9..72154b4a8 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -108,7 +108,7 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ mempool.MempoolChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), + ID: mempool.MempoolChannel, MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index 5934d534c..1449b20b1 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -52,7 +52,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} + chDesc := p2p.ChannelDescriptor{ID: mempool.MempoolChannel} rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) for nodeID := range rts.network.Nodes { diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index c6a9b206c..1e149a2e5 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -48,7 +48,7 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) +type receiveCbFunc func(chID ChannelID, msgBytes []byte) type errorCbFunc func(interface{}) /* @@ -82,7 +82,7 @@ type MConnection struct { send chan struct{} pong chan struct{} channels []*Channel - channelsIdx map[byte]*Channel + channelsIdx map[ChannelID]*Channel onReceive receiveCbFunc onError errorCbFunc errored uint32 @@ -186,7 +186,7 @@ func NewMConnectionWithConfig( } // Create channels - var channelsIdx = map[byte]*Channel{} + var channelsIdx = map[ChannelID]*Channel{} var channels = []*Channel{} for _, desc := range chDescs { @@ -307,7 +307,7 @@ func (c *MConnection) stopForError(r interface{}) { } // Queues a message to be sent to channel. 
-func (c *MConnection) Send(chID byte, msgBytes []byte) bool { +func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { if !c.IsRunning() { return false } @@ -540,7 +540,7 @@ FOR_LOOP: // never block } case *tmp2p.Packet_PacketMsg: - channelID := byte(pkt.PacketMsg.ChannelID) + channelID := ChannelID(pkt.PacketMsg.ChannelID) channel, ok := c.channelsIdx[channelID] if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil { err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) @@ -607,9 +607,11 @@ type ChannelStatus struct { } //----------------------------------------------------------------------------- +// ChannelID is an arbitrary channel ID. +type ChannelID uint16 type ChannelDescriptor struct { - ID byte + ID ChannelID Priority int MessageType proto.Message diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 6a9c0988f..1ed179ad8 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -20,7 +20,7 @@ import ( const maxPingPongPacketSize = 1024 // bytes func createTestMConnection(conn net.Conn) *MConnection { - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { } onError := func(r interface{}) { } @@ -31,7 +31,7 @@ func createTestMConnection(conn net.Conn) *MConnection { func createMConnectionWithCallbacks( conn net.Conn, - onReceive func(chID byte, msgBytes []byte), + onReceive func(chID ChannelID, msgBytes []byte), onError func(r interface{}), ) *MConnection { cfg := DefaultMConnConfig() @@ -111,7 +111,7 @@ func TestMConnectionReceive(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -146,7 +146,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -184,7 +184,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -238,7 +238,7 @@ func TestMConnectionMultiplePings(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -285,7 +285,7 @@ func TestMConnectionPingPongs(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -342,7 +342,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(chID ChannelID, msgBytes []byte) { receivedCh <- msgBytes } onError := func(r interface{}) { @@ -371,7 +371,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { func 
newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { server, client := NetPipe() - onReceive := func(chID byte, msgBytes []byte) {} + onReceive := func(chID ChannelID, msgBytes []byte) {} onError := func(r interface{}) {} // create client conn with two channels @@ -443,7 +443,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { chOnRcv <- struct{}{} } @@ -538,7 +538,7 @@ func TestMConnectionChannelOverflow(t *testing.T) { mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { + mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { chOnRcv <- struct{}{} } diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 03f7b5b33..65b9afafb 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -5,9 +5,12 @@ package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + conn "github.com/tendermint/tendermint/internal/p2p/conn" + crypto "github.com/tendermint/tendermint/crypto" + mock "github.com/stretchr/testify/mock" + p2p "github.com/tendermint/tendermint/internal/p2p" types "github.com/tendermint/tendermint/types" @@ -77,14 +80,14 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint { } // ReceiveMessage provides a mock function with given fields: -func (_m *Connection) ReceiveMessage() (p2p.ChannelID, []byte, error) { +func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { ret := _m.Called() - var r0 p2p.ChannelID - if rf, ok := ret.Get(0).(func() p2p.ChannelID); ok { + var r0 conn.ChannelID + if rf, ok := ret.Get(0).(func() conn.ChannelID); ok { r0 = rf() } else { - r0 = ret.Get(0).(p2p.ChannelID) + r0 = ret.Get(0).(conn.ChannelID) } var r1 []byte @@ -121,11 +124,11 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { } // SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) error { +func (_m *Connection) SendMessage(_a0 conn.ChannelID, _a1 []byte) error { ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) error); ok { + if rf, ok := ret.Get(0).(func(conn.ChannelID, []byte) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 82bd670cb..2fc7baa29 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -5,7 +5,10 @@ package mocks import ( context "context" + conn "github.com/tendermint/tendermint/internal/p2p/conn" + mock "github.com/stretchr/testify/mock" + p2p "github.com/tendermint/tendermint/internal/p2p" ) @@ -37,6 +40,11 @@ func (_m *Transport) Accept() (p2p.Connection, error) { return r0, r1 } +// AddChannelDescriptors provides a mock function with given fields: _a0 +func (_m *Transport) AddChannelDescriptors(_a0 []*conn.ChannelDescriptor) { + _m.Called(_a0) +} + // Close provides a mock function with given fields: func (_m *Transport) Close() error { ret := _m.Called() diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 91e1c0824..f5ed5706c 100644 --- a/internal/p2p/p2p_test.go 
+++ b/internal/p2p/p2p_test.go @@ -15,7 +15,7 @@ var ( ctx = context.Background() chID = p2p.ChannelID(1) chDesc = p2p.ChannelDescriptor{ - ID: byte(chID), + ID: chID, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 21aa1a0ea..2ed888764 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -285,7 +285,7 @@ func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor, messageType proto.Message, size int) *p2p.Channel { channel, err := n.Router.OpenChannel(chDesc, messageType, size) require.NoError(t, err) - require.Contains(t, n.Router.NodeInfo().Channels, chDesc.ID) + require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID)) t.Cleanup(func() { RequireEmpty(t, channel) channel.Close() @@ -335,7 +335,7 @@ func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { return p2p.ChannelDescriptor{ - ID: byte(chID), + ID: chID, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index f313279c3..d43d836ce 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -25,7 +25,7 @@ var ( // See https://github.com/tendermint/tendermint/issues/6371 const ( // PexChannel is a channel for PEX messages - PexChannel = byte(0x00) + PexChannel = 0x00 // over-estimate of max NetAddress size // hexID (40) + IP (16) + Port (2) + Name (100) ... diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index b19436e15..fd0a43db6 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -99,7 +99,7 @@ func newPQScheduler( ) for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) + chID := chDesc.ID chPriorities[chID] = uint(chDesc.Priority) sizes[uint(chDesc.Priority)] = 0 } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index f7d012b49..11d0514bb 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -23,9 +23,6 @@ const queueBufferDefault = 32 const dialRandomizerIntervalMillisecond = 3000 -// ChannelID is an arbitrary channel ID. -type ChannelID uint16 - // Envelope contains a message with sender/receiver routing info. type Envelope struct { From types.NodeID // sender (empty if outbound) @@ -361,7 +358,7 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message r.channelMtx.Lock() defer r.channelMtx.Unlock() - id := ChannelID(chDesc.ID) + id := chDesc.ID if _, ok := r.channelQueues[id]; ok { return nil, fmt.Errorf("channel %v already exists", id) } @@ -383,6 +380,10 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message // add the channel to the nodeInfo if it's not already there. r.nodeInfo.AddChannel(uint16(chDesc.ID)) + for _, t := range r.transports { + t.AddChannelDescriptors([]*ChannelDescriptor{&chDesc}) + } + go func() { defer func() { r.channelMtx.Lock() diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index c12f1a2f0..7f922c29d 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -121,17 +121,17 @@ func TestRouter_Channel_Basic(t *testing.T) { // Opening a channel should work. 
channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc.ID) + require.Contains(t, router.NodeInfo().Channels, byte(chDesc.ID)) // Opening the same channel again should fail. _, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) require.Error(t, err) // Opening a different channel should work. - chDesc2 := p2p.ChannelDescriptor{ID: byte(2)} + chDesc2 := p2p.ChannelDescriptor{ID: 2} _, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0) require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc2.ID) + require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID)) // Closing the channel, then opening it again should be fine. channel.Close() @@ -865,6 +865,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) mockTransport := &mocks.Transport{} + mockTransport.On("AddChannelDescriptors", mock.Anything).Return() mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go index f57ce18b9..8de0b71c7 100644 --- a/internal/p2p/shim.go +++ b/internal/p2p/shim.go @@ -1,11 +1,5 @@ package p2p -import ( - "sort" - - "github.com/tendermint/tendermint/libs/log" -) - // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel // and the proto.Message the new p2p Channel is responsible for handling. // A ChannelDescriptorShim is not contained in ReactorShim, but is rather @@ -13,88 +7,3 @@ import ( type ChannelDescriptorShim struct { Descriptor *ChannelDescriptor } - -// ChannelShim defines a generic shim wrapper around a legacy p2p channel -// and the new p2p Channel. It also includes the raw bi-directional Go channels -// so we can proxy message delivery. -type ChannelShim struct { - Descriptor *ChannelDescriptor - Channel *Channel - inCh chan<- Envelope - outCh <-chan Envelope - errCh <-chan PeerError -} - -// ReactorShim defines a generic shim wrapper around a BaseReactor. It is -// responsible for wiring up legacy p2p behavior to the new p2p semantics -// (e.g. proxying Envelope messages to legacy peers). -type ReactorShim struct { - Name string - PeerUpdates *PeerUpdates - Channels map[ChannelID]*ChannelShim -} - -func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { - channels := make(map[ChannelID]*ChannelShim) - - for _, cds := range descriptors { - chShim := NewChannelShim(cds, 0) - channels[chShim.Channel.ID] = chShim - } - - rs := &ReactorShim{ - Name: name, - PeerUpdates: NewPeerUpdates(make(chan PeerUpdate), 0), - Channels: channels, - } - - return rs -} - -func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { - inCh := make(chan Envelope, buf) - outCh := make(chan Envelope, buf) - errCh := make(chan PeerError, buf) - return &ChannelShim{ - Descriptor: cds.Descriptor, - Channel: NewChannel( - ChannelID(cds.Descriptor.ID), - cds.Descriptor.MessageType, - inCh, - outCh, - errCh, - ), - inCh: inCh, - outCh: outCh, - errCh: errCh, - } -} - -// GetChannels implements the legacy Reactor interface for getting a slice of all -// the supported ChannelDescriptors. 
-func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { - sortedChIDs := make([]ChannelID, 0, len(rs.Channels)) - for cID := range rs.Channels { - sortedChIDs = append(sortedChIDs, cID) - } - - sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] }) - - descriptors := make([]*ChannelDescriptor, len(rs.Channels)) - for i, cID := range sortedChIDs { - descriptors[i] = rs.Channels[cID].Descriptor - } - - return descriptors -} - -// GetChannel returns a p2p Channel reference for a given ChannelID. If no -// Channel exists, nil is returned. -func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { - channelShim, ok := rs.Channels[cID] - if ok { - return channelShim.Channel - } - - return nil -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index b49b096bb..e78906362 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -44,6 +44,10 @@ type Transport interface { // Close stops accepting new connections, but does not close active connections. Close() error + // AddChannelDescriptors is only part of this interface + // temporarily + AddChannelDescriptors([]*ChannelDescriptor) + // Stringer is used to display the transport, e.g. in logs. // // Without this, the logger may use reflection to access and display diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index de643bdb9..3e0281c39 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -348,9 +348,9 @@ func (c *mConnConnection) handshake( } // onReceive is a callback for MConnection received messages. -func (c *mConnConnection) onReceive(chID byte, payload []byte) { +func (c *mConnConnection) onReceive(chID ChannelID, payload []byte) { select { - case c.receiveCh <- mConnMessage{channelID: ChannelID(chID), payload: payload}: + case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: case <-c.closeCh: } } @@ -387,7 +387,7 @@ func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { case <-c.closeCh: return io.EOF default: - if ok := c.mconn.Send(byte(chID), msg); !ok { + if ok := c.mconn.Send(chID, msg); !ok { return errors.New("sending message timed out") } diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index f4d7198ed..d33438109 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -21,7 +21,7 @@ func init() { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) err := transport.Listen(p2p.Endpoint{ @@ -43,7 +43,7 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -61,7 +61,7 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -148,7 +148,7 @@ func TestMConnTransport_Listen(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - 
[]*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 853288c59..b4161ecd6 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -117,6 +117,8 @@ func (t *MemoryTransport) String() string { return string(MemoryProtocol) } +func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {} + // Protocols implements Transport. func (t *MemoryTransport) Protocols() []Protocol { return []Protocol{MemoryProtocol} diff --git a/internal/p2p/types.go b/internal/p2p/types.go index 388ff2253..bee99a4fe 100644 --- a/internal/p2p/types.go +++ b/internal/p2p/types.go @@ -5,3 +5,4 @@ import ( ) type ChannelDescriptor = conn.ChannelDescriptor +type ChannelID = conn.ChannelID diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 3757d028d..86a46ea63 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -39,7 +39,7 @@ var ( ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ SnapshotChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(SnapshotChannel), + ID: SnapshotChannel, MessageType: new(ssproto.Message), Priority: 6, SendQueueCapacity: 10, @@ -49,7 +49,7 @@ var ( }, ChunkChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ChunkChannel), + ID: ChunkChannel, Priority: 3, MessageType: new(ssproto.Message), SendQueueCapacity: 4, @@ -59,7 +59,7 @@ var ( }, LightBlockChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(LightBlockChannel), + ID: LightBlockChannel, MessageType: new(ssproto.Message), Priority: 5, SendQueueCapacity: 10, @@ -69,7 +69,7 @@ var ( }, ParamsChannel: { Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ParamsChannel), + ID: ParamsChannel, MessageType: new(ssproto.Message), Priority: 2, SendQueueCapacity: 10, diff --git a/node/node.go b/node/node.go index 6a66f4224..61ba25749 100644 --- a/node/node.go +++ b/node/node.go @@ -19,7 +19,6 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/internal/proxy" rpccore "github.com/tendermint/tendermint/internal/rpc/core" sm "github.com/tendermint/tendermint/internal/state" @@ -277,7 +276,7 @@ func makeNode(cfg *config.Config, makeCloser(closers)) } - mpReactorShim, mpReactor, mp, err := createMempoolReactor( + mpReactor, mp, err := createMempoolReactor( cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { @@ -285,7 +284,7 @@ func makeNode(cfg *config.Config, } - evReactorShim, evReactor, evPool, err := createEvidenceReactor( + evReactor, evPool, err := createEvidenceReactor( cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, ) if err != nil { @@ -304,7 +303,7 @@ func makeNode(cfg *config.Config, sm.BlockExecutorWithMetrics(nodeMetrics.state), ) - csReactorShim, csReactor, csState := createConsensusReactor( + csReactor, csState := createConsensusReactor( cfg, state, blockExec, blockStore, mp, evPool, privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, peerManager, router, consensusLogger, @@ -312,7 +311,7 @@ func makeNode(cfg *config.Config, // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. 
- bcReactorShim, bcReactor, err := createBlockchainReactor( + bcReactor, err := createBlockchainReactor( logger, state, blockExec, blockStore, csReactor, peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, ) @@ -335,7 +334,6 @@ func makeNode(cfg *config.Config, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 ssLogger := logger.With("module", "statesync") - ssReactorShim := p2p.NewReactorShim(ssLogger, "StateSyncShim", statesync.ChannelShims) channels := makeChannelsFromShims(router, statesync.ChannelShims) peerUpdates := peerManager.Subscribe() @@ -357,16 +355,6 @@ func makeNode(cfg *config.Config, nodeMetrics.statesync, ) - // add the channel descriptors to both the transports - // FIXME: This should be removed when the legacy p2p stack is removed and - // transports can either be agnostic to channel descriptors or can be - // declared in the constructor. - transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorShim.GetChannels()) - transport.AddChannelDescriptors(csReactorShim.GetChannels()) - transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(ssReactorShim.GetChannels()) - // Optionally, start the pex reactor // // TODO: @@ -382,9 +370,6 @@ func makeNode(cfg *config.Config, var pexReactor service.Service - pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -501,13 +486,6 @@ func makeSeedNode(cfg *config.Config, var pexReactor service.Service - // add the pex reactor - // FIXME: we add channel descriptors to both the router and the transport but only the router - // should be aware of channel info. We should remove this from transport once the legacy - // p2p stack is removed. 
- pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { return nil, combineCloseError(err, closer) diff --git a/node/setup.go b/node/setup.go index a3491c995..fb5599dfc 100644 --- a/node/setup.go +++ b/node/setup.go @@ -196,11 +196,10 @@ func createMempoolReactor( peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { +) (service.Service, mempool.Mempool, error) { logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) channelShims := mempoolv0.GetChannelShims(cfg.Mempool) - reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) channels := makeChannelsFromShims(router, channelShims) peerUpdates := peerManager.Subscribe() @@ -231,7 +230,7 @@ func createMempoolReactor( mp.EnableTxsAvailable() } - return reactorShim, reactor, mp, nil + return reactor, mp, nil case config.MempoolV1: mp := mempoolv1.NewTxMempool( @@ -257,10 +256,10 @@ func createMempoolReactor( mp.EnableTxsAvailable() } - return reactorShim, reactor, mp, nil + return reactor, mp, nil default: - return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) + return nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) } } @@ -272,18 +271,17 @@ func createEvidenceReactor( peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) { +) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { - return nil, nil, nil, err + return nil, nil, err } logger = logger.With("module", "evidence") - reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims) evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore) if err != nil { - return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) + return nil, nil, fmt.Errorf("creating evidence pool: %w", err) } evidenceReactor := evidence.NewReactor( @@ -293,7 +291,7 @@ func createEvidenceReactor( evidencePool, ) - return reactorShim, evidenceReactor, evidencePool, nil + return evidenceReactor, evidencePool, nil } func createBlockchainReactor( @@ -306,11 +304,10 @@ func createBlockchainReactor( router *p2p.Router, blockSync bool, metrics *consensus.Metrics, -) (*p2p.ReactorShim, service.Service, error) { +) (service.Service, error) { logger = logger.With("module", "blockchain") - reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", blocksync.ChannelShims) channels := makeChannelsFromShims(router, blocksync.ChannelShims) peerUpdates := peerManager.Subscribe() @@ -320,10 +317,10 @@ func createBlockchainReactor( metrics, ) if err != nil { - return nil, nil, err + return nil, err } - return reactorShim, reactor, nil + return reactor, nil } func createConsensusReactor( @@ -340,7 +337,7 @@ func createConsensusReactor( peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*p2p.ReactorShim, *consensus.Reactor, *consensus.State) { +) (*consensus.Reactor, *consensus.State) { consensusState := consensus.NewState( cfg.Consensus, @@ -356,8 +353,6 @@ func createConsensusReactor( consensusState.SetPrivValidator(privValidator) } - reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", consensus.ChannelShims) - var ( channels map[p2p.ChannelID]*p2p.Channel peerUpdates *p2p.PeerUpdates @@ 
-382,7 +377,7 @@ func createConsensusReactor( // consensusReactor will set it on consensusState and blockExecutor. reactor.SetEventBus(eventBus) - return reactorShim, reactor, consensusState + return reactor, consensusState } func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { From cbe6ad6cd5261963b0d05722f5ba0c549130d96f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 15 Oct 2021 13:03:10 -0400 Subject: [PATCH 103/174] p2p: flatten channel descriptor (#7132) --- internal/blocksync/reactor.go | 18 ++++---- internal/blocksync/reactor_test.go | 2 +- internal/consensus/reactor.go | 72 +++++++++++++---------------- internal/consensus/reactor_test.go | 4 +- internal/evidence/reactor.go | 16 +++---- internal/evidence/reactor_test.go | 2 +- internal/mempool/v0/reactor.go | 18 ++++---- internal/mempool/v0/reactor_test.go | 2 +- internal/mempool/v1/reactor.go | 18 ++++---- internal/mempool/v1/reactor_test.go | 2 +- internal/p2p/p2p_test.go | 2 +- internal/p2p/p2ptest/network.go | 12 ++--- internal/p2p/pex/reactor.go | 7 +-- internal/p2p/pqueue.go | 6 +-- internal/p2p/pqueue_test.go | 2 +- internal/p2p/router.go | 8 ++-- internal/p2p/router_test.go | 2 +- internal/p2p/shim.go | 9 ---- internal/statesync/reactor.go | 66 ++++++++++++-------------- node/node.go | 11 +++-- 20 files changed, 124 insertions(+), 155 deletions(-) delete mode 100644 internal/p2p/shim.go diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 86aaf79d3..eebfda764 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -27,16 +27,14 @@ var ( // // TODO: Remove once p2p refactor is complete. // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - BlockSyncChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: BlockSyncChannel, - MessageType: new(bcproto.Message), - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: MaxMsgSize, - }, + ChannelShims = []*p2p.ChannelDescriptor{ + { + ID: BlockSyncChannel, + MessageType: new(bcproto.Message), + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 1024, + RecvMessageCapacity: MaxMsgSize, }, } ) diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 59889eec4..568b928a5 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -65,7 +65,7 @@ func setup( blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: BlockSyncChannel} + chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel} rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) i := 0 diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index dde36306c..eb8345b72 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -29,49 +29,41 @@ var ( // // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - StateChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: StateChannel, - MessageType: new(tmcons.Message), - Priority: 8, - SendQueueCapacity: 64, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 128, - }, + ChannelShims = []*p2p.ChannelDescriptor{ + { + ID: StateChannel, + MessageType: new(tmcons.Message), + Priority: 8, + SendQueueCapacity: 64, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 128, }, - DataChannel: { - Descriptor: &p2p.ChannelDescriptor{ - // TODO: Consider a split between gossiping current block and catchup - // stuff. Once we gossip the whole block there is nothing left to send - // until next height or round. - ID: DataChannel, - MessageType: new(tmcons.Message), - Priority: 12, - SendQueueCapacity: 64, - RecvBufferCapacity: 512, - RecvMessageCapacity: maxMsgSize, - }, + { + // TODO: Consider a split between gossiping current block and catchup + // stuff. Once we gossip the whole block there is nothing left to send + // until next height or round. + ID: DataChannel, + MessageType: new(tmcons.Message), + Priority: 12, + SendQueueCapacity: 64, + RecvBufferCapacity: 512, + RecvMessageCapacity: maxMsgSize, }, - VoteChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: VoteChannel, - MessageType: new(tmcons.Message), - Priority: 10, - SendQueueCapacity: 64, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - }, + { + ID: VoteChannel, + MessageType: new(tmcons.Message), + Priority: 10, + SendQueueCapacity: 64, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, }, - VoteSetBitsChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: VoteSetBitsChannel, - MessageType: new(tmcons.Message), - Priority: 5, - SendQueueCapacity: 8, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - }, + { + ID: VoteSetBitsChannel, + MessageType: new(tmcons.Message), + Priority: 5, + SendQueueCapacity: 8, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, }, } ) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 04bc3708e..246178db4 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -50,8 +50,8 @@ type reactorTestSuite struct { voteSetBitsChannels map[types.NodeID]*p2p.Channel } -func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { - return p2p.ChannelDescriptor{ +func chDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ ID: chID, } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 0cf261441..6f96b3906 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -25,15 +25,13 @@ var ( // // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - EvidenceChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: EvidenceChannel, - MessageType: new(tmproto.EvidenceList), - Priority: 6, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - }, + ChannelShims = []*p2p.ChannelDescriptor{ + { + ID: EvidenceChannel, + MessageType: new(tmproto.EvidenceList), + Priority: 6, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 32, }, } ) diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 7963ba959..50db47012 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -62,7 +62,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := p2p.ChannelDescriptor{ID: evidence.EvidenceChannel} + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel} rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(tmproto.EvidenceList), diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 010f98f5d..d2a621829 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -90,7 +90,7 @@ func NewReactor( // // TODO: Remove once p2p refactor is complete. // ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { +func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -98,15 +98,13 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - }, + return []*p2p.ChannelDescriptor{ + { + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, }, } } diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 4ae2523a1..2964018d6 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -50,7 +50,7 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: mempool.MempoolChannel} + chDesc := &p2p.ChannelDescriptor{ID: mempool.MempoolChannel} rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) for nodeID := range rts.network.Nodes { diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 72154b4a8..e4b04c931 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -97,7 +97,7 @@ func defaultObservePanic(r interface{}) {} // // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { +func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -105,15 +105,13 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - }, + return []*p2p.ChannelDescriptor{ + { + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, }, } } diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index 1449b20b1..bdb66f436 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -52,7 +52,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: mempool.MempoolChannel} + chDesc := &p2p.ChannelDescriptor{ID: mempool.MempoolChannel} rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) for nodeID := range rts.network.Nodes { diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index f5ed5706c..15e561d9b 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -14,7 +14,7 @@ import ( var ( ctx = context.Background() chID = p2p.ChannelID(1) - chDesc = p2p.ChannelDescriptor{ + chDesc = &p2p.ChannelDescriptor{ ID: chID, Priority: 5, SendQueueCapacity: 10, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 2ed888764..dff5b6a75 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -137,7 +137,7 @@ func (n *Network) NodeIDs() []types.NodeID { // doing error checks and cleanups. func (n *Network) MakeChannels( t *testing.T, - chDesc p2p.ChannelDescriptor, + chDesc *p2p.ChannelDescriptor, messageType proto.Message, size int, ) map[types.NodeID]*p2p.Channel { @@ -153,7 +153,7 @@ func (n *Network) MakeChannels( // all the channels. func (n *Network) MakeChannelsNoCleanup( t *testing.T, - chDesc p2p.ChannelDescriptor, + chDesc *p2p.ChannelDescriptor, messageType proto.Message, size int, ) map[types.NodeID]*p2p.Channel { @@ -281,7 +281,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { // MakeChannel opens a channel, with automatic error handling and cleanup. On // test cleanup, it also checks that the channel is empty, to make sure // all expected messages have been asserted. -func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor, +func (n *Node) MakeChannel(t *testing.T, chDesc *p2p.ChannelDescriptor, messageType proto.Message, size int) *p2p.Channel { channel, err := n.Router.OpenChannel(chDesc, messageType, size) require.NoError(t, err) @@ -297,7 +297,7 @@ func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor, // caller must ensure proper cleanup of the channel. 
func (n *Node) MakeChannelNoCleanup( t *testing.T, - chDesc p2p.ChannelDescriptor, + chDesc *p2p.ChannelDescriptor, messageType proto.Message, size int, ) *p2p.Channel { @@ -333,8 +333,8 @@ func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { return sub } -func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { - return p2p.ChannelDescriptor{ +func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ ID: chID, Priority: 5, SendQueueCapacity: 10, diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index d43d836ce..645cc19e1 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -62,13 +62,14 @@ const ( // within each reactor (as they are now) or, considering that the reactor doesn't // really need to care about the channel descriptors, if they should be housed // in the node module. -func ChannelDescriptor() conn.ChannelDescriptor { - return conn.ChannelDescriptor{ +func ChannelDescriptor() *conn.ChannelDescriptor { + return &conn.ChannelDescriptor{ ID: PexChannel, + MessageType: new(protop2p.PexMessage), Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, + RecvBufferCapacity: 128, } } diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index fd0a43db6..e0e812cf5 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -71,7 +71,7 @@ type pqScheduler struct { size uint sizes map[uint]uint // cumulative priority sizes pq *priorityQueue - chDescs []ChannelDescriptor + chDescs []*ChannelDescriptor capacity uint chPriorities map[ChannelID]uint @@ -84,12 +84,12 @@ type pqScheduler struct { func newPQScheduler( logger log.Logger, m *Metrics, - chDescs []ChannelDescriptor, + chDescs []*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { // copy each ChannelDescriptor and sort them by ascending channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) + chDescsCopy := make([]*ChannelDescriptor, len(chDescs)) copy(chDescsCopy, chDescs) sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority < chDescsCopy[j].Priority }) diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 18f0c02e3..ffa7e39a8 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -12,7 +12,7 @@ type testMessage = gogotypes.StringValue func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 - chDescs := []ChannelDescriptor{ + chDescs := []*ChannelDescriptor{ {ID: 0x01, Priority: 1}, } pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 11d0514bb..fbf2eea6b 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -249,7 +249,7 @@ type Router struct { nodeInfo types.NodeInfo privKey crypto.PrivKey peerManager *PeerManager - chDescs []ChannelDescriptor + chDescs []*ChannelDescriptor transports []Transport connTracker connectionTracker protocolTransports map[Protocol]Transport @@ -295,7 +295,7 @@ func NewRouter( options.MaxIncomingConnectionAttempts, options.IncomingConnectionWindow, ), - chDescs: make([]ChannelDescriptor, 0), + chDescs: make([]*ChannelDescriptor, 0), transports: transports, protocolTransports: map[Protocol]Transport{}, peerManager: peerManager, @@ -354,7 +354,7 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { // implement Wrapper to automatically (un)wrap multiple message types in a // 
wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) { +func (r *Router) OpenChannel(chDesc *ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() @@ -381,7 +381,7 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message r.nodeInfo.AddChannel(uint16(chDesc.ID)) for _, t := range r.transports { - t.AddChannelDescriptors([]*ChannelDescriptor{&chDesc}) + t.AddChannelDescriptors([]*ChannelDescriptor{chDesc}) } go func() { diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 7f922c29d..7ba42b9b0 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -128,7 +128,7 @@ func TestRouter_Channel_Basic(t *testing.T) { require.Error(t, err) // Opening a different channel should work. - chDesc2 := p2p.ChannelDescriptor{ID: 2} + chDesc2 := &p2p.ChannelDescriptor{ID: 2} _, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0) require.NoError(t, err) require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID)) diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go deleted file mode 100644 index 8de0b71c7..000000000 --- a/internal/p2p/shim.go +++ /dev/null @@ -1,9 +0,0 @@ -package p2p - -// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel -// and the proto.Message the new p2p Channel is responsible for handling. -// A ChannelDescriptorShim is not contained in ReactorShim, but is rather -// used to construct a ReactorShim. -type ChannelDescriptorShim struct { - Descriptor *ChannelDescriptor -} diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 86a46ea63..99cb4e6ab 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -36,46 +36,38 @@ var ( // // TODO: Remove once p2p refactor is complete. 
// ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - SnapshotChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: SnapshotChannel, - MessageType: new(ssproto.Message), - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - }, + ChannelShims = []*p2p.ChannelDescriptor{ + { + ID: SnapshotChannel, + MessageType: new(ssproto.Message), + Priority: 6, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + RecvBufferCapacity: 128, }, - ChunkChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: ChunkChannel, - Priority: 3, - MessageType: new(ssproto.Message), - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - }, + { + ID: ChunkChannel, + Priority: 3, + MessageType: new(ssproto.Message), + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + RecvBufferCapacity: 128, }, - LightBlockChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: LightBlockChannel, - MessageType: new(ssproto.Message), - Priority: 5, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - }, + { + ID: LightBlockChannel, + MessageType: new(ssproto.Message), + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + RecvBufferCapacity: 128, }, - ParamsChannel: { - Descriptor: &p2p.ChannelDescriptor{ - ID: ParamsChannel, - MessageType: new(ssproto.Message), - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: paramMsgSize, - RecvBufferCapacity: 128, - }, + { + ID: ParamsChannel, + MessageType: new(ssproto.Message), + Priority: 2, + SendQueueCapacity: 10, + RecvMessageCapacity: paramMsgSize, + RecvBufferCapacity: 128, }, } ) diff --git a/node/node.go b/node/node.go index 61ba25749..cf2d104dc 100644 --- a/node/node.go +++ b/node/node.go @@ -1092,17 +1092,18 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt // FIXME: Temporary helper function, shims should be removed. 
func makeChannelsFromShims( router *p2p.Router, - chShims map[p2p.ChannelID]*p2p.ChannelDescriptorShim, + chDescs []*p2p.ChannelDescriptor, ) map[p2p.ChannelID]*p2p.Channel { channels := map[p2p.ChannelID]*p2p.Channel{} - for chID, chShim := range chShims { - ch, err := router.OpenChannel(*chShim.Descriptor, chShim.Descriptor.MessageType, chShim.Descriptor.RecvBufferCapacity) + for idx := range chDescs { + chDesc := chDescs[idx] + ch, err := router.OpenChannel(chDesc, chDesc.MessageType, chDesc.RecvBufferCapacity) if err != nil { - panic(fmt.Sprintf("failed to open channel %v: %v", chID, err)) + panic(fmt.Sprintf("failed to open channel %v: %v", chDesc.ID, err)) } - channels[chID] = ch + channels[chDesc.ID] = ch } return channels From 7143f14a633cc9420c122309c252f19a2ef701e5 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 15 Oct 2021 14:00:24 -0400 Subject: [PATCH 104/174] p2p: simplify open channel interface (#7133) A fourth #7075 component patch to simplify the channel creation interface --- internal/blocksync/reactor_test.go | 4 ++-- internal/consensus/reactor_test.go | 14 ++++++------ internal/evidence/reactor_test.go | 7 ++---- internal/mempool/v0/reactor_test.go | 5 +++-- internal/mempool/v1/reactor_test.go | 6 ++---- internal/p2p/p2p_test.go | 2 ++ internal/p2p/p2ptest/network.go | 23 ++++++++------------ internal/p2p/p2ptest/require.go | 2 ++ internal/p2p/pex/reactor_test.go | 8 ++----- internal/p2p/router.go | 10 +++++---- internal/p2p/router_test.go | 33 ++++++++++++++++++----------- node/node.go | 2 +- node/setup.go | 3 +-- 13 files changed, 61 insertions(+), 58 deletions(-) diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 568b928a5..6bca8d4a9 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -65,8 +65,8 @@ func setup( blockSync: true, } - chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel} - rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) i := 0 for nodeID := range rts.network.Nodes { diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 246178db4..16fa13969 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -50,9 +50,11 @@ type reactorTestSuite struct { voteSetBitsChannels map[types.NodeID]*p2p.Channel } -func chDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { +func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { return &p2p.ChannelDescriptor{ - ID: chID, + ID: chID, + MessageType: new(tmcons.Message), + RecvBufferCapacity: size, } } @@ -67,10 +69,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), } - rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size) - rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel), new(tmcons.Message), size) - rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size) - rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size) + rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel, size)) + rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, 
chDesc(DataChannel, size)) + rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size)) + rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size)) _, cancel := context.WithCancel(context.Background()) diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 50db47012..cf8f840ea 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -62,11 +62,8 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel} - rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, - chDesc, - new(tmproto.EvidenceList), - int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.EvidenceList)} + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) idx := 0 diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 2964018d6..104e1d4bd 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -50,8 +50,9 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := &p2p.ChannelDescriptor{ID: mempool.MempoolChannel} - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) + chDesc := GetChannelShims(config)[0] + chDesc.RecvBufferCapacity = int(chBuf) + rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc) for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index bdb66f436..f004e75a9 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -10,11 +10,9 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -52,8 +50,8 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := &p2p.ChannelDescriptor{ID: mempool.MempoolChannel} - rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) + chDesc := GetChannelShims(cfg.Mempool)[0] + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 15e561d9b..642114a1d 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -6,6 +6,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/types" ) @@ -16,6 +17,7 @@ var ( chID = p2p.ChannelID(1) 
chDesc = &p2p.ChannelDescriptor{ ID: chID, + MessageType: &p2ptest.Message{}, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index dff5b6a75..c808ad3e0 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -138,12 +137,10 @@ func (n *Network) NodeIDs() []types.NodeID { func (n *Network) MakeChannels( t *testing.T, chDesc *p2p.ChannelDescriptor, - messageType proto.Message, - size int, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size) + channels[node.NodeID] = node.MakeChannel(t, chDesc) } return channels } @@ -154,12 +151,10 @@ func (n *Network) MakeChannels( func (n *Network) MakeChannelsNoCleanup( t *testing.T, chDesc *p2p.ChannelDescriptor, - messageType proto.Message, - size int, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size) + channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc) } return channels } @@ -281,9 +276,11 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { // MakeChannel opens a channel, with automatic error handling and cleanup. On // test cleanup, it also checks that the channel is empty, to make sure // all expected messages have been asserted. -func (n *Node) MakeChannel(t *testing.T, chDesc *p2p.ChannelDescriptor, - messageType proto.Message, size int) *p2p.Channel { - channel, err := n.Router.OpenChannel(chDesc, messageType, size) +func (n *Node) MakeChannel( + t *testing.T, + chDesc *p2p.ChannelDescriptor, +) *p2p.Channel { + channel, err := n.Router.OpenChannel(chDesc) require.NoError(t, err) require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID)) t.Cleanup(func() { @@ -298,11 +295,8 @@ func (n *Node) MakeChannel(t *testing.T, chDesc *p2p.ChannelDescriptor, func (n *Node) MakeChannelNoCleanup( t *testing.T, chDesc *p2p.ChannelDescriptor, - messageType proto.Message, - size int, ) *p2p.Channel { - - channel, err := n.Router.OpenChannel(chDesc, messageType, size) + channel, err := n.Router.OpenChannel(chDesc) require.NoError(t, err) return channel } @@ -336,6 +330,7 @@ func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { return &p2p.ChannelDescriptor{ ID: chID, + MessageType: &Message{}, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 3598baba0..a9fc16a34 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -24,6 +24,8 @@ func RequireEmpty(t *testing.T, channels ...*p2p.Channel) { // RequireReceive requires that the given envelope is received on the channel. 
func RequireReceive(t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { + t.Helper() + timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks defer timer.Stop() diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 28476e67d..b7e1a01c3 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -380,9 +380,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { // NOTE: we don't assert that the channels get drained after stopping the // reactor - rts.pexChannels = rts.network.MakeChannelsNoCleanup( - t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), chBuf, - ) + rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor()) idx := 0 for nodeID := range rts.network.Nodes { @@ -446,9 +444,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { }) r.network.Nodes[node.NodeID] = node nodeID := node.NodeID - r.pexChannels[nodeID] = node.MakeChannelNoCleanup( - t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), r.opts.BufferSize, - ) + r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor()) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index fbf2eea6b..6c4694624 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -354,7 +354,7 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { // implement Wrapper to automatically (un)wrap multiple message types in a // wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(chDesc *ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) { +func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() @@ -364,9 +364,11 @@ func (r *Router) OpenChannel(chDesc *ChannelDescriptor, messageType proto.Messag } r.chDescs = append(r.chDescs, chDesc) - queue := r.queueFactory(size) - outCh := make(chan Envelope, size) - errCh := make(chan PeerError, size) + messageType := chDesc.MessageType + + queue := r.queueFactory(chDesc.RecvBufferCapacity) + outCh := make(chan Envelope, chDesc.RecvBufferCapacity) + errCh := make(chan PeerError, chDesc.RecvBufferCapacity) channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh) var wrapper Wrapper diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 7ba42b9b0..997f02a06 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -50,7 +50,7 @@ func TestRouter_Network(t *testing.T) { network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8}) local := network.RandomNode() peers := network.Peers(local.NodeID) - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) network.Start(t) @@ -119,17 +119,18 @@ func TestRouter_Channel_Basic(t *testing.T) { }) // Opening a channel should work. - channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err := router.OpenChannel(chDesc) require.NoError(t, err) require.Contains(t, router.NodeInfo().Channels, byte(chDesc.ID)) // Opening the same channel again should fail. 
- _, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + _, err = router.OpenChannel(chDesc) require.Error(t, err) // Opening a different channel should work. - chDesc2 := &p2p.ChannelDescriptor{ID: 2} - _, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0) + chDesc2 := &p2p.ChannelDescriptor{ID: 2, MessageType: &p2ptest.Message{}} + _, err = router.OpenChannel(chDesc2) + require.NoError(t, err) require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID)) @@ -137,7 +138,7 @@ func TestRouter_Channel_Basic(t *testing.T) { channel.Close() time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async... - channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err = router.OpenChannel(chDesc) require.NoError(t, err) // We should be able to send on the channel, even though there are no peers. @@ -163,9 +164,9 @@ func TestRouter_Channel_SendReceive(t *testing.T) { ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a, b, c := channels[aID], channels[bID], channels[cID] - otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9), &p2ptest.Message{}, 0) + otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9)) network.Start(t) @@ -222,7 +223,7 @@ func TestRouter_Channel_Broadcast(t *testing.T) { ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a, b, c, d := channels[aID], channels[bID], channels[cID], channels[dID] network.Start(t) @@ -250,7 +251,15 @@ func TestRouter_Channel_Wrapper(t *testing.T) { ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &wrapperMessage{}, 0) + chDesc := &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &wrapperMessage{}, + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: 10, + } + + channels := network.MakeChannels(t, chDesc) a, b := channels[aID], channels[bID] network.Start(t) @@ -310,7 +319,7 @@ func TestRouter_Channel_Error(t *testing.T) { ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(t, chDesc) a := channels[aID] // Erroring b should cause it to be disconnected. It will reconnect shortly after. 
@@ -897,7 +906,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
 		Status: p2p.PeerStatusUp,
 	})
 
-	channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
+	channel, err := router.OpenChannel(chDesc)
 	require.NoError(t, err)
 
 	channel.Out <- p2p.Envelope{
diff --git a/node/node.go b/node/node.go
index cf2d104dc..0a48bf0b7 100644
--- a/node/node.go
+++ b/node/node.go
@@ -1098,7 +1098,7 @@ func makeChannelsFromShims(
 	channels := map[p2p.ChannelID]*p2p.Channel{}
 	for idx := range chDescs {
 		chDesc := chDescs[idx]
-		ch, err := router.OpenChannel(chDesc, chDesc.MessageType, chDesc.RecvBufferCapacity)
+		ch, err := router.OpenChannel(chDesc)
 		if err != nil {
 			panic(fmt.Sprintf("failed to open channel %v: %v", chDesc.ID, err))
 		}
diff --git a/node/setup.go b/node/setup.go
index fb5599dfc..fc611d946 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -30,7 +30,6 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 	tmstrings "github.com/tendermint/tendermint/libs/strings"
-	protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 	"github.com/tendermint/tendermint/version"
 
@@ -485,7 +484,7 @@ func createPEXReactor(
 	router *p2p.Router,
 ) (service.Service, error) {
 
-	channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128)
+	channel, err := router.OpenChannel(pex.ChannelDescriptor())
 	if err != nil {
 		return nil, err
 	}

From ca8f004112bc9567363c55b96ab8ee04d434ba70 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Fri, 15 Oct 2021 16:08:09 -0400
Subject: [PATCH 105/174] p2p: remove final shims from p2p package (#7136)

This is, perhaps, the trivial final piece of #7075 that I've been
working on.

There's more work to be done:

- push more of the setup into the packages themselves
- move channel-based sending/filtering out of the
- simplify the buffering throughout the p2p stack.
---
 internal/blocksync/reactor.go       | 33 +++++------
 internal/consensus/reactor.go       | 15 +++---
 internal/evidence/reactor.go        | 33 +++++------
 internal/mempool/v0/reactor.go      | 25 ++++-----
 internal/mempool/v0/reactor_test.go |  2 +-
 internal/mempool/v1/reactor.go      | 25 ++++-----
 internal/mempool/v1/reactor_test.go |  2 +-
 internal/p2p/conn/connection.go     | 31 ++++++-----
 internal/statesync/reactor.go       | 81 ++++++++++++++---------------
 node/node.go                        | 37 ++++++-------
 node/setup.go                       | 54 +++++++++++++------
 11 files changed, 158 insertions(+), 180 deletions(-)

diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go
index eebfda764..43c3e83cd 100644
--- a/internal/blocksync/reactor.go
+++ b/internal/blocksync/reactor.go
@@ -17,27 +17,7 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-var (
-	_ service.Service = (*Reactor)(nil)
-
-	// ChannelShims contains a map of ChannelDescriptorShim objects, where each
-	// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
-	// p2p proto.Message the new p2p Channel is responsible for handling.
-	//
-	//
-	// TODO: Remove once p2p refactor is complete.
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = []*p2p.ChannelDescriptor{ - { - ID: BlockSyncChannel, - MessageType: new(bcproto.Message), - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: MaxMsgSize, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( // BlockSyncChannel is a channel for blocks and status updates @@ -55,6 +35,17 @@ const ( syncTimeout = 60 * time.Second ) +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: BlockSyncChannel, + MessageType: new(bcproto.Message), + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 1024, + RecvMessageCapacity: MaxMsgSize, + } +} + type consensusReactor interface { // For when we switch from block sync reactor to the consensus // machine. diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index eb8345b72..62517fd4f 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -21,15 +21,12 @@ import ( var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*tmcons.Message)(nil) +) - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = []*p2p.ChannelDescriptor{ +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptors() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ { ID: StateChannel, MessageType: new(tmcons.Message), @@ -66,7 +63,7 @@ var ( RecvMessageCapacity: maxMsgSize, }, } -) +} const ( StateChannel = p2p.ChannelID(0x20) diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 6f96b3906..c2f25bd36 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -15,26 +15,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = []*p2p.ChannelDescriptor{ - { - ID: EvidenceChannel, - MessageType: new(tmproto.EvidenceList), - Priority: 6, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( EvidenceChannel = p2p.ChannelID(0x38) @@ -48,6 +29,18 @@ const ( broadcastEvidenceIntervalS = 10 ) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: EvidenceChannel, + MessageType: new(tmproto.EvidenceList), + Priority: 6, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 32, + } +} + // Reactor handles evpool evidence broadcasting amongst peers. 
type Reactor struct { service.BaseService diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index d2a621829..118321645 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -83,14 +83,9 @@ func NewReactor( return r } -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -98,14 +93,12 @@ func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { }, } - return []*p2p.ChannelDescriptor{ - { - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - }, + return &p2p.ChannelDescriptor{ + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, } } diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 104e1d4bd..69582284b 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -50,7 +50,7 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := GetChannelShims(config)[0] + chDesc := GetChannelDescriptor(config) chDesc.RecvBufferCapacity = int(chBuf) rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc) diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index e4b04c931..8ef5a6bd8 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -90,14 +90,9 @@ func NewReactor( func defaultObservePanic(r interface{}) {} -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. 
+func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -105,14 +100,12 @@ func GetChannelShims(cfg *config.MempoolConfig) []*p2p.ChannelDescriptor { }, } - return []*p2p.ChannelDescriptor{ - { - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - }, + return &p2p.ChannelDescriptor{ + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, } } diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index f004e75a9..56e6057a1 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -50,7 +50,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := GetChannelShims(cfg.Mempool)[0] + chDesc := GetChannelDescriptor(cfg.Mempool) rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) for nodeID := range rts.network.Nodes { diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 1e149a2e5..a99e83dc5 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -81,8 +81,8 @@ type MConnection struct { recvMonitor *flow.Monitor send chan struct{} pong chan struct{} - channels []*Channel - channelsIdx map[ChannelID]*Channel + channels []*channel + channelsIdx map[ChannelID]*channel onReceive receiveCbFunc onError errorCbFunc errored uint32 @@ -186,8 +186,8 @@ func NewMConnectionWithConfig( } // Create channels - var channelsIdx = map[ChannelID]*Channel{} - var channels = []*Channel{} + var channelsIdx = map[ChannelID]*channel{} + var channels = []*channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -436,7 +436,7 @@ func (c *MConnection) sendPacketMsg() bool { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 - var leastChannel *Channel + var leastChannel *channel for _, channel := range c.channels { // If nothing to send, skip this channel if !channel.isSendPending() { @@ -639,9 +639,8 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { return } -// TODO: lowercase. // NOTE: not goroutine-safe. -type Channel struct { +type channel struct { // Exponential moving average. // This field must be accessed atomically. // It is first in the struct to ensure correct alignment. @@ -660,12 +659,12 @@ type Channel struct { Logger log.Logger } -func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { +func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { desc = desc.FillDefaults() if desc.Priority <= 0 { panic("Channel default priority must be a positive integer") } - return &Channel{ + return &channel{ conn: conn, desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), @@ -674,14 +673,14 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { } } -func (ch *Channel) SetLogger(l log.Logger) { +func (ch *channel) SetLogger(l log.Logger) { ch.Logger = l } // Queues message to send to this channel. 
// Goroutine-safe // Times out (and returns false) after defaultSendTimeout -func (ch *Channel) sendBytes(bytes []byte) bool { +func (ch *channel) sendBytes(bytes []byte) bool { select { case ch.sendQueue <- bytes: atomic.AddInt32(&ch.sendQueueSize, 1) @@ -694,7 +693,7 @@ func (ch *Channel) sendBytes(bytes []byte) bool { // Returns true if any PacketMsgs are pending to be sent. // Call before calling nextPacketMsg() // Goroutine-safe -func (ch *Channel) isSendPending() bool { +func (ch *channel) isSendPending() bool { if len(ch.sending) == 0 { if len(ch.sendQueue) == 0 { return false @@ -706,7 +705,7 @@ func (ch *Channel) isSendPending() bool { // Creates a new PacketMsg to send. // Not goroutine-safe -func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { +func (ch *channel) nextPacketMsg() tmp2p.PacketMsg { packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)} maxSize := ch.maxPacketMsgPayloadSize packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))] @@ -723,7 +722,7 @@ func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { // Writes next PacketMsg to w and updates c.recentlySent. // Not goroutine-safe -func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { +func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) { packet := ch.nextPacketMsg() n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet)) atomic.AddInt64(&ch.recentlySent, int64(n)) @@ -733,7 +732,7 @@ func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { // Handles incoming PacketMsgs. It returns a message bytes if message is // complete, which is owned by the caller and will not be modified. // Not goroutine-safe -func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { +func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) if recvCap < recvReceived { @@ -750,7 +749,7 @@ func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { // Call this periodically to update stats for throttling purposes. // Not goroutine-safe -func (ch *Channel) updateStats() { +func (ch *channel) updateStats() { // Exponential decay of stats. // TODO: optimize. atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8)) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 99cb4e6ab..939fb409c 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -28,48 +28,6 @@ import ( var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*ssproto.Message)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. 
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = []*p2p.ChannelDescriptor{ - { - ID: SnapshotChannel, - MessageType: new(ssproto.Message), - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - }, - { - ID: ChunkChannel, - Priority: 3, - MessageType: new(ssproto.Message), - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - }, - { - ID: LightBlockChannel, - MessageType: new(ssproto.Message), - Priority: 5, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - }, - { - ID: ParamsChannel, - MessageType: new(ssproto.Message), - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: paramMsgSize, - RecvBufferCapacity: 128, - }, - } ) const ( @@ -113,6 +71,45 @@ const ( maxLightBlockRequestRetries = 20 ) +func GetChannelDescriptors() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + + ID: SnapshotChannel, + MessageType: new(ssproto.Message), + Priority: 6, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ChunkChannel, + Priority: 3, + MessageType: new(ssproto.Message), + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: LightBlockChannel, + MessageType: new(ssproto.Message), + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ParamsChannel, + MessageType: new(ssproto.Message), + Priority: 2, + SendQueueCapacity: 10, + RecvMessageCapacity: paramMsgSize, + RecvBufferCapacity: 128, + }, + } + +} + // Metricer defines an interface used for the rpc sync info query, please see statesync.metrics // for the details. type Metricer interface { diff --git a/node/node.go b/node/node.go index 0a48bf0b7..727722279 100644 --- a/node/node.go +++ b/node/node.go @@ -303,11 +303,14 @@ func makeNode(cfg *config.Config, sm.BlockExecutorWithMetrics(nodeMetrics.state), ) - csReactor, csState := createConsensusReactor( + csReactor, csState, err := createConsensusReactor( cfg, state, blockExec, blockStore, mp, evPool, privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, peerManager, router, consensusLogger, ) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. @@ -334,7 +337,17 @@ func makeNode(cfg *config.Config, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 ssLogger := logger.With("module", "statesync") - channels := makeChannelsFromShims(router, statesync.ChannelShims) + ssChDesc := statesync.GetChannelDescriptors() + channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc)) + for idx := range ssChDesc { + chd := ssChDesc[idx] + ch, err := router.OpenChannel(chd) + if err != nil { + return nil, err + } + + channels[ch.ID] = ch + } peerUpdates := peerManager.Subscribe() stateSyncReactor := statesync.NewReactor( @@ -1088,23 +1101,3 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt return opts } - -// FIXME: Temporary helper function, shims should be removed. 
-func makeChannelsFromShims( - router *p2p.Router, - chDescs []*p2p.ChannelDescriptor, -) map[p2p.ChannelID]*p2p.Channel { - - channels := map[p2p.ChannelID]*p2p.Channel{} - for idx := range chDescs { - chDesc := chDescs[idx] - ch, err := router.OpenChannel(chDesc) - if err != nil { - panic(fmt.Sprintf("failed to open channel %v: %v", chDesc.ID, err)) - } - - channels[chDesc.ID] = ch - } - - return channels -} diff --git a/node/setup.go b/node/setup.go index fc611d946..40ced410c 100644 --- a/node/setup.go +++ b/node/setup.go @@ -198,13 +198,15 @@ func createMempoolReactor( ) (service.Service, mempool.Mempool, error) { logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) - channelShims := mempoolv0.GetChannelShims(cfg.Mempool) - - channels := makeChannelsFromShims(router, channelShims) peerUpdates := peerManager.Subscribe() switch cfg.Mempool.Version { case config.MempoolV0: + ch, err := router.OpenChannel(mempoolv0.GetChannelDescriptor(cfg.Mempool)) + if err != nil { + return nil, nil, err + } + mp := mempoolv0.NewCListMempool( cfg.Mempool, proxyApp.Mempool(), @@ -221,7 +223,7 @@ func createMempoolReactor( cfg.Mempool, peerManager, mp, - channels[mempool.MempoolChannel], + ch, peerUpdates, ) @@ -232,6 +234,11 @@ func createMempoolReactor( return reactor, mp, nil case config.MempoolV1: + ch, err := router.OpenChannel(mempoolv1.GetChannelDescriptor(cfg.Mempool)) + if err != nil { + return nil, nil, err + } + mp := mempoolv1.NewTxMempool( logger, cfg.Mempool, @@ -247,7 +254,7 @@ func createMempoolReactor( cfg.Mempool, peerManager, mp, - channels[mempool.MempoolChannel], + ch, peerUpdates, ) @@ -283,9 +290,14 @@ func createEvidenceReactor( return nil, nil, fmt.Errorf("creating evidence pool: %w", err) } + ch, err := router.OpenChannel(evidence.GetChannelDescriptor()) + if err != nil { + return nil, nil, fmt.Errorf("creating evidence channel: %w", err) + } + evidenceReactor := evidence.NewReactor( logger, - makeChannelsFromShims(router, evidence.ChannelShims)[evidence.EvidenceChannel], + ch, peerManager.Subscribe(), evidencePool, ) @@ -307,12 +319,16 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") - channels := makeChannelsFromShims(router, blocksync.ChannelShims) + ch, err := router.OpenChannel(blocksync.GetChannelDescriptor()) + if err != nil { + return nil, err + } + peerUpdates := peerManager.Subscribe() reactor, err := blocksync.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, - channels[blocksync.BlockSyncChannel], peerUpdates, blockSync, + ch, peerUpdates, blockSync, metrics, ) if err != nil { @@ -336,7 +352,7 @@ func createConsensusReactor( peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, -) (*consensus.Reactor, *consensus.State) { +) (*consensus.Reactor, *consensus.State, error) { consensusState := consensus.NewState( cfg.Consensus, @@ -352,13 +368,19 @@ func createConsensusReactor( consensusState.SetPrivValidator(privValidator) } - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) + csChDesc := consensus.GetChannelDescriptors() + channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc)) + for idx := range csChDesc { + chd := csChDesc[idx] + ch, err := router.OpenChannel(chd) + if err != nil { + return nil, nil, err + } - channels = makeChannelsFromShims(router, consensus.ChannelShims) - peerUpdates = peerManager.Subscribe() + channels[ch.ID] = ch + } + + peerUpdates := peerManager.Subscribe() reactor := consensus.NewReactor( logger, @@ -376,7 +398,7 @@ func 
createConsensusReactor( // consensusReactor will set it on consensusState and blockExecutor. reactor.SetEventBus(eventBus) - return reactor, consensusState + return reactor, consensusState, nil } func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { From 19ec4a532239be8124a9ed2ed5891cefa4eef9a4 Mon Sep 17 00:00:00 2001 From: Marko Date: Mon, 18 Oct 2021 08:22:02 +0000 Subject: [PATCH 106/174] tools: clone proto files from spec (#6976) ## Description clone proto files from spec in order to have them in a single location closes https://github.com/tendermint/spec/issues/343 --- CHANGELOG_PENDING.md | 1 + CONTRIBUTING.md | 2 +- abci/types/types.pb.go | 2 +- proto/tendermint/abci/types.proto | 369 ------------------------- proto/tendermint/blocksync/types.proto | 41 --- proto/tendermint/consensus/types.proto | 92 ------ proto/tendermint/mempool/types.proto | 14 - proto/tendermint/p2p/conn.proto | 30 -- proto/tendermint/p2p/pex.proto | 37 --- proto/tendermint/p2p/types.proto | 42 --- proto/tendermint/statesync/types.proto | 62 ----- proto/tendermint/types/block.proto | 15 - proto/tendermint/types/evidence.proto | 38 --- proto/tendermint/types/params.proto | 69 ----- proto/tendermint/types/types.proto | 157 ----------- proto/tendermint/types/validator.proto | 25 -- scripts/protocgen.sh | 28 ++ 17 files changed, 31 insertions(+), 993 deletions(-) delete mode 100644 proto/tendermint/abci/types.proto delete mode 100644 proto/tendermint/blocksync/types.proto delete mode 100644 proto/tendermint/consensus/types.proto delete mode 100644 proto/tendermint/mempool/types.proto delete mode 100644 proto/tendermint/p2p/conn.proto delete mode 100644 proto/tendermint/p2p/pex.proto delete mode 100644 proto/tendermint/p2p/types.proto delete mode 100644 proto/tendermint/statesync/types.proto delete mode 100644 proto/tendermint/types/block.proto delete mode 100644 proto/tendermint/types/evidence.proto delete mode 100644 proto/tendermint/types/params.proto delete mode 100644 proto/tendermint/types/types.proto delete mode 100644 proto/tendermint/types/validator.proto diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9d8148783..33d2c89c2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -15,6 +15,7 @@ Special thanks to external contributors on this release: - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair). - Apps + - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository. - P2P Protocol diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16bef07cc..e4613f84e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,7 +109,7 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. -We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. +We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. 
This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`. ### Visual Studio Code diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6b00c587a..6e4b53a1d 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1838,7 +1838,7 @@ type ResponseCheckTx struct { Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto deleted file mode 100644 index 7126488d0..000000000 --- a/proto/tendermint/abci/types.proto +++ /dev/null @@ -1,369 +0,0 @@ -syntax = "proto3"; -package tendermint.abci; - -option go_package = "github.com/tendermint/tendermint/abci/types"; - -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -import "tendermint/crypto/proof.proto"; -import "tendermint/types/types.proto"; -import "tendermint/crypto/keys.proto"; -import "tendermint/types/params.proto"; -import "google/protobuf/timestamp.proto"; -import "gogoproto/gogo.proto"; - -// This file is copied from http://github.com/tendermint/abci -// NOTE: When using custom types, mind the warnings. 
-// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues - -//---------------------------------------- -// Request types - -message Request { - oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8; - RequestEndBlock end_block = 9; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; - } -} - -message RequestEcho { - string message = 1; -} - -message RequestFlush {} - -message RequestInfo { - string version = 1; - uint64 block_version = 2; - uint64 p2p_version = 3; - string abci_version = 4; -} - -message RequestInitChain { - google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - string chain_id = 2; - tendermint.types.ConsensusParams consensus_params = 3; - repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; - bytes app_state_bytes = 5; - int64 initial_height = 6; -} - -message RequestQuery { - bytes data = 1; - string path = 2; - int64 height = 3; - bool prove = 4; -} - -message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - -enum CheckTxType { - NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; - RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; -} - -message RequestCheckTx { - bytes tx = 1; - CheckTxType type = 2; -} - -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - -message RequestCommit {} - -// lists available snapshots -message RequestListSnapshots {} - -// offers a snapshot to the application -message RequestOfferSnapshot { - Snapshot snapshot = 1; // snapshot offered by peers - bytes app_hash = 2; // light client-verified app hash for snapshot height -} - -// loads a snapshot chunk -message RequestLoadSnapshotChunk { - uint64 height = 1; - uint32 format = 2; - uint32 chunk = 3; -} - -// Applies a snapshot chunk -message RequestApplySnapshotChunk { - uint32 index = 1; - bytes chunk = 2; - string sender = 3; -} - -//---------------------------------------- -// Response types - -message Response { - oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9; - ResponseEndBlock end_block = 10; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; - } -} - -// nondeterministic -message ResponseException { - string error = 1; -} - -message ResponseEcho { - string message = 1; -} - -message ResponseFlush {} - -message ResponseInfo { - string data = 1; - - // this is the software version of the application. TODO: remove? 
- string version = 2; - uint64 app_version = 3; - - int64 last_block_height = 4; - bytes last_block_app_hash = 5; -} - -message ResponseInitChain { - tendermint.types.ConsensusParams consensus_params = 1; - repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; - bytes app_hash = 3; -} - -message ResponseQuery { - uint32 code = 1; - // bytes data = 2; // use "value" instead. - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 index = 5; - bytes key = 6; - bytes value = 7; - tendermint.crypto.ProofOps proof_ops = 8; - int64 height = 9; - string codespace = 10; -} - -message ResponseBeginBlock { - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCheckTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; - string sender = 9; - int64 priority = 10; - - // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. - string mempool_error = 11; -} - -message ResponseDeliverTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic - string codespace = 8; -} - -message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; - tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCommit { - // reserve 1 - bytes data = 2; - int64 retain_height = 3; -} - -message ResponseListSnapshots { - repeated Snapshot snapshots = 1; -} - -message ResponseOfferSnapshot { - Result result = 1; - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Snapshot accepted, apply chunks - ABORT = 2; // Abort all snapshot restoration - REJECT = 3; // Reject this specific snapshot, try others - REJECT_FORMAT = 4; // Reject all snapshots of this format, try others - REJECT_SENDER = 5; // Reject all snapshots from the sender(s), try others - } -} - -message ResponseLoadSnapshotChunk { - bytes chunk = 1; -} - -message ResponseApplySnapshotChunk { - Result result = 1; - repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply - repeated string reject_senders = 3; // Chunk senders to reject and ban - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Chunk successfully accepted - ABORT = 2; // Abort all snapshot restoration - RETRY = 3; // Retry chunk (combine with refetch and reject) - RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) - REJECT_SNAPSHOT = 5; // Reject this snapshot, try others - } -} - -//---------------------------------------- -// Misc. 
- -message LastCommitInfo { - int32 round = 1; - repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; -} - -// Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. -// Later, transactions may be queried using these events. -message Event { - string type = 1; - repeated EventAttribute attributes = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes,omitempty"]; -} - -// EventAttribute is a single key-value pair, associated with an event. -message EventAttribute { - string key = 1; - string value = 2; - bool index = 3; // nondeterministic -} - -// TxResult contains results of executing the transaction. -// -// One usage is indexing transaction results. -message TxResult { - int64 height = 1; - uint32 index = 2; - bytes tx = 3; - ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; -} - -//---------------------------------------- -// Blockchain Types - -// Validator -message Validator { - bytes address = 1; // The first 20 bytes of SHA256(public key) - // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - int64 power = 3; // The voting power -} - -// ValidatorUpdate -message ValidatorUpdate { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - int64 power = 2; -} - -// VoteInfo -message VoteInfo { - Validator validator = 1 [(gogoproto.nullable) = false]; - bool signed_last_block = 2; -} - -enum EvidenceType { - UNKNOWN = 0; - DUPLICATE_VOTE = 1; - LIGHT_CLIENT_ATTACK = 2; -} - -message Evidence { - EvidenceType type = 1; - // The offending validator - Validator validator = 2 [(gogoproto.nullable) = false]; - // The height when the offense occurred - int64 height = 3; - // The corresponding time where the offense occurred - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - // Total voting power of the validator set in case the ABCI application does - // not store historical validators. 
- // https://github.com/tendermint/tendermint/issues/4581 - int64 total_voting_power = 5; -} - -//---------------------------------------- -// State Sync Types - -message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // Number of chunks in the snapshot - bytes hash = 4; // Arbitrary snapshot hash, equal only if identical - bytes metadata = 5; // Arbitrary application metadata -} - -//---------------------------------------- -// Service Definition - -service ABCIApplication { - rpc Echo(RequestEcho) returns (ResponseEcho); - rpc Flush(RequestFlush) returns (ResponseFlush); - rpc Info(RequestInfo) returns (ResponseInfo); - rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); - rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); - rpc Query(RequestQuery) returns (ResponseQuery); - rpc Commit(RequestCommit) returns (ResponseCommit); - rpc InitChain(RequestInitChain) returns (ResponseInitChain); - rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); - rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); - rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); - rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); - rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); - rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); -} diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto deleted file mode 100644 index 8c187c793..000000000 --- a/proto/tendermint/blocksync/types.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; -package tendermint.blocksync; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; - -import "tendermint/types/block.proto"; - -// BlockRequest requests a block for a specific height -message BlockRequest { - int64 height = 1; -} - -// NoBlockResponse informs the node that the peer does not have block at the requested height -message NoBlockResponse { - int64 height = 1; -} - -// BlockResponse returns block to the requested -message BlockResponse { - tendermint.types.Block block = 1; -} - -// StatusRequest requests the status of a peer. -message StatusRequest { -} - -// StatusResponse is a peer response to inform their status. -message StatusResponse { - int64 height = 1; - int64 base = 2; -} - -message Message { - oneof sum { - BlockRequest block_request = 1; - NoBlockResponse no_block_response = 2; - BlockResponse block_response = 3; - StatusRequest status_request = 4; - StatusResponse status_response = 5; - } -} diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto deleted file mode 100644 index 6e1f41371..000000000 --- a/proto/tendermint/consensus/types.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; -package tendermint.consensus; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/libs/bits/types.proto"; - -// NewRoundStep is sent for every step taken in the ConsensusState. 
-// For every height/round/step transition -message NewRoundStep { - int64 height = 1; - int32 round = 2; - uint32 step = 3; - int64 seconds_since_start_time = 4; - int32 last_commit_round = 5; -} - -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. -// In case the block is also committed, then IsCommit flag is set to true. -message NewValidBlock { - int64 height = 1; - int32 round = 2; - tendermint.types.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray block_parts = 4; - bool is_commit = 5; -} - -// Proposal is sent when a new block is proposed. -message Proposal { - tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false]; -} - -// ProposalPOL is sent when a previous proposal is re-proposed. -message ProposalPOL { - int64 height = 1; - int32 proposal_pol_round = 2; - tendermint.libs.bits.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; -} - -// BlockPart is sent when gossipping a piece of the proposed block. -message BlockPart { - int64 height = 1; - int32 round = 2; - tendermint.types.Part part = 3 [(gogoproto.nullable) = false]; -} - -// Vote is sent when voting for a proposal (or lack thereof). -message Vote { - tendermint.types.Vote vote = 1; -} - -// HasVote is sent to indicate that a particular vote has been received. -message HasVote { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - int32 index = 4; -} - -// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. -message VoteSetMaj23 { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; -} - -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. 
-message VoteSetBits { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false]; -} - -message Message { - oneof sum { - NewRoundStep new_round_step = 1; - NewValidBlock new_valid_block = 2; - Proposal proposal = 3; - ProposalPOL proposal_pol = 4; - BlockPart block_part = 5; - Vote vote = 6; - HasVote has_vote = 7; - VoteSetMaj23 vote_set_maj23 = 8; - VoteSetBits vote_set_bits = 9; - } -} diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto deleted file mode 100644 index b55d9717b..000000000 --- a/proto/tendermint/mempool/types.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; -package tendermint.mempool; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool"; - -message Txs { - repeated bytes txs = 1; -} - -message Message { - oneof sum { - Txs txs = 1; - } -} diff --git a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto deleted file mode 100644 index b12de6c82..000000000 --- a/proto/tendermint/p2p/conn.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "tendermint/crypto/keys.proto"; - -message PacketPing {} - -message PacketPong {} - -message PacketMsg { - int32 channel_id = 1 [(gogoproto.customname) = "ChannelID"]; - bool eof = 2 [(gogoproto.customname) = "EOF"]; - bytes data = 3; -} - -message Packet { - oneof sum { - PacketPing packet_ping = 1; - PacketPong packet_pong = 2; - PacketMsg packet_msg = 3; - } -} - -message AuthSigMessage { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - bytes sig = 2; -} diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto deleted file mode 100644 index 1f78c9864..000000000 --- a/proto/tendermint/p2p/pex.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; - -message PexAddress { - string id = 1 [(gogoproto.customname) = "ID"]; - string ip = 2 [(gogoproto.customname) = "IP"]; - uint32 port = 3; -} - -message PexRequest {} - -message PexResponse { - repeated PexAddress addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexAddressV2 { - string url = 1 [(gogoproto.customname) = "URL"]; -} - -message PexRequestV2 {} - -message PexResponseV2 { - repeated PexAddressV2 addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexMessage { - oneof sum { - PexRequest pex_request = 1; - PexResponse pex_response = 2; - PexRequestV2 pex_request_v2 = 3; - PexResponseV2 pex_response_v2 = 4; - } -} diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto deleted file mode 100644 index 216a6d8d0..000000000 --- a/proto/tendermint/p2p/types.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; - -message ProtocolVersion { - uint64 p2p = 1 [(gogoproto.customname) = "P2P"]; - uint64 block = 2; - uint64 app = 3; -} - -message NodeInfo { - ProtocolVersion protocol_version = 1 [(gogoproto.nullable) = false]; - string node_id = 2 
[(gogoproto.customname) = "NodeID"]; - string listen_addr = 3; - string network = 4; - string version = 5; - bytes channels = 6; - string moniker = 7; - NodeInfoOther other = 8 [(gogoproto.nullable) = false]; -} - -message NodeInfoOther { - string tx_index = 1; - string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"]; -} - -message PeerInfo { - string id = 1 [(gogoproto.customname) = "ID"]; - repeated PeerAddressInfo address_info = 2; - google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; -} - -message PeerAddressInfo { - string address = 1; - google.protobuf.Timestamp last_dial_success = 2 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_dial_failure = 3 [(gogoproto.stdtime) = true]; - uint32 dial_failures = 4; -} diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto deleted file mode 100644 index fcfd05f68..000000000 --- a/proto/tendermint/statesync/types.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; -package tendermint.statesync; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/params.proto"; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; - -message Message { - oneof sum { - SnapshotsRequest snapshots_request = 1; - SnapshotsResponse snapshots_response = 2; - ChunkRequest chunk_request = 3; - ChunkResponse chunk_response = 4; - LightBlockRequest light_block_request = 5; - LightBlockResponse light_block_response = 6; - ParamsRequest params_request = 7; - ParamsResponse params_response = 8; - } -} - -message SnapshotsRequest {} - -message SnapshotsResponse { - uint64 height = 1; - uint32 format = 2; - uint32 chunks = 3; - bytes hash = 4; - bytes metadata = 5; -} - -message ChunkRequest { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; -} - -message ChunkResponse { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; - bytes chunk = 4; - bool missing = 5; -} - -message LightBlockRequest { - uint64 height = 1; -} - -message LightBlockResponse { - tendermint.types.LightBlock light_block = 1; -} - -message ParamsRequest { - uint64 height = 1; -} - -message ParamsResponse { - uint64 height = 1; - tendermint.types.ConsensusParams consensus_params = 2 [(gogoproto.nullable) = false]; -} \ No newline at end of file diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto deleted file mode 100644 index 84e9bb15d..000000000 --- a/proto/tendermint/types/block.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/evidence.proto"; - -message Block { - Header header = 1 [(gogoproto.nullable) = false]; - Data data = 2 [(gogoproto.nullable) = false]; - tendermint.types.EvidenceList evidence = 3 [(gogoproto.nullable) = false]; - Commit last_commit = 4; -} diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto deleted file mode 100644 index 451b8dca3..000000000 --- a/proto/tendermint/types/evidence.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/validator.proto"; - -message 
Evidence { - oneof sum { - DuplicateVoteEvidence duplicate_vote_evidence = 1; - LightClientAttackEvidence light_client_attack_evidence = 2; - } -} - -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. -message DuplicateVoteEvidence { - tendermint.types.Vote vote_a = 1; - tendermint.types.Vote vote_b = 2; - int64 total_voting_power = 3; - int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. -message LightClientAttackEvidence { - tendermint.types.LightBlock conflicting_block = 1; - int64 common_height = 2; - repeated tendermint.types.Validator byzantine_validators = 3; - int64 total_voting_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -message EvidenceList { - repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto deleted file mode 100644 index cc926b64e..000000000 --- a/proto/tendermint/types/params.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; - -option (gogoproto.equal_all) = true; - -// ConsensusParams contains consensus critical parameters that determine the -// validity of blocks. -message ConsensusParams { - BlockParams block = 1; - EvidenceParams evidence = 2; - ValidatorParams validator = 3; - VersionParams version = 4; -} - -// BlockParams contains limits on the block size. -message BlockParams { - // Max block size, in bytes. - // Note: must be greater than 0 - int64 max_bytes = 1; - // Max gas per block. - // Note: must be greater or equal to -1 - int64 max_gas = 2; -} - -// EvidenceParams determine how we handle evidence of malfeasance. -message EvidenceParams { - // Max age of evidence, in blocks. - // - // The basic formula for calculating this is: MaxAgeDuration / {average block - // time}. - int64 max_age_num_blocks = 1; - - // Max age of evidence, in time. - // - // It should correspond with an app's "unbonding period" or other similar - // mechanism for handling [Nothing-At-Stake - // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). - google.protobuf.Duration max_age_duration = 2 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; - - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. - // Default is 1048576 or 1MB - int64 max_bytes = 3; -} - -// ValidatorParams restrict the public key types validators can use. -// NOTE: uses ABCI pubkey naming, not Amino names. -message ValidatorParams { - repeated string pub_key_types = 1; -} - -// VersionParams contains the ABCI application version. -message VersionParams { - uint64 app_version = 1; -} - -// HashedParams is a subset of ConsensusParams. -// -// It is hashed into the Header.ConsensusHash. 
-message HashedParams { - int64 block_max_bytes = 1; - int64 block_max_gas = 2; -} diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto deleted file mode 100644 index 8d4f00972..000000000 --- a/proto/tendermint/types/types.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/crypto/proof.proto"; -import "tendermint/version/types.proto"; -import "tendermint/types/validator.proto"; - -// BlockIdFlag indicates which BlcokID the signature is for -enum BlockIDFlag { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; -} - -// SignedMsgType is a type of signed message in the consensus. -enum SignedMsgType { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; - // Votes - SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; - SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; - - // Proposals - SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; -} - -// PartsetHeader -message PartSetHeader { - uint32 total = 1; - bytes hash = 2; -} - -message Part { - uint32 index = 1; - bytes bytes = 2; - tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; -} - -// BlockID -message BlockID { - bytes hash = 1; - PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; -} - -// -------------------------------- - -// Header defines the structure of a Tendermint block header. -message Header { - // basic block info - tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; - string chain_id = 2 [(gogoproto.customname) = "ChainID"]; - int64 height = 3; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - - // prev block info - BlockID last_block_id = 5 [(gogoproto.nullable) = false]; - - // hashes of block data - bytes last_commit_hash = 6; // commit from validators from the last block - bytes data_hash = 7; // transactions - - // hashes from the app output from the prev block - bytes validators_hash = 8; // validators for the current block - bytes next_validators_hash = 9; // validators for the next block - bytes consensus_hash = 10; // consensus params for current block - bytes app_hash = 11; // state after txs from the previous block - bytes last_results_hash = 12; // root hash of all results from the txs from the previous block - - // consensus info - bytes evidence_hash = 13; // evidence included in the block - bytes proposer_address = 14; // original proposer of the block -} - -// Data contains the set of transactions included in the block -message Data { - // Txs that will be applied by state @ block.Height+1. - // NOTE: not all txs here are valid. We're just agreeing on the order first. - // This means that block.AppHash does not include these txs. 
- repeated bytes txs = 1; -} - -// Vote represents a prevote, precommit, or commit vote from validators for -// consensus. -message Vote { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - BlockID block_id = 4 - [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. - google.protobuf.Timestamp timestamp = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes validator_address = 6; - int32 validator_index = 7; - bytes signature = 8; -} - -// Commit contains the evidence that a block was committed by a set of validators. -message Commit { - int64 height = 1; - int32 round = 2; - BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; - repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; -} - -// CommitSig is a part of the Vote included in a Commit. -message CommitSig { - BlockIDFlag block_id_flag = 1; - bytes validator_address = 2; - google.protobuf.Timestamp timestamp = 3 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 4; -} - -message Proposal { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - int32 pol_round = 4; - BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - google.protobuf.Timestamp timestamp = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 7; -} - -message SignedHeader { - Header header = 1; - Commit commit = 2; -} - -message LightBlock { - SignedHeader signed_header = 1; - tendermint.types.ValidatorSet validator_set = 2; -} - -message BlockMeta { - BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - int64 block_size = 2; - Header header = 3 [(gogoproto.nullable) = false]; - int64 num_txs = 4; -} - -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
-message TxProof {
-  bytes root_hash = 1;
-  bytes data = 2;
-  tendermint.crypto.Proof proof = 3;
-}
diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto
deleted file mode 100644
index 49860b96d..000000000
--- a/proto/tendermint/types/validator.proto
+++ /dev/null
@@ -1,25 +0,0 @@
-syntax = "proto3";
-package tendermint.types;
-
-option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
-
-import "gogoproto/gogo.proto";
-import "tendermint/crypto/keys.proto";
-
-message ValidatorSet {
-  repeated Validator validators = 1;
-  Validator proposer = 2;
-  int64 total_voting_power = 3;
-}
-
-message Validator {
-  bytes address = 1;
-  tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false];
-  int64 voting_power = 3;
-  int64 proposer_priority = 4;
-}
-
-message SimpleValidator {
-  tendermint.crypto.PublicKey pub_key = 1;
-  int64 voting_power = 2;
-}
diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh
index 1b84c2c56..cd205eee8 100755
--- a/scripts/protocgen.sh
+++ b/scripts/protocgen.sh
@@ -1,7 +1,35 @@
 #!/usr/bin/env bash
 
+VERS=0.7.1
+
 set -eo pipefail
 
+# Edit this line to clone your branch, if you are modifying protobuf files
+curl -qL "https://github.com/tendermint/spec/archive/refs/tags/v${VERS}.tar.gz" | tar -xzf - spec-"$VERS"/proto/
+
+cp -r ./spec-"$VERS"/proto/tendermint/** ./proto/tendermint
+
 buf generate --path proto/tendermint
 mv ./proto/tendermint/abci/types.pb.go ./abci/types
+
+echo "proto files have been generated"
+
+echo "removing copied files"
+
+rm -rf ./proto/tendermint/abci/types.proto
+rm -rf ./proto/tendermint/blocksync/types.proto
+rm -rf ./proto/tendermint/consensus/types.proto
+rm -rf ./proto/tendermint/mempool/types.proto
+rm -rf ./proto/tendermint/p2p/types.proto
+rm -rf ./proto/tendermint/p2p/conn.proto
+rm -rf ./proto/tendermint/p2p/pex.proto
+rm -rf ./proto/tendermint/statesync/types.proto
+rm -rf ./proto/tendermint/types/block.proto
+rm -rf ./proto/tendermint/types/evidence.proto
+rm -rf ./proto/tendermint/types/params.proto
+rm -rf ./proto/tendermint/types/types.proto
+rm -rf ./proto/tendermint/types/validator.proto
+rm -rf ./proto/tendermint/version/version.proto
+
+rm -rf ./spec-"$VERS"

From 0408888a5edd6a128a8f6d4dcd3e0eac0fcfe7f9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Oct 2021 07:47:36 +0000
Subject: [PATCH 107/174] build(deps): Bump actions/checkout from 2.3.4 to 2.3.5 (#7139)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [actions/checkout](https://github.com/actions/checkout) from 2.3.4 to 2.3.5.
Release notes (sourced from actions/checkout's releases):

- v2.3.5: Update dependencies

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=2.3.4&new-version=2.3.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

Dependabot commands and options:

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/coverage.yml | 8 ++++---- .github/workflows/docker.yml | 2 +- .github/workflows/e2e-nightly-34x.yml | 2 +- .github/workflows/e2e-nightly-35x.yml | 2 +- .github/workflows/e2e-nightly-master.yml | 2 +- .github/workflows/e2e.yml | 2 +- .github/workflows/fuzz-nightly.yml | 2 +- .github/workflows/jepsen.yml | 2 +- .github/workflows/linkchecker.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/linter.yml | 2 +- .github/workflows/proto-docker.yml | 2 +- .github/workflows/proto.yml | 4 ++-- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 6 +++--- 15 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 4a3b89074..e8f5a600b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -12,7 +12,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Create a file with all the pkgs run: go list ./... > pkgs.txt - name: Split pkgs into 4 files @@ -47,7 +47,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -70,7 +70,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -99,7 +99,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index e773526fd..606d66cdb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Prepare id: prep run: | diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index d43bff12f..4b6fc8c93 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 with: ref: 'v0.34.x' diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index b3acdf62b..14662bbea 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 with: ref: 'v0.35.x' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index da8b07d70..0d5dbb830 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 134ae979c..e061afc5e 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml 
b/.github/workflows/fuzz-nightly.yml index 38ca6896d..9e67aed53 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -17,7 +17,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Install go-fuzz working-directory: test/fuzz diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 0e358af6e..2266e5ceb 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 with: repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index c97b22cd1..14998c136 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3e257e47c..948a10951 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 953170c59..afb04132d 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 - name: Lint Code Base uses: docker://github/super-linter:v3 env: diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml index ee26bd111..ddb189b4b 100644 --- a/.github/workflows/proto-docker.yml +++ b/.github/workflows/proto-docker.yml @@ -16,7 +16,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Prepare id: prep run: | diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index 43a1972ec..ed73f07d7 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -11,13 +11,13 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: lint run: make proto-lint proto-breakage: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: check-breakage run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 567a607ca..dcf4f957a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 with: fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index abb1e848e..176a686aa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -50,7 +50,7 @@ jobs: - uses: actions/setup-go@v2 with: 
go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -81,7 +81,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | From b0130c88fb1aa26242b1aa5d19421bd1f5e51988 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Tue, 19 Oct 2021 09:30:52 -0400 Subject: [PATCH 108/174] mempool: remove panic when recheck-tx was not sent to ABCI application (#7134) This pull request fixes a panic that exists in both mempools. The panic occurs when the ABCI client misses a response from the ABCI application. This happen when the ABCI client drops the request as a result of a full client queue. The fix here was to loop through the ordered list of recheck-tx in the callback until one matches the currently observed recheck request. --- internal/mempool/v0/clist_mempool.go | 34 +++++- internal/mempool/v0/clist_mempool_test.go | 53 +++++++++ internal/mempool/v1/mempool.go | 133 +++++++++++++--------- 3 files changed, 163 insertions(+), 57 deletions(-) diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 7816730c1..0a12c7000 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "sync" "sync/atomic" @@ -450,12 +449,35 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { case *abci.Response_CheckTx: tx := req.GetCheckTx().Tx memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, memTx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break + } + + mem.logger.Error( + "re-CheckTx transaction mismatch", + "got", types.Tx(tx), + "expected", memTx.tx, + ) + + if mem.recheckCursor == mem.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. 
+ mem.recheckCursor = nil + return + } + + mem.recheckCursor = mem.recheckCursor.Next() + memTx = mem.recheckCursor.Value.(*mempoolTx) } + var postCheckErr error if mem.postCheck != nil { postCheckErr = mem.postCheck(tx, r.CheckTx) diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index b61a8333e..a5d6ae8da 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -13,9 +13,11 @@ import ( "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abciclient "github.com/tendermint/tendermint/abci/client" + abciclimocks "github.com/tendermint/tendermint/abci/client/mocks" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" @@ -214,6 +216,57 @@ func TestMempoolUpdate(t *testing.T) { } } +func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { + var callback abciclient.Callback + mockClient := new(abciclimocks.Client) + mockClient.On("Start").Return(nil) + mockClient.On("SetLogger", mock.Anything) + + mockClient.On("Error").Return(nil).Times(4) + mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil) + mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) + + cc := func() (abciclient.Client, error) { + return mockClient, nil + } + + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + // Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them. + txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}} + for _, tx := range txs { + reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx})) + reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK}) + // SetDone allows the ReqRes to process its callback synchronously. + // This simulates the Response being ready for the client immediately. + reqRes.SetDone() + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil) + err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + require.NoError(t, err) + } + + // Calling update to remove the first transaction from the mempool. + // This call also triggers the mempool to recheck its remaining transactions. + err := mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.Nil(t, err) + + // The mempool has now sent its requests off to the client to be rechecked + // and is waiting for the corresponding callbacks to be called. + // We now call the mempool-supplied callback on the first and third transaction. + // This simulates the client dropping the second request. + // Previous versions of this code panicked when the ABCI application missed + // a recheck-tx request. 
+ resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK} + req := abci.RequestCheckTx{Tx: txs[1]} + callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) + + req = abci.RequestCheckTx{Tx: txs[3]} + callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) + mockClient.AssertExpectations(t) +} + func TestMempool_KeepInvalidTxsInCache(t *testing.T) { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index a12fbc51b..d78d9d287 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "reflect" "sync/atomic" "time" @@ -639,58 +640,88 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.RecheckTimes.Add(1) checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if ok { - tx := req.GetCheckTx().Tx - wtx := txmp.recheckCursor.Value.(*WrappedTx) - if !bytes.Equal(tx, wtx.tx) { - panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key())) - } - - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, checkTxRes.CheckTx) - } - - if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { - wtx.priority = checkTxRes.CheckTx.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", checkTxRes.CheckTx.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } - } - - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() - } - - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) + if !ok { + txmp.logger.Error("received incorrect type in mempool callback", + "expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(), + "got", reflect.TypeOf(res.Value).Name(), + ) + return } + tx := req.GetCheckTx().Tx + wtx := txmp.recheckCursor.Value.(*WrappedTx) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, wtx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break + } + + txmp.logger.Error( + "re-CheckTx transaction mismatch", + "got", wtx.tx.Hash(), + "expected", types.Tx(tx).Key(), + ) + + if txmp.recheckCursor == txmp.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. + txmp.recheckCursor = nil + return + } + + txmp.recheckCursor = txmp.recheckCursor.Next() + wtx = txmp.recheckCursor.Value.(*WrappedTx) + } + + // Only evaluate transactions that have not been removed. 
This can happen
+	// if an existing transaction is evicted during CheckTx and while this
+	// callback is being executed for the same evicted transaction.
+	if !txmp.txStore.IsTxRemoved(wtx.hash) {
+		var err error
+		if txmp.postCheck != nil {
+			err = txmp.postCheck(tx, checkTxRes.CheckTx)
+		}
+
+		if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
+			wtx.priority = checkTxRes.CheckTx.Priority
+		} else {
+			txmp.logger.Debug(
+				"existing transaction no longer valid; failed re-CheckTx callback",
+				"priority", wtx.priority,
+				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+				"err", err,
+				"code", checkTxRes.CheckTx.Code,
+			)
+
+			if wtx.gossipEl != txmp.recheckCursor {
+				panic("corrupted reCheckTx cursor")
+			}
+
+			txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache)
+		}
+	}
+
+	// move reCheckTx cursor to next element
+	if txmp.recheckCursor == txmp.recheckEnd {
+		txmp.recheckCursor = nil
+	} else {
+		txmp.recheckCursor = txmp.recheckCursor.Next()
+	}
+
+	if txmp.recheckCursor == nil {
+		txmp.logger.Debug("finished rechecking transactions")
+
+		if txmp.Size() > 0 {
+			txmp.notifyTxsAvailable()
+		}
+	}
+
+	txmp.metrics.Size.Set(float64(txmp.Size()))
 }
 
 // updateReCheckTxs updates the recheck cursors by using the gossipIndex. For

From f7f4067968a0795414399c410592f3e9bc6ccaa0 Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Tue, 19 Oct 2021 15:32:13 -0700
Subject: [PATCH 109/174] pubsub: simplify and improve server concurrency handling (#7070)

Rework the internal plumbing of the server. This change does not modify
the exported interfaces or semantics of the package, and all the
existing tests still pass. The main changes here are to:

- Simplify the interface for subscription indexing with a typed index
  rather than a single nested map.
- Ensure orderly shutdown of channels, so that there is no longer a
  dynamic race with concurrent publishers & subscribers at shutdown.
- Remove a layer of indirection between publishers and subscribers.
  This mainly helps legibility.
- Remove order dependencies between registration and delivery.
- Add documentation comments where they seemed helpful, and clarified
  the existing comments where it was practical.

Although performance was not a primary goal of this change, the
simplifications did very slightly reduce memory use and increase
throughput on the existing benchmarks, though the delta is not
statistically significant.

BENCHMARK                BEFORE  AFTER  SPEEDUP (%)  B/op (B)  B/op (A)
Benchmark10Clients-12      5947   5566          6.4      2017      1942
Benchmark100Clients-12     6111   5762          5.7      1992      1910
Benchmark1000Clients-12    6983   6344          9.2      2046      1959
---
 libs/pubsub/pubsub.go       | 540 ++++++++++++++----------------
 libs/pubsub/subindex.go     | 113 ++++++++
 libs/pubsub/subscription.go |   4 +-
 3 files changed, 323 insertions(+), 334 deletions(-)
 create mode 100644 libs/pubsub/subindex.go

diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go
index 68d1ec941..1ae111f63 100644
--- a/libs/pubsub/pubsub.go
+++ b/libs/pubsub/pubsub.go
@@ -1,14 +1,12 @@
-// Package pubsub implements a pub-sub model with a single publisher (Server)
-// and multiple subscribers (clients).
+// Package pubsub implements an event dispatching server with a single publisher
+// and multiple subscriber clients. Multiple goroutines can safely publish to a
+// single Server instance.
 //
-// Though you can have multiple publishers by sharing a pointer to a server or
-// by giving the same channel to each publisher and publishing messages from
-// that channel (fan-in).
-// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. +// Clients register subscriptions with a query to select which messages they +// wish to receive. When messages are published, they are broadcast to all +// clients whose subscription query matches that message. Queries are +// constructed using the github.com/tendermint/tendermint/libs/pubsub/query +// package. // // Example: // @@ -16,7 +14,7 @@ // if err != nil { // return err // } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) +// ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) // defer cancel() // subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) // if err != nil { @@ -38,22 +36,12 @@ import ( "context" "errors" "fmt" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/service" ) -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - var ( // ErrSubscriptionNotFound is returned when a client tries to unsubscribe // from not existing subscription. @@ -62,6 +50,10 @@ var ( // ErrAlreadySubscribed is returned when a client tries to subscribe twice or // more using the same query. ErrAlreadySubscribed = errors.New("already subscribed") + + // ErrServerStopped is returned when attempting to publish or subscribe to a + // server that has been stopped. + ErrServerStopped = errors.New("pubsub server is stopped") ) // Query defines an interface for a query to be used for subscribing. A query @@ -75,17 +67,21 @@ type Query interface { String() string } +// UnsubscribeArgs are the parameters to remove a subscription. +// The subscriber ID must be populated, and at least one of the client ID or +// the registered query. type UnsubscribeArgs struct { - ID string - Subscriber string - Query Query + Subscriber string // subscriber ID chosen by the client (required) + ID string // subscription ID (assigned by the server) + Query Query // the query registered with the subscription } +// Validate returns nil if args are valid to identify a subscription to remove. +// Otherwise, it reports an error. func (args UnsubscribeArgs) Validate() error { if args.Subscriber == "" { return errors.New("must specify a subscriber") } - if args.ID == "" && args.Query == nil { return fmt.Errorf("subscription is not fully defined [subscriber=%q]", args.Subscriber) } @@ -93,35 +89,28 @@ func (args UnsubscribeArgs) Validate() error { return nil } -type cmd struct { - op operation - - // subscribe, unsubscribe - query Query - subscription *Subscription - clientID string - - // publish - msg interface{} - events []types.Event -} - // Server allows clients to subscribe/unsubscribe for messages, publishing // messages with or without events, and manages internal state. 
type Server struct { service.BaseService - cmds chan cmd - cmdsCap int + queue chan item + done <-chan struct{} // closed when server should exit + stop func() // signal the server to exit + pubs sync.RWMutex // excl: shutdown; shared: active publisher + exited chan struct{} // server exited - // check if we have subscription before - // subscribing or unsubscribing - mtx tmsync.RWMutex + // All subscriptions currently known. + // Lock exclusive to add, remove, or cancel subscriptions. + // Lock shared to look up or publish to subscriptions. + subs struct { + sync.RWMutex + index *subIndex + } - // subscriber -> [query->id (string) OR id->query (string))], - // track connections both by ID (new) and query (legacy) to - // avoid breaking the interface. - subscriptions map[string]map[string]string + // TODO(creachadair): Rework the options so that this does not need to live + // as a field. It is not otherwise needed. + queueCap int } // Option sets a parameter for the server. @@ -131,37 +120,34 @@ type Option func(*Server) // for a detailed description of how to configure buffering. If no options are // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]string), + s := new(Server) + for _, opt := range options { + opt(s) } s.BaseService = *service.NewBaseService(nil, "PubSub", s) - for _, option := range options { - option(s) - } + // The queue receives items to be published. + s.queue = make(chan item, s.queueCap) - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) + // The index tracks subscriptions by ID and query terms. + s.subs.index = newSubIndex() return s } -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). +// BufferCapacity allows you to specify capacity for publisher's queue. This +// is the number of messages that can be published without blocking. If no +// buffer is specified, publishing is synchronous with delivery. This function +// will panic if cap < 0. func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } + if cap < 0 { + panic("negative buffer capacity") } + return func(s *Server) { s.queueCap = cap } } -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} +// BufferCapacity returns capacity of the publication queue. +func (s *Server) BufferCapacity() int { return cap(s.queue) } // Subscribe creates a subscription for the given client. 
// @@ -195,331 +181,223 @@ func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query } func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { + s.subs.Lock() + defer s.subs.Unlock() + + if s.subs.index == nil { + return nil, ErrServerStopped + } else if s.subs.index.contains(clientID, query.String()) { return nil, ErrAlreadySubscribed } - subscription := NewSubscription(outCapacity) - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]string) - } - s.subscriptions[clientID][query.String()] = subscription.id - s.subscriptions[clientID][subscription.id] = query.String() - s.mtx.Unlock() - return subscription, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.Quit(): - return nil, nil - } + sub := NewSubscription(outCapacity) + s.subs.index.add(&subInfo{ + clientID: clientID, + query: query, + subID: sub.id, + sub: sub, + }) + return sub, nil } -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. +// Unsubscribe removes the subscription for the given client and/or query. It +// returns ErrSubscriptionNotFound if no such subscription exists. func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if err := args.Validate(); err != nil { return err } - var qs string - - if args.Query != nil { - qs = args.Query.String() + s.subs.Lock() + defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped } - clientSubscriptions, err := func() (map[string]string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() + // TODO(creachadair): Do we need to support unsubscription for an "empty" + // query? I believe that case is not possible by the Query grammar, but we + // should make sure. + // + // Revisit this logic once we are able to remove indexing by query. - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] - - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return nil, err - } - } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] + var evict subInfoSet + if args.Subscriber != "" { + evict = s.subs.index.findClientID(args.Subscriber) + if args.Query != nil { + evict = evict.withQuery(args.Query.String()) } - - if !ok { - return nil, ErrSubscriptionNotFound - } - - return clientSubscriptions, nil - }() - - if err != nil { - return err + } else { + evict = s.subs.index.findQuery(args.Query.String()) } - select { - case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(clientSubscriptions, args.ID) - delete(clientSubscriptions, qs) - - if len(clientSubscriptions) == 0 { - delete(s.subscriptions, args.Subscriber) - } - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. 
-func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { + if len(evict) == 0 { return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil +} - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - defer s.mtx.Unlock() +// UnsubscribeAll removes all subscriptions for the given client ID. +// It returns ErrSubscriptionNotFound if no subscriptions exist for that client. +func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.subs.Lock() + defer s.subs.Unlock() - delete(s.subscriptions, clientID) - - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil + evict := s.subs.index.findClientID(clientID) + if len(evict) == 0 { + return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil } // NumClients returns the number of clients. func (s *Server) NumClients() int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions) + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.byClient) } // NumClientSubscriptions returns the number of subscriptions the client has. func (s *Server) NumClientSubscriptions(clientID string) int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions[clientID]) / 2 + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.findClientID(clientID)) } // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) + return s.publish(ctx, msg, []types.Event{}) } // PublishWithEvents publishes the given message with the set of events. The set // is matched with clients queries. If there is a match, the message is sent to // the client. func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, events: events}: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } + return s.publish(ctx, msg, events) } // OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} +func (s *Server) OnStop() { s.stop() } -// NOTE: not goroutine safe -type state struct { - // query string -> client -> subscription - subscriptions map[string]map[string]*Subscription - // query string -> queryPlusRefCount - queries map[string]*queryPlusRefCount -} - -// queryPlusRefCount holds a pointer to a query and reference counter. When -// refCount is zero, query will be removed. -type queryPlusRefCount struct { - q Query - refCount int -} +// Wait implements Service.Wait by blocking until the server has exited, then +// yielding to the base service wait. +func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() } // OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { - go s.loop(state{ - subscriptions: make(map[string]map[string]*Subscription), - queries: make(map[string]*queryPlusRefCount), - }) - return nil +func (s *Server) OnStart() error { s.run(); return nil } + +// OnReset implements Service.OnReset. It has no effect for this service. 
+func (s *Server) OnReset() error { return nil } + +func (s *Server) publish(ctx context.Context, data interface{}, events []types.Event) error { + s.pubs.RLock() + defer s.pubs.RUnlock() + + select { + case <-s.done: + return ErrServerStopped + case <-ctx.Done(): + return ctx.Err() + case s.queue <- item{ + Data: data, + Events: events, + }: + return nil + } } -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} +func (s *Server) run() { + // The server runs until ctx is canceled. + ctx, cancel := context.WithCancel(context.Background()) + s.done = ctx.Done() + s.stop = cancel -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query.String(), cmd.subscription.id, ErrUnsubscribed) - } else { - state.removeClient(cmd.clientID, ErrUnsubscribed) - } - case shutdown: - state.removeAll(nil) - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.subscription) - case pub: - if err := state.send(cmd.msg, cmd.events); err != nil { - s.Logger.Error("Error querying for events", "err", err) + // Shutdown monitor: When the context ends, wait for any active publish + // calls to exit, then close the queue to signal the sender to exit. + go func() { + <-ctx.Done() + s.pubs.Lock() + defer s.pubs.Unlock() + close(s.queue) + }() + + s.exited = make(chan struct{}) + go func() { + defer close(s.exited) + + // Sender: Service the queue and forward messages to subscribers. + for it := range s.queue { + if err := s.send(it.Data, it.Events); err != nil { + s.Logger.Error("Error sending event", "err", err) } } - } + // Terminate all subscribers without error before exit. + s.subs.Lock() + defer s.subs.Unlock() + for si := range s.subs.index.all { + si.sub.cancel(nil) + } + s.subs.index = nil + }() } -func (state *state) add(clientID string, q Query, subscription *Subscription) { - qStr := q.String() - - // initialize subscription for this client per query if needed - if _, ok := state.subscriptions[qStr]; !ok { - state.subscriptions[qStr] = make(map[string]*Subscription) +// removeSubs cancels and removes all the subscriptions in evict with the given +// error. The caller must hold the s.subs lock. +func (s *Server) removeSubs(evict subInfoSet, reason error) { + for si := range evict { + si.sub.cancel(reason) } - - if _, ok := state.subscriptions[subscription.id]; !ok { - state.subscriptions[subscription.id] = make(map[string]*Subscription) - } - - // create subscription - state.subscriptions[qStr][clientID] = subscription - state.subscriptions[subscription.id][clientID] = subscription - - // initialize query if needed - if _, ok := state.queries[qStr]; !ok { - state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} - } - // increment reference counter - state.queries[qStr].refCount++ + s.subs.index.removeAll(evict) } -func (state *state) remove(clientID string, qStr, id string, reason error) { - clientSubscriptions, ok := state.subscriptions[qStr] - if !ok { - return - } - - subscription, ok := clientSubscriptions[clientID] - if !ok { - return - } - - subscription.cancel(reason) - - // remove client from query map. - // if query has no other clients subscribed, remove it. 
- delete(state.subscriptions[qStr], clientID) - delete(state.subscriptions[id], clientID) - if len(state.subscriptions[qStr]) == 0 { - delete(state.subscriptions, qStr) - } - - // decrease ref counter in queries - if ref, ok := state.queries[qStr]; ok { - ref.refCount-- - if ref.refCount == 0 { - // remove the query if nobody else is using it - delete(state.queries, qStr) +// send delivers the given message to all matching subscribers. An error in +// query matching stops transmission and is returned. +func (s *Server) send(data interface{}, events []types.Event) error { + // At exit, evict any subscriptions that were too slow. + evict := make(subInfoSet) + defer func() { + if len(evict) != 0 { + s.subs.Lock() + defer s.subs.Unlock() + s.removeSubs(evict, ErrOutOfCapacity) } - } -} + }() -func (state *state) removeClient(clientID string, reason error) { - seen := map[string]struct{}{} - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[clientID]; ok { - if _, ok = seen[sub.id]; ok { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - state.remove(clientID, qStr, sub.id, reason) - seen[sub.id] = struct{}{} - } - } -} + // N.B. Order is important here. We must acquire and defer the lock release + // AFTER deferring the eviction cleanup: The cleanup must happen after the + // reader lock has released, or it will deadlock. + s.subs.RLock() + defer s.subs.RUnlock() -func (state *state) removeAll(reason error) { - for qStr, clientSubscriptions := range state.subscriptions { - sub, ok := clientSubscriptions[qStr] - if !ok || ok && sub.id == qStr { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - - for clientID := range clientSubscriptions { - state.remove(clientID, qStr, sub.id, reason) - } - } -} - -func (state *state) send(msg interface{}, events []types.Event) error { - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { - continue - } - var q Query - if qi, ok := state.queries[qStr]; ok { - q = qi.q - } else { - continue - } - - match, err := q.Matches(events) + for si := range s.subs.index.all { + match, err := si.query.Matches(events) if err != nil { - return fmt.Errorf("failed to match against query %s: %w", q.String(), err) + return fmt.Errorf("match failed against query: %w", err) + // TODO(creachadair): Should we evict this subscription? + } else if !match { + continue } - if match { - for clientID, subscription := range clientSubscriptions { - if cap(subscription.out) == 0 { - // block on unbuffered channel - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - case <-subscription.canceled: - } - } else { - // don't block on buffered channels - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - default: - state.remove(clientID, qStr, subscription.id, ErrOutOfCapacity) - } - } - } + // Subscriptions may be buffered or unbuffered. Unbuffered subscriptions + // are intended for internal use such as indexing, where we don't want to + // penalize a slow reader. Buffered subscribers must keep up with their + // queue, or they will be terminated. + // + // TODO(creachadair): Unbuffered subscriptions used by the event indexer + // to avoid losing events if it happens to be slow. Rework this so that + // use case doesn't require this affordance, and then remove unbuffered + // subscriptions. 
+ msg := NewMessage(si.sub.id, data, events) + if cap(si.sub.out) == 0 { + si.sub.out <- msg + continue + } + select { + case si.sub.out <- msg: + // ok, delivered + default: + // slow subscriber, cancel them + evict.add(si) } } diff --git a/libs/pubsub/subindex.go b/libs/pubsub/subindex.go new file mode 100644 index 000000000..48dccf72d --- /dev/null +++ b/libs/pubsub/subindex.go @@ -0,0 +1,113 @@ +package pubsub + +import "github.com/tendermint/tendermint/abci/types" + +// An item to be published to subscribers. +type item struct { + Data interface{} + Events []types.Event +} + +// A subInfo value records a single subscription. +type subInfo struct { + clientID string // chosen by the client + query Query // chosen by the client + subID string // assigned at registration + sub *Subscription // receives published events +} + +// A subInfoSet is an unordered set of subscription info records. +type subInfoSet map[*subInfo]struct{} + +func (s subInfoSet) contains(si *subInfo) bool { _, ok := s[si]; return ok } +func (s subInfoSet) add(si *subInfo) { s[si] = struct{}{} } +func (s subInfoSet) remove(si *subInfo) { delete(s, si) } + +// withQuery returns the subset of s whose query string matches qs. +func (s subInfoSet) withQuery(qs string) subInfoSet { + out := make(subInfoSet) + for si := range s { + if si.query.String() == qs { + out.add(si) + } + } + return out +} + +// A subIndex is an indexed collection of subscription info records. +// The index is not safe for concurrent use without external synchronization. +type subIndex struct { + all subInfoSet // all subscriptions + byClient map[string]subInfoSet // per-client subscriptions + byQuery map[string]subInfoSet // per-query subscriptions + + // TODO(creachadair): We allow indexing by query to support existing use by + // the RPC service methods for event streaming. Fix up those methods not to + // require this, and then remove indexing by query. +} + +// newSubIndex constructs a new, empty subscription index. +func newSubIndex() *subIndex { + return &subIndex{ + all: make(subInfoSet), + byClient: make(map[string]subInfoSet), + byQuery: make(map[string]subInfoSet), + } +} + +// findClients returns the set of subscriptions for the given client ID, or nil. +func (idx *subIndex) findClientID(id string) subInfoSet { return idx.byClient[id] } + +// findQuery returns the set of subscriptions on the given query string, or nil. +func (idx *subIndex) findQuery(qs string) subInfoSet { return idx.byQuery[qs] } + +// contains reports whether idx contains any subscription matching the given +// client ID and query pair. +func (idx *subIndex) contains(clientID, query string) bool { + csubs, qsubs := idx.byClient[clientID], idx.byQuery[query] + if len(csubs) == 0 || len(qsubs) == 0 { + return false + } + for si := range csubs { + if qsubs.contains(si) { + return true + } + } + return false +} + +// add adds si to the index, replacing any previous entry with the same terms. +// It is the caller's responsibility to check for duplicates before adding. +// See also the contains method. +func (idx *subIndex) add(si *subInfo) { + idx.all.add(si) + if m := idx.byClient[si.clientID]; m == nil { + idx.byClient[si.clientID] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } + qs := si.query.String() + if m := idx.byQuery[qs]; m == nil { + idx.byQuery[qs] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } +} + +// removeAll removes all the elements of s from the index. 
+func (idx *subIndex) removeAll(s subInfoSet) {
+	for si := range s {
+		idx.all.remove(si)
+		idx.byClient[si.clientID].remove(si)
+		if len(idx.byClient[si.clientID]) == 0 {
+			delete(idx.byClient, si.clientID)
+		}
+		if si.query != nil {
+			qs := si.query.String()
+			idx.byQuery[qs].remove(si)
+			if len(idx.byQuery[qs]) == 0 {
+				delete(idx.byQuery, qs)
+			}
+		}
+	}
+}
diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go
index 40b84711e..4210416b6 100644
--- a/libs/pubsub/subscription.go
+++ b/libs/pubsub/subscription.go
@@ -44,9 +44,7 @@ func NewSubscription(outCapacity int) *Subscription {
 // Out returns a channel onto which messages and events are published.
 // Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from
 // receiving a nil message.
-func (s *Subscription) Out() <-chan Message {
-	return s.out
-}
+func (s *Subscription) Out() <-chan Message { return s.out }
 
 func (s *Subscription) ID() string { return s.id }

From a30860a3077e5acb0bd979b2d2c0edb51041535c Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 20 Oct 2021 10:38:29 -0400
Subject: [PATCH 110/174] e2e: always enable blocksync (#7144)

I believe it was the case that blocksync was not consistently enabled in
master, and this makes it the default in the master tests.
---
 test/e2e/generator/generate.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go
index 61b4bf7d3..f8b01420d 100644
--- a/test/e2e/generator/generate.go
+++ b/test/e2e/generator/generate.go
@@ -278,6 +278,7 @@ func generateNode(
 		Database:         nodeDatabases.Choose(r),
 		PrivvalProtocol:  nodePrivvalProtocols.Choose(r),
 		Mempool:          nodeMempools.Choose(r),
+		BlockSync:        "v0",
 		StateSync:        e2e.StateSyncDisabled,
 		PersistInterval:  ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))),
 		SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)),

From a8917040a816ff2072e2148e8e95c85d899a0956 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 20 Oct 2021 12:38:59 -0400
Subject: [PATCH 111/174] e2e: avoid unset defaults in generated tests (#7145)

I've observed a few cases in tests that are probably wrong, and added
some tests to cover this.
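In outline, the change backfills any node field that the random generator
left empty with an explicit default before the manifest is emitted, so
generated manifests never depend on implicit zero values. A minimal sketch
of that pattern follows (the field names and default values are taken from
the diff below; the helper function and the e2e.ManifestNode parameter type
are illustrative assumptions, not part of this patch):

    // applyNodeDefaults pins explicit values for fields the random generator
    // may leave unset. (Sketch only; assumes the e2e manifest package.)
    func applyNodeDefaults(node *e2e.ManifestNode) {
        if node.Mempool == "" {
            node.Mempool = "v1" // default mempool version
        }
        if node.PrivvalProtocol == "" {
            node.PrivvalProtocol = "file" // default privval transport
        }
    }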
---
 test/e2e/generator/generate.go      |  7 +++++++
 test/e2e/generator/generate_test.go | 28 ++++++++++++++++++++--------
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go
index f8b01420d..10a180f6a 100644
--- a/test/e2e/generator/generate.go
+++ b/test/e2e/generator/generate.go
@@ -286,6 +286,13 @@ func generateNode(
 		Perturb:          nodePerturbations.Choose(r),
 	}
 
+	if node.Mempool == "" {
+		node.Mempool = "v1"
+	}
+	if node.PrivvalProtocol == "" {
+		node.PrivvalProtocol = "file"
+	}
+
 	if startAt > 0 {
 		node.StateSync = nodeStateSyncs.Choose(r)
 		if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled {

diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go
index 79a20f27e..389483027 100644
--- a/test/e2e/generator/generate_test.go
+++ b/test/e2e/generator/generate_test.go
@@ -26,15 +26,27 @@ func TestGenerator(t *testing.T) {
 			numStateSyncs++
 		}
 		t.Run(name, func(t *testing.T) {
-			if node.StartAt > m.InitialHeight+5 && !node.Stateless() {
-				require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled)
+			t.Run("StateSync", func(t *testing.T) {
+				if node.StartAt > m.InitialHeight+5 && !node.Stateless() {
+					require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled)
+				}
+				if node.StateSync != e2e.StateSyncDisabled {
+					require.Zero(t, node.Seeds, node.StateSync)
+					require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0,
+						"peers: %v", node.PersistentPeers)
+				}
+			})
+			if e2e.Mode(node.Mode) != e2e.ModeLight {
+				t.Run("Mempool", func(t *testing.T) {
+					require.NotZero(t, node.Mempool)
+				})
+				t.Run("PrivvalProtocol", func(t *testing.T) {
+					require.NotZero(t, node.PrivvalProtocol)
+				})
+				t.Run("BlockSync", func(t *testing.T) {
+					require.NotZero(t, node.BlockSync)
+				})
 			}
-			if node.StateSync != e2e.StateSyncDisabled {
-				require.Zero(t, node.Seeds, node.StateSync)
-				require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0,
-					"peers: %v", node.PersistentPeers)
-			}
-		})
 	}
 	require.True(t, numStateSyncs <= 2)

From a8ff617773542b815136f63f5e8c01b221fe0ff6 Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Thu, 21 Oct 2021 13:40:43 +0200
Subject: [PATCH 112/174] state: add height assertion to rollback function (#7143)

---
 internal/state/rollback.go      | 17 +++++++
 internal/state/rollback_test.go | 79 ++++++++++++------------
 2 files changed, 50 insertions(+), 46 deletions(-)

diff --git a/internal/state/rollback.go b/internal/state/rollback.go
index 6e13da0e2..e78957b02 100644
--- a/internal/state/rollback.go
+++ b/internal/state/rollback.go
@@ -19,6 +19,23 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) {
 		return -1, nil, errors.New("no state found")
 	}
 
+	height := bs.Height()
+
+	// NOTE: persistence of state and blocks doesn't happen atomically. Therefore it is possible that
+	// when the user stopped the node the state wasn't updated but the blockstore was. In this situation
+	// we don't need to roll back any state and can just return early
+	if height == invalidState.LastBlockHeight+1 {
+		return invalidState.LastBlockHeight, invalidState.AppHash, nil
+	}
+
+	// If the state store isn't one below nor equal to the blockstore height, then this violates the
+	// invariant
+	if height != invalidState.LastBlockHeight {
+		return -1, nil, fmt.Errorf("statestore height (%d) is not one below or equal to blockstore height (%d)",
+			invalidState.LastBlockHeight, height)
+	}
+
+	// state store height is equal to blockstore height.
We're good to proceed with rolling back state rollbackHeight := invalidState.LastBlockHeight rollbackBlock := bs.LoadBlockMeta(rollbackHeight) if rollbackBlock == nil { diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index ae5c8ee84..e782b4d89 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -14,42 +14,14 @@ import ( ) func TestRollback(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) - blockStore := &mocks.BlockStore{} var ( height int64 = 100 appVersion uint64 = 10 ) - - valSet, _ := factory.RandValidatorSet(5, 10) - - params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion - newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 - - initialState := state.State{ - Version: state.Version{ - Consensus: version.Consensus{ - Block: version.BlockProtocol, - App: 10, - }, - Software: version.TMVersion, - }, - ChainID: factory.DefaultTestChainID, - InitialHeight: 10, - LastBlockID: factory.MakeBlockID(), - AppHash: factory.RandomHash(), - LastResultsHash: factory.RandomHash(), - LastBlockHeight: height, - LastValidators: valSet, - Validators: valSet.CopyIncrementProposerPriority(1), - NextValidators: valSet.CopyIncrementProposerPriority(2), - LastHeightValidatorsChanged: height + 1, - ConsensusParams: *params, - LastHeightConsensusParamsChanged: height + 1, - } - require.NoError(t, stateStore.Bootstrap(initialState)) + blockStore := &mocks.BlockStore{} + stateStore := setupStateStore(t, height) + initialState, err := stateStore.Load() + require.NoError(t, err) height++ block := &types.BlockMeta{ @@ -61,9 +33,13 @@ func TestRollback(t *testing.T) { }, } blockStore.On("LoadBlockMeta", height).Return(block) + blockStore.On("Height").Return(height) + // perform the rollback over a version bump appVersion++ + newParams := types.DefaultConsensusParams() newParams.Version.AppVersion = appVersion + newParams.Block.MaxBytes = 1000 nextState := initialState.Copy() nextState.LastBlockHeight = height nextState.Version.Consensus.App = appVersion @@ -102,19 +78,34 @@ func TestRollbackNoState(t *testing.T) { } func TestRollbackNoBlocks(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + const height = int64(100) + stateStore := setupStateStore(t, height) blockStore := &mocks.BlockStore{} - var ( - height int64 = 100 - appVersion uint64 = 10 - ) + blockStore.On("Height").Return(height) + blockStore.On("LoadBlockMeta", height).Return(nil) + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "block at height 100 not found") +} + +func TestRollbackDifferentStateHeight(t *testing.T) { + const height = int64(100) + stateStore := setupStateStore(t, height) + blockStore := &mocks.BlockStore{} + blockStore.On("Height").Return(height + 2) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Equal(t, err.Error(), "statestore height (100) is not one below or equal to blockstore height (102)") +} + +func setupStateStore(t *testing.T, height int64) state.Store { + stateStore := state.NewStore(dbm.NewMemDB()) valSet, _ := factory.RandValidatorSet(5, 10) params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion - newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 + params.Version.AppVersion = 10 initialState := state.State{ Version: state.Version{ @@ -137,10 +128,6 @@ func TestRollbackNoBlocks(t *testing.T) { ConsensusParams: *params, 
LastHeightConsensusParamsChanged: height + 1, } - require.NoError(t, stateStore.Save(initialState)) - blockStore.On("LoadBlockMeta", height).Return(nil) - - _, _, err := state.Rollback(blockStore, stateStore) - require.Error(t, err) - require.Contains(t, err.Error(), "block at height 100 not found") + require.NoError(t, stateStore.Bootstrap(initialState)) + return stateStore } From 88bdd328ede6e126e964584d9b379d6279c9b844 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 21 Oct 2021 18:02:26 +0200 Subject: [PATCH 113/174] e2e: evidence test refactor (#7146) I've seen this failure a few times and this change seems like it reduces the number of times that we're waiting (and can therefore hit a timeout.) --- test/e2e/runner/evidence.go | 43 ++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 25c4a1cc4..720357fae 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -33,14 +33,10 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo var targetNode *e2e.Node for _, idx := range r.Perm(len(testnet.Nodes)) { - targetNode = testnet.Nodes[idx] - - if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { - targetNode = nil - continue + if !testnet.Nodes[idx].Stateless() { + targetNode = testnet.Nodes[idx] + break } - - break } if targetNode == nil { @@ -55,15 +51,14 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } // request the latest block and validator set from the node - blockRes, err := client.Block(context.Background(), nil) + blockRes, err := client.Block(ctx, nil) if err != nil { return err } - evidenceHeight := blockRes.Block.Height - waitHeight := blockRes.Block.Height + 3 + evidenceHeight := blockRes.Block.Height - 3 nValidators := 100 - valRes, err := client.Validators(context.Background(), &evidenceHeight, nil, &nValidators) + valRes, err := client.Validators(ctx, &evidenceHeight, nil, &nValidators) if err != nil { return err } @@ -79,12 +74,8 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - wctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, waitHeight) + // request the latest block and validator set from the node + blockRes, err = client.Block(ctx, &evidenceHeight) if err != nil { return err } @@ -104,24 +95,28 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - _, err := client.BroadcastEvidence(context.Background(), ev) + _, err := client.BroadcastEvidence(ctx, ev) if err != nil { return err } } - wctx, cancel = context.WithTimeout(ctx, 30*time.Second) + logger.Info("Finished sending evidence", + "node", testnet.Name, + "amount", amount, + "height", evidenceHeight, + ) + + wctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2) + // wait for the node to make progress after submitting + // evidence (3 (forged height) + 1 (progress)) + _, err = waitForNode(wctx, targetNode, evidenceHeight+4) if err != nil { return err } - logger.Info(fmt.Sprintf("Finished sending evidence (height %d)", 
blockRes.Block.Height+2)) - return nil } From 68ca65f5d79905abd55ea999536b1a3685f9f19d Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Fri, 22 Oct 2021 14:07:46 +0200 Subject: [PATCH 114/174] pex: remove legacy proto messages (#7147) This PR implements the proto changes made in https://github.com/tendermint/spec/pull/352, removing the legacy messages that were used in the pex reactor. --- .gitignore | 7 + abci/types/types.pb.go | 200 ++++-- internal/p2p/pex/reactor.go | 153 +---- internal/p2p/pex/reactor_test.go | 176 +----- node/node_test.go | 2 +- proto/tendermint/blocksync/types.pb.go | 30 +- proto/tendermint/consensus/types.pb.go | 50 +- proto/tendermint/consensus/wal.pb.go | 25 +- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/proof.pb.go | 25 +- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 +- proto/tendermint/p2p/pex.go | 8 - proto/tendermint/p2p/pex.pb.go | 818 ++----------------------- proto/tendermint/p2p/types.pb.go | 25 +- proto/tendermint/privval/types.pb.go | 55 +- proto/tendermint/state/types.pb.go | 25 +- proto/tendermint/statesync/types.pb.go | 45 +- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 20 +- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 20 +- proto/tendermint/types/params.pb.go | 30 +- proto/tendermint/types/types.pb.go | 65 +- proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 5 +- scripts/protocgen.sh | 14 +- 28 files changed, 669 insertions(+), 1199 deletions(-) diff --git a/.gitignore b/.gitignore index 7f412d461..38a280c64 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,10 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +proto/tendermint/blocksync/types.proto +proto/tendermint/consensus/types.proto +proto/tendermint/mempool/*.proto +proto/tendermint/p2p/*.proto +proto/tendermint/statesync/*.proto +proto/tendermint/types/*.proto +proto/tendermint/version/*.proto diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6e4b53a1d..f424009bc 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -7715,7 +7715,10 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -7797,7 +7800,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -7847,7 +7853,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -7999,7 +8008,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8237,7 +8249,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8392,7 +8407,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8576,7 +8594,10 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8679,7 +8700,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8763,7 +8787,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8832,7 +8859,10 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8882,7 +8912,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8932,7 +8965,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9052,7 +9088,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9159,7 +9198,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9294,7 +9336,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9869,7 +9914,10 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9951,7 +9999,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10033,7 +10084,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10083,7 +10137,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10269,7 +10326,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10423,7 +10483,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10730,7 +10793,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10814,7 +10880,10 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11168,7 +11237,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11439,7 +11511,10 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11593,7 +11668,10 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11696,7 +11774,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11780,7 +11861,10 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11849,7 +11933,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ 
-11933,7 +12020,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12110,7 +12200,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12213,7 +12306,10 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12329,7 +12425,10 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12463,7 +12562,10 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12618,7 +12720,10 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12721,7 +12826,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12823,7 +12931,10 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12926,7 +13037,10 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13099,7 +13213,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13274,7 +13391,10 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 645cc19e1..9a5535a91 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -1,7 +1,6 @@ package pex import ( - "context" "fmt" "runtime/debug" "sync" @@ -21,8 +20,6 @@ var ( _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) -// TODO: Consolidate with params file. 
-// See https://github.com/tendermint/tendermint/issues/6371 const ( // PexChannel is a channel for PEX messages PexChannel = 0x00 @@ -46,9 +43,6 @@ const ( // the maximum amount of addresses that can be included in a response maxAddresses uint16 = 100 - // allocated time to resolve a node address into a set of endpoints - resolveTimeout = 3 * time.Second - // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -217,16 +211,22 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *protop2p.PexRequest: - // Check if the peer hasn't sent a prior request too close to this one - // in time. + // check if the peer hasn't sent a prior request too close to this one + // in time if err := r.markPeerRequest(envelope.From); err != nil { return err } - // parse and send the legacy PEX addresses - pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses)) + // request peers from the peer manager and parse the NodeAddresses into + // URL strings + nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) + pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) + for idx, addr := range nodeAddresses { + pexAddresses[idx] = protop2p.PexAddress{ + URL: addr.String(), + } + } r.pexCh.Out <- p2p.Envelope{ To: envelope.From, Message: &protop2p.PexResponse{Addresses: pexAddresses}, @@ -247,9 +247,7 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { } for _, pexAddress := range msg.Addresses { - // no protocol is prefixed so we assume the default (mconn) - peerAddress, err := p2p.ParseNodeAddress( - fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) + peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) if err != nil { continue } @@ -264,58 +262,6 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { r.totalPeers++ } - // V2 PEX MESSAGES - case *protop2p.PexRequestV2: - // check if the peer hasn't sent a prior request too close to this one - // in time - if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // request peers from the peer manager and parse the NodeAddresses into - // URL strings - nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) - pexAddressesV2 := make([]protop2p.PexAddressV2, len(nodeAddresses)) - for idx, addr := range nodeAddresses { - pexAddressesV2[idx] = protop2p.PexAddressV2{ - URL: addr.String(), - } - } - r.pexCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &protop2p.PexResponseV2{Addresses: pexAddressesV2}, - } - - case *protop2p.PexResponseV2: - // check if the response matches a request that was made to that peer - if err := r.markPeerResponse(envelope.From); err != nil { - return err - } - - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) - } - - for _, pexAddress := range msg.Addresses { - peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) - if err != nil { - continue - } - added, err := r.peerManager.Add(peerAddress) - if err != nil { - logger.Error("failed to add V2 PEX address", "address", peerAddress, "err", err) - } - if added { - r.newPeers++ - logger.Debug("added V2 PEX address", "address", peerAddress) - } - r.totalPeers++ - } - default: return fmt.Errorf("received unknown message: %T", msg) } @@ 
-323,55 +269,6 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { return nil } -// resolve resolves a set of peer addresses into PEX addresses. -// -// FIXME: This is necessary because the current PEX protocol only supports -// IP/port pairs, while the P2P stack uses NodeAddress URLs. The PEX protocol -// should really use URLs too, to exchange DNS names instead of IPs and allow -// different transport protocols (e.g. QUIC and MemoryTransport). -// -// FIXME: We may want to cache and parallelize this, but for now we'll just rely -// on the operating system to cache it for us. -func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { - limit := len(addresses) - pexAddresses := make([]protop2p.PexAddress, 0, limit) - - for _, address := range addresses { - ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) - endpoints, err := address.Resolve(ctx) - r.Logger.Debug("resolved node address", "endpoints", endpoints) - cancel() - - if err != nil { - r.Logger.Debug("failed to resolve address", "address", address, "err", err) - continue - } - - for _, endpoint := range endpoints { - r.Logger.Debug("checking endpint", "IP", endpoint.IP, "Port", endpoint.Port) - if len(pexAddresses) >= limit { - return pexAddresses - - } else if endpoint.IP != nil { - r.Logger.Debug("appending pex address") - // PEX currently only supports IP-networked transports (as - // opposed to e.g. p2p.MemoryTransport). - // - // FIXME: as the PEX address contains no information about the - // protocol, we jam this into the ID. We won't need to this once - // we support URLs - pexAddresses = append(pexAddresses, protop2p.PexAddress{ - ID: string(address.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - } - - return pexAddresses -} - // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
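// Illustrative sketch, not part of the patch: after this change a PEX address
// is just the string form of a p2p.NodeAddress, so the advertise and receive
// paths shown in the hunks above are symmetric. The function name, node ID,
// and endpoint below are hypothetical placeholders; only ParseNodeAddress,
// NodeAddress.String, and PexAddress.URL from the surrounding code are assumed.
func examplePexAddressRoundTrip() error {
	// hypothetical peer: <40-hex-char node ID>@host:port (mconn protocol implied)
	addr, err := p2p.ParseNodeAddress("c0ffee0000000000000000000000000000000000@203.0.113.1:26656")
	if err != nil {
		return err
	}

	// advertise side: NodeAddress -> PexAddress, as in handlePexMessage above
	pexAddr := protop2p.PexAddress{URL: addr.String()}

	// receive side: PexAddress -> NodeAddress; note the reactor skips a
	// malformed URL with `continue` instead of failing the whole response
	roundTripped, err := p2p.ParseNodeAddress(pexAddr.URL)
	if err != nil {
		return err
	}
	_ = roundTripped
	return nil
}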
@@ -444,17 +341,10 @@ func (r *Reactor) sendRequestForPeers() { break } - // The node accommodates for both pex systems - if r.isLegacyPeer(peerID) { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequest{}, - } - } else { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequestV2{}, - } + // send out the pex request + r.pexCh.Out <- p2p.Envelope{ + To: peerID, + Message: &protop2p.PexRequest{}, } // remove the peer from the available peers list and mark it in the requestsSent map @@ -538,14 +428,3 @@ func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.availablePeers[peer] = struct{}{} return nil } - -// all addresses must use a MCONN protocol for the peer to be considered part of the -// legacy p2p pex system -func (r *Reactor) isLegacyPeer(peer types.NodeID) bool { - for _, addr := range r.peerManager.Addresses(peer) { - if addr.Protocol != p2p.MConnProtocol { - return false - } - } - return true -} diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index b7e1a01c3..04b347cb5 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,7 +1,6 @@ package pex_test import ( - "context" "strings" "testing" "time" @@ -27,7 +26,6 @@ const ( firstNode = 0 secondNode = 1 thirdNode = 2 - fourthNode = 3 ) func TestReactorBasic(t *testing.T) { @@ -44,8 +42,8 @@ func TestReactorBasic(t *testing.T) { // assert that when a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil)) + testNet.sendRequest(t, firstNode, secondNode) + testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { @@ -71,17 +69,17 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*p2pproto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponse) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } peerErr := <-r.pexErrCh @@ -102,8 +100,8 @@ func TestReactorSendsResponseWithoutRequest(t *testing.T) { // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two responses to counter that. 
- testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) @@ -136,10 +134,10 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { require.NoError(t, err) require.True(t, added) - addresses := make([]p2pproto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddress, 101) for i := 0; i < len(addresses); i++ { nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = p2pproto.PexAddressV2{ + addresses[i] = p2pproto.PexAddress{ URL: nodeAddress.String(), } } @@ -152,12 +150,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequest); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &p2pproto.PexResponseV2{ + Message: &p2pproto.PexResponse{ Addresses: addresses, }, } @@ -239,38 +237,6 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } } -func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 3, - }) - testNet.connectAll(t) - testNet.start(t) - t.Log(testNet.nodes) - - // mock node sends a V1 Pex message to the second node - testNet.sendRequest(t, firstNode, secondNode, false) - addrs := testNet.getAddressesFor(t, []int{thirdNode}) - testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs) -} - -func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 4, - BufferSize: 4, - }) - testNet.connectPeers(t, firstNode, secondNode) - testNet.connectPeers(t, firstNode, thirdNode) - testNet.connectPeers(t, firstNode, fourthNode) - testNet.start(t) - - testNet.listenForRequest(t, secondNode, firstNode, shortWait) - // send a v1 response instead - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false) - testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait) -} - type singleTestReactor struct { reactor *pex.Reactor pexInCh chan p2p.Envelope @@ -484,11 +450,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, r.logger.Info("Listening for request", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexRequestV2) + _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message) + require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -503,11 +469,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponse) return ok 
&& msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*p2pproto.PexResponseV2) + m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") return true @@ -519,34 +485,14 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) r.listenFor(t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForResponse( - t *testing.T, - fromNode, toNode int, - waitPeriod time.Duration, - addresses []p2pproto.PexAddressV2, -) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) - to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) - r.logger.Info("message", msg, "ok", ok) - return ok && msg.From == from - } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message) - return true - } - r.listenFor(t, to, conditional, assertion, waitPeriod) -} - -func (r *reactorTestSuite) listenForLegacyResponse( t *testing.T, fromNode, toNode int, waitPeriod time.Duration, @@ -556,6 +502,7 @@ func (r *reactorTestSuite) listenForLegacyResponse( to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) + r.logger.Info("message", msg, "ok", ok) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { @@ -591,46 +538,22 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 { - addresses := make([]p2pproto.PexAddressV2, len(nodes)) +func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { + addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - addresses[idx] = p2pproto.PexAddressV2{ + addresses[idx] = p2pproto.PexAddress{ URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress { - addresses := make([]p2pproto.PexAddress, len(nodes)) - for idx, node := range nodes { - nodeID := r.nodes[node] - nodeAddrs := r.network.Nodes[nodeID].NodeAddress - endpoints, err := nodeAddrs.Resolve(context.Background()) - require.NoError(t, err) - require.Len(t, endpoints, 1) - addresses[idx] = p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoints[0].IP.String(), - Port: uint32(endpoints[0].Port), - } - } - return addresses -} - -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) { +func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) { to, from := r.checkNodePair(t, toNode, fromNode) - if v2 { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequestV2{}, - } - } else { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequest{}, - } + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexRequest{}, } } @@ -638,25 +561,14 @@ func (r *reactorTestSuite) sendResponse( t *testing.T, fromNode, toNode int, withNodes []int, - v2 bool, ) { from, to 
:= r.checkNodePair(t, fromNode, toNode) - if v2 { - addrs := r.getV2AddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponseV2{ - Addresses: addrs, - }, - } - } else { - addrs := r.getAddressesFor(t, withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponse{ - Addresses: addrs, - }, - } + addrs := r.getAddressesFor(withNodes) + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexResponse{ + Addresses: addrs, + }, } } @@ -759,32 +671,6 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int require.True(t, added) } -// nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress { - var addresses []p2pproto.PexAddress - for _, i := range nodeIndices { - if i < len(r.nodes) { - require.Fail(t, "index for pex address is greater than number of nodes") - } - nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - endpoints, err := nodeAddrs.Resolve(ctx) - cancel() - require.NoError(t, err) - for _, endpoint := range endpoints { - if endpoint.IP != nil { - addresses = append(addresses, p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - - } - return addresses -} - func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) diff --git a/node/node_test.go b/node/node_test.go index 19f27a640..0586cc491 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -132,7 +132,7 @@ func TestNodeSetAppVersion(t *testing.T) { n := getTestNode(t, cfg, log.TestingLogger()) // default config uses the kvstore app - var appVersion uint64 = kvstore.ProtocolVersion + appVersion := kvstore.ProtocolVersion // check version is set in state state, err := n.stateStore.Load() diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index fcbef7107..43ad139ec 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -899,7 +899,10 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -968,7 +971,10 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1054,7 +1060,10 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1104,7 +1113,10 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1192,7 +1204,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 
{ + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1417,7 +1432,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 6372a88d4..67efd1c2c 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1932,7 +1932,10 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2109,7 +2112,10 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2192,7 +2198,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2313,7 +2322,10 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2434,7 +2446,10 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2520,7 +2535,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2646,7 +2664,10 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2786,7 +2807,10 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2959,7 +2983,10 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3324,7 +3351,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index 
fd80819cd..86ff1be01 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,7 +921,10 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1061,7 +1064,10 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1130,7 +1136,10 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1320,7 +1329,10 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1439,7 +1451,10 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 24c6c1b1b..8ff4c4a4f 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,7 +687,10 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 82fb943fc..97350c64c 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,7 +820,10 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -940,7 +943,10 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1086,7 +1092,10 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1236,7 +1245,10 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1320,7 +1332,10 @@ func (m *ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index c0ebcb976..ad87f854f 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,7 +307,10 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 11e259551..3487652bc 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,7 +370,10 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -455,7 +458,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 47a3bb0cd..7c26d3fcd 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,7 +723,10 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -773,7 +776,10 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -896,7 +902,10 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1051,7 +1060,10 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1168,7 +1180,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 38c8239dd..61036142f 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -13,10 +13,6 @@ func (m *PexMessage) Wrap(pb proto.Message) error { m.Sum = &PexMessage_PexRequest{PexRequest: msg} case *PexResponse: m.Sum = &PexMessage_PexResponse{PexResponse: msg} - case *PexRequestV2: - m.Sum = &PexMessage_PexRequestV2{PexRequestV2: msg} - case *PexResponseV2: - m.Sum = &PexMessage_PexResponseV2{PexResponseV2: msg} default: return fmt.Errorf("unknown pex message: %T", msg) } @@ -31,10 +27,6 @@ func (m *PexMessage) Unwrap() (proto.Message, error) { 
return msg.PexRequest, nil case *PexMessage_PexResponse: return msg.PexResponse, nil - case *PexMessage_PexRequestV2: - return msg.PexRequestV2, nil - case *PexMessage_PexResponseV2: - return msg.PexResponseV2, nil default: return nil, fmt.Errorf("unknown pex message: %T", msg) } diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 63882c364..25d636e43 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PexAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (m *PexAddress) Reset() { *m = PexAddress{} } @@ -62,27 +60,13 @@ func (m *PexAddress) XXX_DiscardUnknown() { var xxx_messageInfo_PexAddress proto.InternalMessageInfo -func (m *PexAddress) GetID() string { +func (m *PexAddress) GetURL() string { if m != nil { - return m.ID + return m.URL } return "" } -func (m *PexAddress) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -func (m *PexAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type PexRequest struct { } @@ -163,136 +147,10 @@ func (m *PexResponse) GetAddresses() []PexAddress { return nil } -type PexAddressV2 struct { - URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` -} - -func (m *PexAddressV2) Reset() { *m = PexAddressV2{} } -func (m *PexAddressV2) String() string { return proto.CompactTextString(m) } -func (*PexAddressV2) ProtoMessage() {} -func (*PexAddressV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{3} -} -func (m *PexAddressV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexAddressV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexAddressV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexAddressV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddressV2.Merge(m, src) -} -func (m *PexAddressV2) XXX_Size() int { - return m.Size() -} -func (m *PexAddressV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddressV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexAddressV2 proto.InternalMessageInfo - -func (m *PexAddressV2) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -type PexRequestV2 struct { -} - -func (m *PexRequestV2) Reset() { *m = PexRequestV2{} } -func (m *PexRequestV2) String() string { return proto.CompactTextString(m) } -func (*PexRequestV2) ProtoMessage() {} -func (*PexRequestV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{4} -} -func (m *PexRequestV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexRequestV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexRequestV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexRequestV2.Merge(m, src) -} -func (m *PexRequestV2) XXX_Size() int 
{ - return m.Size() -} -func (m *PexRequestV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexRequestV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexRequestV2 proto.InternalMessageInfo - -type PexResponseV2 struct { - Addresses []PexAddressV2 `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` -} - -func (m *PexResponseV2) Reset() { *m = PexResponseV2{} } -func (m *PexResponseV2) String() string { return proto.CompactTextString(m) } -func (*PexResponseV2) ProtoMessage() {} -func (*PexResponseV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{5} -} -func (m *PexResponseV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexResponseV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexResponseV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexResponseV2.Merge(m, src) -} -func (m *PexResponseV2) XXX_Size() int { - return m.Size() -} -func (m *PexResponseV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexResponseV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexResponseV2 proto.InternalMessageInfo - -func (m *PexResponseV2) GetAddresses() []PexAddressV2 { - if m != nil { - return m.Addresses - } - return nil -} - type PexMessage struct { // Types that are valid to be assigned to Sum: // *PexMessage_PexRequest // *PexMessage_PexResponse - // *PexMessage_PexRequestV2 - // *PexMessage_PexResponseV2 Sum isPexMessage_Sum `protobuf_oneof:"sum"` } @@ -300,7 +158,7 @@ func (m *PexMessage) Reset() { *m = PexMessage{} } func (m *PexMessage) String() string { return proto.CompactTextString(m) } func (*PexMessage) ProtoMessage() {} func (*PexMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{6} + return fileDescriptor_81c2f011fd13be57, []int{3} } func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,22 +194,14 @@ type isPexMessage_Sum interface { } type PexMessage_PexRequest struct { - PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` + PexRequest *PexRequest `protobuf:"bytes,3,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } type PexMessage_PexResponse struct { - PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` -} -type PexMessage_PexRequestV2 struct { - PexRequestV2 *PexRequestV2 `protobuf:"bytes,3,opt,name=pex_request_v2,json=pexRequestV2,proto3,oneof" json:"pex_request_v2,omitempty"` -} -type PexMessage_PexResponseV2 struct { - PexResponseV2 *PexResponseV2 `protobuf:"bytes,4,opt,name=pex_response_v2,json=pexResponseV2,proto3,oneof" json:"pex_response_v2,omitempty"` + PexResponse *PexResponse `protobuf:"bytes,4,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` } -func (*PexMessage_PexRequest) isPexMessage_Sum() {} -func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (*PexMessage_PexRequestV2) isPexMessage_Sum() {} -func (*PexMessage_PexResponseV2) isPexMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { @@ -374,27 +224,11 @@ func (m *PexMessage) GetPexResponse() *PexResponse { 
return nil } -func (m *PexMessage) GetPexRequestV2() *PexRequestV2 { - if x, ok := m.GetSum().(*PexMessage_PexRequestV2); ok { - return x.PexRequestV2 - } - return nil -} - -func (m *PexMessage) GetPexResponseV2() *PexResponseV2 { - if x, ok := m.GetSum().(*PexMessage_PexResponseV2); ok { - return x.PexResponseV2 - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PexMessage_PexRequest)(nil), (*PexMessage_PexResponse)(nil), - (*PexMessage_PexRequestV2)(nil), - (*PexMessage_PexResponseV2)(nil), } } @@ -402,42 +236,33 @@ func init() { proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") - proto.RegisterType((*PexAddressV2)(nil), "tendermint.p2p.PexAddressV2") - proto.RegisterType((*PexRequestV2)(nil), "tendermint.p2p.PexRequestV2") - proto.RegisterType((*PexResponseV2)(nil), "tendermint.p2p.PexResponseV2") proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x8a, 0xda, 0x40, - 0x14, 0xc7, 0xf3, 0x61, 0x2d, 0x9e, 0x44, 0x0b, 0x43, 0x29, 0xa9, 0x6d, 0xa3, 0xe4, 0xca, 0xde, - 0x24, 0x30, 0xa5, 0x97, 0x2d, 0x36, 0x08, 0xb5, 0x50, 0xa9, 0x1d, 0xd8, 0x5c, 0xec, 0x8d, 0xe8, - 0x66, 0xc8, 0x06, 0x56, 0x33, 0x9b, 0x49, 0x16, 0x1f, 0x63, 0xdf, 0x61, 0x5f, 0xc6, 0x4b, 0x2f, - 0xf7, 0x4a, 0x96, 0xf8, 0x22, 0x8b, 0x13, 0x31, 0x23, 0xba, 0x7b, 0x37, 0xe7, 0x7f, 0xbe, 0x7e, - 0xe7, 0xcc, 0x01, 0x2b, 0xa3, 0x8b, 0x90, 0xa6, 0xf3, 0x78, 0x91, 0x79, 0x0c, 0x33, 0x8f, 0xd1, - 0xa5, 0xcb, 0xd2, 0x24, 0x4b, 0x50, 0xab, 0xf2, 0xb8, 0x0c, 0xb3, 0xf6, 0xfb, 0x28, 0x89, 0x12, - 0xe1, 0xf2, 0x76, 0xaf, 0x32, 0xca, 0x19, 0x03, 0x8c, 0xe9, 0xf2, 0x57, 0x18, 0xa6, 0x94, 0x73, - 0xf4, 0x01, 0xb4, 0x38, 0xb4, 0xd4, 0xae, 0xda, 0x6b, 0xf8, 0xf5, 0x62, 0xd3, 0xd1, 0xfe, 0x0c, - 0x88, 0x16, 0x87, 0x42, 0x67, 0x96, 0x26, 0xe9, 0x63, 0xa2, 0xc5, 0x0c, 0x21, 0xa8, 0xb1, 0x24, - 0xcd, 0x2c, 0xbd, 0xab, 0xf6, 0x9a, 0x44, 0xbc, 0x1d, 0x53, 0x54, 0x24, 0xf4, 0x36, 0xa7, 0x3c, - 0x73, 0x46, 0x60, 0x08, 0x8b, 0xb3, 0x64, 0xc1, 0x29, 0xfa, 0x09, 0x8d, 0x69, 0xd9, 0x8b, 0x72, - 0x4b, 0xed, 0xea, 0x3d, 0x03, 0xb7, 0xdd, 0x63, 0x50, 0xb7, 0xe2, 0xf1, 0x6b, 0xab, 0x4d, 0x47, - 0x21, 0x55, 0x8a, 0xf3, 0x15, 0xcc, 0xca, 0x1d, 0x60, 0xf4, 0x11, 0xf4, 0x3c, 0xbd, 0xd9, 0x13, - 0xbf, 0x2d, 0x36, 0x1d, 0xfd, 0x82, 0xfc, 0x25, 0x3b, 0xcd, 0x69, 0x89, 0xd0, 0x3d, 0x47, 0x80, - 0x9d, 0xff, 0xd0, 0x94, 0x48, 0x02, 0x8c, 0xfa, 0xa7, 0x2c, 0x9f, 0x5f, 0x66, 0x09, 0xf0, 0x29, - 0xcd, 0x83, 0x26, 0x66, 0x1d, 0x51, 0xce, 0xa7, 0x11, 0x45, 0x3f, 0xc0, 0x60, 0x74, 0x39, 0x49, - 0xcb, 0x96, 0x02, 0xea, 0xfc, 0x78, 0x7b, 0xa8, 0xa1, 0x42, 0x80, 0x1d, 0x2c, 0xd4, 0x07, 0xb3, - 0x4c, 0x2f, 0x09, 0xc5, 0xba, 0x0d, 0xfc, 0xe9, 0x6c, 0x7e, 0x19, 0x32, 0x54, 0x88, 0xc1, 0xa4, - 0xed, 0x0e, 0xa0, 0x25, 0x01, 0x4c, 0xee, 0xb0, 0xf8, 0x98, 0xf3, 0x63, 0x1d, 0x16, 0x33, 0x54, - 0x88, 0xc9, 0x24, 0x1b, 0xfd, 0x86, 0x77, 0x32, 0xc7, 0xae, 0x4c, 0x4d, 0x94, 0xf9, 0xf2, 0x0a, - 0x8a, 0xa8, 0xd3, 0x64, 0xb2, 0xe0, 0xbf, 0x01, 0x9d, 0xe7, 0x73, 0xff, 0xdf, 0xaa, 0xb0, 0xd5, - 0x75, 0x61, 0xab, 0x4f, 0x85, 0xad, 0xde, 0x6f, 
0x6d, 0x65, 0xbd, 0xb5, 0x95, 0xc7, 0xad, 0xad, - 0x5c, 0x7e, 0x8f, 0xe2, 0xec, 0x3a, 0x9f, 0xb9, 0x57, 0xc9, 0xdc, 0x93, 0xee, 0x58, 0x3e, 0x69, - 0x71, 0xaf, 0xc7, 0x37, 0x3e, 0xab, 0x0b, 0xf5, 0xdb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x9b, 0xfd, 0x75, 0xfc, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x89, 0xa4, 0xe7, + 0xa7, 0xe7, 0x83, 0xa5, 0xf4, 0x41, 0x2c, 0x88, 0x2a, 0x25, 0x63, 0x2e, 0xae, 0x80, 0xd4, 0x0a, + 0xc7, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x21, 0x49, 0x2e, 0xe6, 0xd2, 0xa2, 0x1c, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xf6, 0x47, 0xf7, 0xe4, 0x99, 0x43, 0x83, 0x7c, 0x82, 0x40, 0x62, + 0x5e, 0x2c, 0x1c, 0x4c, 0x02, 0xcc, 0x5e, 0x2c, 0x1c, 0xcc, 0x02, 0x2c, 0x4a, 0x3c, 0x60, 0x4d, + 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xbe, 0x5c, 0xdc, 0x60, 0x5e, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x90, 0x1d, 0x17, 0x67, 0x22, 0xc4, 0xb8, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, + 0x0d, 0x6e, 0x23, 0x29, 0x3d, 0x54, 0xb7, 0xe8, 0x21, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xa1, 0x45, 0x69, 0x01, 0x23, 0xd8, 0x74, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, + 0x21, 0x5b, 0x2e, 0xee, 0x82, 0xd4, 0x8a, 0xf8, 0x22, 0x88, 0x65, 0x12, 0xcc, 0x0a, 0x8c, 0x38, + 0x0c, 0x84, 0x3a, 0xc7, 0x83, 0x21, 0x88, 0xab, 0x00, 0xce, 0x13, 0x72, 0xe0, 0xe2, 0x81, 0x68, + 0x87, 0xb8, 0x4e, 0x82, 0x05, 0xac, 0x5f, 0x1a, 0xab, 0x7e, 0x88, 0x12, 0x0f, 0x86, 0x20, 0xee, + 0x02, 0x04, 0xd7, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0x8b, 0x85, 0x83, 0x51, 0x80, 0x09, + 0x12, 0x0a, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, + 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, + 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x33, 0xc8, 0x91, 0x04, + 0x8e, 0x01, 0xd4, 0x58, 0x4b, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa7, + 0x1d, 0xdd, 0x6f, 0xce, 0x01, 0x00, 0x00, } func (m *PexAddress) Marshal() (dAtA []byte, err error) { @@ -460,22 +285,10 @@ func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Port != 0 { - i = encodeVarintPex(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + if len(m.URL) > 0 { + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa } @@ -542,96 +355,6 @@ func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddressV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexAddressV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexAddressV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.URL) > 0 { - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i 
= encodeVarintPex(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PexRequestV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PexResponseV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -681,7 +404,7 @@ func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } @@ -702,48 +425,6 @@ func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexRequestV2 != nil { - { - size, err := m.PexRequestV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexResponseV2 != nil { - { - size, err := m.PexResponseV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil @@ -765,17 +446,10 @@ func (m *PexAddress) Size() (n int) { } var l int _ = l - l = len(m.ID) + l = len(m.URL) if l > 0 { n += 1 + l + sovPex(uint64(l)) } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovPex(uint64(m.Port)) - } return n } @@ -803,43 +477,6 @@ func (m *PexResponse) Size() (n int) { return n } -func (m *PexAddressV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - return n -} - -func (m *PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func 
(m *PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovPex(uint64(l)) - } - } - return n -} - func (m *PexMessage) Size() (n int) { if m == nil { return 0 @@ -876,30 +513,6 @@ func (m *PexMessage_PexResponse) Size() (n int) { } return n } -func (m *PexMessage_PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequestV2 != nil { - l = m.PexRequestV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponseV2 != nil { - l = m.PexResponseV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} func sovPex(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -938,7 +551,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -966,66 +579,18 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1075,7 +640,10 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1159,223 +727,10 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { return ErrInvalidLengthPex } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexAddressV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexAddressV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddressV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexRequestV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexRequestV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexResponseV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexResponseV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, PexAddressV2{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1419,7 +774,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) } @@ -1454,7 +809,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } @@ -1489,83 +844,16 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexRequestV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexRequestV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexRequestV2{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexResponseV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexResponseV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexResponseV2{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index bffa6884f..a0e647ee7 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,7 +917,10 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1227,7 +1230,10 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + 
if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1341,7 +1347,10 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1493,7 +1502,10 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1666,7 +1678,10 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index 56b35e727..da30f7527 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,7 +1708,10 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1909,7 +1915,10 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2027,7 +2036,10 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2146,7 +2158,10 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2264,7 +2279,10 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2383,7 +2401,10 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2433,7 +2454,10 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2483,7 +2507,10 @@ func (m 
*PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2813,7 +2840,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2930,7 +2960,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada..d94724fff 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -1069,7 +1069,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1174,7 +1177,10 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1276,7 +1282,10 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1391,7 +1400,10 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1857,7 +1869,10 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 5541c2803..93e844730 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,7 +1740,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1965,7 +1971,10 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2072,7 +2081,10 @@ 
func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2233,7 +2245,10 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2302,7 +2317,10 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2388,7 +2406,10 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2457,7 +2478,10 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2559,7 +2583,10 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index f2077aad8..aacb90fab 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,7 +389,10 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index 709831043..38b17ddb1 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -775,7 +775,10 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -878,7 +881,10 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1087,7 +1093,10 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1277,7 +1286,10 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index a9aa26a79..1c49aef64 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,7 +285,10 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index daab3dc34..3d9e8f2c5 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -825,7 +825,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1018,7 +1021,10 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1209,7 +1215,10 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1293,7 +1302,10 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 5a9f103a9..a295bca9e 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -1123,7 +1123,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1211,7 +1214,10 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1332,7 +1338,10 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1414,7 +1423,10 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1483,7 +1495,10 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return 
ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1571,7 +1586,10 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 73090558e..27a936097 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -2267,7 +2267,10 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2403,7 +2406,10 @@ func (m *Part) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2520,7 +2526,10 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3026,7 +3035,10 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3108,7 +3120,10 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3368,7 +3383,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3523,7 +3541,10 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3693,7 +3714,10 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3919,7 +3943,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4041,7 +4068,10 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4163,7 +4193,10 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + 
if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4317,7 +4350,10 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4471,7 +4507,10 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 23b30ed3c..2c3468b83 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,7 +583,10 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -738,7 +741,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -843,7 +849,10 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 6e224392e..9aeb3ae1a 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,7 +265,10 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index cd205eee8..240570d61 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -1,11 +1,15 @@ #!/usr/bin/env bash -VERS=0.7.1 - set -eo pipefail +: ${VERS:=master} +URL_PATH=archive/ +if [[ VERS -ne master ]]; then + URL_PATH=archive/refs/tags/v +fi + # Edit this line to clone your branch, if you are modifying protobuf files -curl -qL "https://github.com/tendermint/spec/archive/refs/tags/v${VERS}.tar.gz" | tar -xjf - spec-"$VERS"/proto/ +curl -qL "https://github.com/tendermint/spec/${URL_PATH}${VERS}.tar.gz" | tar -xjf - spec-"$VERS"/proto/ cp -r ./spec-"$VERS"/proto/tendermint/** ./proto/tendermint @@ -17,7 +21,7 @@ echo "proto files have been generated" echo "removing copied files" -rm -rf ./proto/tendermint/abci/types.proto +rm -rf ./proto/tendermint/abci rm -rf ./proto/tendermint/blocksync/types.proto rm -rf ./proto/tendermint/consensus/types.proto rm -rf ./proto/tendermint/mempool/types.proto @@ -30,6 +34,6 @@ rm -rf ./proto/tendermint/types/evidence.proto rm -rf ./proto/tendermint/types/params.proto rm -rf ./proto/tendermint/types/types.proto rm -rf ./proto/tendermint/types/validator.proto -rm -rf ./proto/tendermint/version/version.proto +rm -rf ./proto/tendermint/version/types.proto rm -rf ./spec-"$VERS" From 
0d68161cc8879f2a8d4d4bee4c58a76fc067945c Mon Sep 17 00:00:00 2001 From: Marko Date: Fri, 22 Oct 2021 16:14:43 +0000 Subject: [PATCH 115/174] docs: add reactor sections (#6510) --- docs/tendermint-core/README.md | 7 +- .../{block-sync.md => block-sync/README.md} | 10 +- .../block-sync/img/bc-reactor-routines.png | Bin 0 -> 271695 bytes .../block-sync/img/bc-reactor.png | Bin 0 -> 44211 bytes .../block-sync/implementation.md | 47 +++ docs/tendermint-core/block-sync/reactor.md | 278 +++++++++++++ docs/tendermint-core/consensus/README.md | 41 ++ docs/tendermint-core/consensus/reactor.md | 370 ++++++++++++++++++ docs/tendermint-core/evidence/README.md | 13 + docs/tendermint-core/mempool.md | 48 --- docs/tendermint-core/mempool/README.md | 71 ++++ docs/tendermint-core/mempool/config.md | 105 +++++ docs/tendermint-core/pex/README.md | 177 +++++++++ docs/tendermint-core/state-sync.md | 18 - docs/tendermint-core/state-sync/README.md | 85 ++++ 15 files changed, 1200 insertions(+), 70 deletions(-) rename docs/tendermint-core/{block-sync.md => block-sync/README.md} (93%) create mode 100644 docs/tendermint-core/block-sync/img/bc-reactor-routines.png create mode 100644 docs/tendermint-core/block-sync/img/bc-reactor.png create mode 100644 docs/tendermint-core/block-sync/implementation.md create mode 100644 docs/tendermint-core/block-sync/reactor.md create mode 100644 docs/tendermint-core/consensus/README.md create mode 100644 docs/tendermint-core/consensus/reactor.md create mode 100644 docs/tendermint-core/evidence/README.md delete mode 100644 docs/tendermint-core/mempool.md create mode 100644 docs/tendermint-core/mempool/README.md create mode 100644 docs/tendermint-core/mempool/config.md create mode 100644 docs/tendermint-core/pex/README.md delete mode 100644 docs/tendermint-core/state-sync.md create mode 100644 docs/tendermint-core/state-sync/README.md diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index f83349db2..c4c54a514 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -5,8 +5,6 @@ parent: order: 5 --- -# Overview - This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) @@ -17,5 +15,8 @@ This section dives into the internals of Go-Tendermint. - [State Sync](./state-sync.md) - [Mempool](./mempool.md) - [Light Client](./light-client.md) +- [Consensus](./consensus/README.md) +- [Pex](./pex/README.md) +- [Evidence](./evidence/README.md) -For full specifications refer to the [spec repo](https://github.com/tendermint/spec). \ No newline at end of file +For full specifications refer to the [spec repo](https://github.com/tendermint/spec). diff --git a/docs/tendermint-core/block-sync.md b/docs/tendermint-core/block-sync/README.md similarity index 93% rename from docs/tendermint-core/block-sync.md rename to docs/tendermint-core/block-sync/README.md index 43e849fcc..3ffb0953d 100644 --- a/docs/tendermint-core/block-sync.md +++ b/docs/tendermint-core/block-sync/README.md @@ -1,7 +1,11 @@ --- -order: 10 +order: 1 +parent: + title: Block Sync + order: 6 --- + # Block Sync *Formerly known as Fast Sync* @@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height The user can query the events by subscribing `EventQueryBlockSyncStatus` Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. 
+
+## Implementation
+
+To read more on the implementation, please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md)
diff --git a/docs/tendermint-core/block-sync/img/bc-reactor-routines.png b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png
new file mode 100644
index 0000000000000000000000000000000000000000..3f574a79b1ad304e7c03cfdb717c4f9aa600359a
GIT binary patch
literal 271695
[base85-encoded PNG image data omitted]
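The Block Sync README patched above notes that the node exposes the fast-sync `complete` status and the state `height` as events, queryable by subscribing to `EventQueryBlockSyncStatus`. A minimal sketch of such a subscription over the websocket RPC client follows; it assumes the 0.35-era `rpc/client/http` API and that `types.EventQueryBlockSyncStatus` corresponds to the query string `tm.event = 'BlockSyncStatus'` — both are assumptions, not verified against this series:

```go
package main

import (
	"context"
	"log"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Assumption: a local node with the RPC listener on the default port.
	c, err := rpchttp.New("http://127.0.0.1:26657")
	if err != nil {
		log.Fatal(err)
	}
	// Start the websocket machinery that Subscribe relies on.
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = c.Stop() }()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Assumption: this is the query string behind types.EventQueryBlockSyncStatus.
	events, err := c.Subscribe(ctx, "block-sync-watcher", "tm.event = 'BlockSyncStatus'")
	if err != nil {
		log.Fatal(err)
	}
	for ev := range events {
		// Per the README above, each event carries the `complete` flag
		// and the state `height`.
		log.Printf("block sync status: %+v", ev.Data)
	}
}
```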
zNZ|GM^7?wAL+z}qUZN`DYtZK9icX{Y-CU2cpAgK%#N>7-+kdRt)iwdv+tagzL?#NC zW2#f!Q!`{k6%75K9D9Egz(l9xCd;8$E}DA>0&Fnb8OgNie81WmXt$DOHwlvJD8NaB zGs4)kmWYw0s3=VD)WbsqD!r3pIg+6whHBAYn=ci!{yiLx+wYP4t41lL0d*cfozWn& zzRy26&R8~py3NRoVNF#);rr{sPTC(Tywvny_#=t99G1nyQ7Iy^ET3-9u*NO>3E|Uc z(b_9gxqTirTDfclKCG*rTLYqq?OgCL!c6W!$pmhxq!@RKZf*^q@{Xj$ zk(sB#nN=d{>3`RtT&cnLh^;J0MxpaI-Q^cbXwt@K} zNYZtueA8)GNB5r)#Sc#(>dhccthN?Q76@w&=`Q&-=j~2H3sOQG-*BPJc?I;C+^p&s zbK9s>XV|2Cm1^jDf3J&DeF)a|wq_GFn=yc3m!7nXL;_mK|Op~LNZitME7NXhX z&rxy^m3gq*u-_hx^+x&bUWGlo9`DG@tvl9hG`Fhs8x0TQPT>+4Aw?&L5s7-mV@5qA zOyhe#l#k~WtKa9nsdYyPZoR3}Dxv#6uz*@==q#X-fli(poz5hZ>>e^2o;Nz1v+et( z2Dc6I3SU>MqWw%C6gjr0g?Ec6=)UDl0LLd7#UJ8S@0l`uL!ss_!2;cxu z^u%-N3O)DP86jvr?S|V=&d$!h&){?4NAASEsoq>BE z-R{nZdsr1v&^d_QF<)Zv@F-9RdOmm^ z5<^y+oOi)#Aw!=<7l+kg-wS5p`~K3*ABU9|hon?1f2aOIm~fs=ab`ee)(11-UKQ(m zsM)@hh&CpP92{sv;?9EK+_js*rW_NrOgn{`dU@BELBrb;tv6GS?vs#A10LuJkvykVgb`~C%fKL$pU$-?&so=whK z)!7jAzAkL|fM@K^f;|rI0a2ZMc%>YeA9e$Dws&36^KnQ|(2tV9p&Hzp)`qonm(kd1 zt$K$QLWMMTl@1?Y-*P{G!hcAH10!EOTqDN6ydN9b;3MI$`gdv_eh)`nYwUZYDkKYA-Wf&RaX~Y9y?1@J$y7h5&~h*M6fOTv%#Pia(RX-REr&i(F$Bi z%j6Uk#k7y|0(i-6#^HhS+nbq$&9#4ID zn&k{?yM5K&#UfW#ssQ3=ca+QI)~9tb2F2i5;;`%zgzO4c@-ROJH|PII%6Y;Q^JY-| zq3+J#VyK9oA^o@lL6q{Rx1NxW z*Wt&^V$LInvebE4G)XA4wBg*`bP)X&*>{e{-0U++0_o!(KRm%*5IW0}Rl8xI1kXhg zM6We^N_3v@tHg9>n7%-KZh59@lURJO?q#xjA8)mP9wP`HPL$!rdQpqlE>gOb#eF3s z175(g=RMJ*H%IU}0J6PM4Pn}KqhImXmqtMOTN{63Zu`X z%Hl9m*pYL$!KbHx%^71iAb`k!+W-Rknh5h75;HR^ADvY@Z4LH0WS})LPm*m8j&NAD z(h?z#ltvq}VuhH@q0sfHAUq$FP)26Js`Q}mcyUDM0#5qAzn9tK}_?omhLh?FcqXHH>mId=< zn{k(pXIWUEC?Ey6r+6uhBuWDh&fx)CT5@V%0-Z1eF-)yO?P9j z=G6P)q%1j-vj||I=d18`X$?OsuU~hRFlHqtDe4?`HLTt3X+a z1};z_MWpx5XcNjhxX7VJJTNDr*uTSVS$w&YYN)<$Sz&%TuVu~9P;%uig+MjLx1Ru; zG^B}8*ht<;P-W#Bz{~RVMV;NWmQq#NR%i zWILGGFhPYG79JkR+820UXEXhkijk`CxEMinad7)b^iVae*z|&*_LG~z&r@}lwG;sc z=nDs%kFYY-tnC=B2Hh0GIx)Qy>7rakAtR?FaVyFGejwl16es%rhll$XZg*{FvK`B?xXuaOP`=I~5 z`%&m!kRwa^p;7*ezTrC_1kswFTMN< z>c-%WVd2sj0V7tr*0HVu;|r-iroddOm+G4xib(J&3Xf}jFo|xy0i}%RuUu5_R0OPd zZZF)V?$vs=^@!=|<%#?bS2~D}7V6YG4E>*6ZHX3ghhluOqqwOo(cLsY1Wv_V&J-@Q zKApHY2R|}62a^w-IW;wrR1UQ6!+4xBql_?m#*Y3lr%03;&R-AYhd3`tyC8cavz@WI zR)CzavQ+i?P&P#IlDt7FCy<90*JnrYlZb&+5XWFa;xdDNgM2847qLtuVMhCF9}IA@XB? zw=ng2dcB&B#?kyr)n!>zgi>59^z;e^x%7(f|&E0^L#gS}lZuy@7;R4qXh7h?oo|()fx! 
z(y|&>?zYHjFkg#sF@n{TxXi)2luq_gB5t;oSm)@O4`tYT4<-3%K)7;KiCA?I zu!?o-zVePYPndmQSy>rRAG-bxfB3J>vodrNlTVs21_DWKfG_OfT=g5F=UcM&`+d5H zGGbc^Kn)~_+1b^P81m%n{q8TVU3SMr5F~ls4#O<7JbP*T%ya)51B3$)$$?4vG8tx) zUf~X0FaXaAiitro^giN%$7QFEDsM~yLw9rughC=T?g>G#VRky0VNkEo$MJo*mdKZk z!f)HNBl;VQi4{O0NvtOvAzp3#kj(Ms*Z`eDm;FIKg#+MvjreN1Lwgrc!9<;&I&vW2 zo%SHrZZ`5m|81VmtXBkVa^BeBRfq*-Ig7*k%`P<8$4luWn)(36%AM-MO{;2tAwK5= zRA1J_8QRcA!T8Rjf+Xu^AbW`b|y(Ys-sKuPoaJ2lBQv0E1eet2Rsh6{n~6A7SP9 z%bIXN#qFH(v=xB;oozs>qkbUv^1;jra=J| zfVpd{CRn_xMSp>?zJ3O-AH7Zu_}%%IT4?+JuLcK7xwY+A`IeELc0m#4C}j&4>o?W7 zB6h0&hRFQau5^e2sl&y3LccE53)bRxr1~Z{Cx@8h(c_@JVfE0Q@9P|so)8^>W5T$m~< ziq7z{A;+QgJ{u8fX=&)kr>DvCH(I}8KrmphwZ)q_y$1V|K`#L7SK0!WISJEz&MdVZ6rhsAB2d= zFOe!N<1Jgl5B&|x4*eQN@gY_s0&^`vNw90WREuCRB`MVGR^D-LsO3BKp@h+TjKTGMHs}Gdhg05%ipv_`z3j0gUze8QjrGHF`m@WudnT_*?nL*+}5$#T@uw=Xj# zm_f(~joX*13@E{yE@JiBfcgWbJ($cC10b^NeMJ7>%&X}m{$>=hvOobti}-LSi4{yO z=1Wt#lwsgBg{%X>LOX4{(i{U;$fy_?g^2zfzV|LtHiExIUtJ?3#7u=j1fxOooW+th z&0huR2oD&Vn-?KM5OU!sTSmem6)|%b=;}$Xx15zT8y{lRxg5TN_5%sX^jhVz|I3rz zvZ?0le*Y^b{h@oW^qzoj5O0cz%17gp0;11sXQ}n9n}8K%)NjmzBz^^0aRcB9c-hk% z5~p8XOY9Q_KR-Y7+hOe1ixIy04i>w=fxL6-fE_B_O$s)?N6xcLO$g~XMUKDr4zkVC z@8pK&@jNfU4Z!F3P(~2Gzdm}{{OGLo8CaRMyW_(}6`;y*J&ds41}eo;sdkxOPQ0hI<)l3LFpZQs|)qv>K(QlKp@E&JXP z>q-8pvj5_PCKkX5aeVi8e3B=!;zZfxk@Kx^S{A;0qK3Dd(-p-=!xFWwPT-vHbfQ#> z7_HlruL1|}Ey=3v_Ehz+jss<$F{WJOcKpOTj1*VP346vT4CaM!NYXKlCD!4z=SmpQ ztz?<+9CU$V7Uz}619~6e=!tW{Nz7IVz<2aJLy7#t+>RD>a?~+J{<^h41UFX;gtPow zG+$3PAFY%4!aJ4<4Jh?q4Ep(IIFb?9v)N)0bP@D+19l8GGJWP|2`+Y;&yi7jnkLmg z9gpSjr5Ep6AeT;i0#yi9ltp6*Y^hAf?mWKPF@s;ECxW6f8qa0?aI|GXojl;Tbh%7) zn1mHgWN_+!RaMnxI3EihPzq=s?|tkLM4uj~k2U1K3oKl?FPebGc81==%WGltmY@2s znSePFpsbQRvRJ|LCp)yRVIsSG96dGg1`iaY%J&n@55n^) z4JMd1BUPEG%jud(OfDj|&ofGbr2b5mDXPjj>Q)ID@WcHiyarn7-IwrGNt*Wtx%~!ibI-AN7wH$LZCwDtd)a z>HDJFQb507V#@7rcT9An`&4tk_H_dV6$OG@PAS%8n#F0vN!{iGwD&WAAg$cBnOnp; zHii$1Pz+(zT-3(+lddm5&J3izWFP1AbxtsewLmuod5L}%cbb>FtZjl_TZV{qy`Zm~ zo+n`O-Zt7SuUuE6uktuEyCzz&m1qpECkGuu zvdO5t3y%wy@BX*IhWoA_a`JANkLUfpO`kePi^xAD$MhA0fokjBBRAuh1(3ZN+CYGj z;&t(RPeKI97V{4bJ>-b_Ssr5(le4d6V9sXcOZ)AOs<;t5l5$ z(>ts&UVM^I^pr?EGN_Uc`DtV0_npolFvj;S^0_vty)Y;mzzjL&tcA-ENA1AJhWf;V)LNN(met9wLn4tV$Cz%bUB zFag0tCk+WdJiq9bT0ZzI>S zv%DHUds>oG)xQ}kD7NTgW0^+{aIJS272qT)Fz<`?zJ0qxXiNmMGbhlkn@4{q^{thy zrA90L@aWFE6zIiI;i7k75>sm|Nz0_8vXv-;@(_t|`WDZzE$$~@1ErttuN1gy)=;l~ z>PQ5U6hP_VPQ4}vJL$9j3>W;lfY%}8RzN0o4vviF5miNqN~)o!Imh2FH;n#ag9Hrn z#cd6|NmEZ2sY#TgpJB=lt9GX~x!8R^9ftUBFezWNJm3%gW09i| z_7q9&q9{uw%oB^TFn|F9J_xzTi~Qw=sY38mqVgtag{0Ivj?Nwc$I;3-#C?K@=-x|x z!LiTUFbWM0E)MK(_kB1V{2&r$kcp@n0)~mPdi6zuN9W(o0aYnL=3bN&>4*N2dwzw% z!EZCJij0m3UbIH_S9A;pQBeP`c_$6lB5b2gP$N~DgHvl_5;31%P8n6P=w%P}CT%Ry zzzq@cZ0*Om9hgCnMaIXTsaE75LY4~wu?~~_vG43)?6JpMriuk}Iwnmv@ zPaj?@5>a#KY|M9KwQ*D#`Nt+VpX;>!c?pX#0$fkpW4n+^aJ)P4LrWgkRg?zzSG5p5 z&5~;Tl4=OMsf0tEY>iZ+%~R`xE1p5f8m4~S1ZE;^4>Uz9d5=Ou`h_9k4&w8jV?vuAe46n##S1e>5NQN(w(Y*A3Lr0?s3KwSpLq7XY~sSgpk1G1 z0hFT9Fb-mc1$qF?obUq}Cn{hbQLCQXe>hs|Q*6LB#emk#PQ#0`dcIv}+PIJ@jazA8 zRkVi}!Tz-IgmaSj{tVmh>6mYL76DFU5Go<$N|?YM>j*TjijErE=pBD}=sqgelq1BY2MZh0@W{(Qp&5P07s&qt0EHhSK#^FvFe2;1!|`u6*M8|tTT zeNeDJ;O);qjMW)2UHYk@C4M(p3<($N%dyyV9A3u$1XjmuJm13fB^dN$_*}Jo!jF_n=j5cklu19OLv1yH%G)2SLAn$`7p+YHEy&;&`94^?v*<{KKZ&H`;PO ztov!D#}ED~X+>tprpq7YXpH4RZNW)GjsO!1!PR+56Oo=tzX(I`rv){1c$>r?^Y83} z^|i8eHgv1y2#S@@`jny^N$?Gu%A&z5YM<GsH)FLpd^?YL-r?0M1m)6R#Sndw7$lGat z&|i1`{km+`+@u*S+G8*+&c>QJCEY7?vx~$U+7ISRSnU!^b%Mnv%*?xw2eq5?kYEj^ zX)0$sM^3+6__TOHb?YCHIjU%($Dtz1WKv3JuIj=X@UaUcjMh*aeF2AUwn`u*-#8C{ zD*2^K0kZ)){6l7z7Mr6HVsJaEH!EAyEDHW7hje#@TVX==anJi(-PG>ylTewTD2bKo 
z(!w4#q{=#sAbPy+a?@h4WFX-^ln>k;4|z6kcXP}?6p~*O>C|idI(9j zb$~d@iBaW-Y`Vy;*lbU4x^#jQtUa4tsMh%zu=`=%r&`NVQb9pNpmBzs=d@wPFQT!L z+x2NIS4?51<7!6l{;Hy)A}lgeJd58m7U;21A04a#x|dQ9J1mAUU0Z; zk&qVHilQbFe|f;Sm}bErsH>#o1>5UKruusuFL*OlKL`ZnRu?q%lJAwjulqQa>n?i& z_0v%kg4uFUI7EmcotS=Mrt9w6W?~2-%1;_scrGHtkXXv z?*eh5qnJpNuFy^C`V(MQ?q)H61YYWnoytD(yaHR1bagUX7D8F>b7m4mm>0?o9V{*Q ziera_=DGD8hxR+l_PcGoQUbthPh06QL)bBP9{e26{s8oFEjGKty$UaT(#v;y7$>2- z6lk@fu;X?+Oc`hf&W%b%KGyg>Ki*LuBr_U(H3FbsKMr)24+(Q~+E;bQIy#&dgXjrQ zMxI+3t}Q@+S$Y|rdU0%Xh0opj?ZZij!i@j(-43-%9{A$Y(zNAPT@4b^9}!bq2}s?h z?aZp*K8u%?Pa%nv)9O7^+aRN2MLFHMy5}evzIqP`8weO10?WI6qIH=}DJA7oQ>s;IJh*yTU zvN-(H>*r4&1FJl|>NabnA8lK~wu_c!@=M`&fDj^JkZ-qs^!Y zK94^J7SqQy;grS!vB?&)|!oW)^4%Q3g7!z7j9Lyp;bN`JUg>MGWipWgngSW z=CSF&)Ixaj9K$?QR{~NiexuLeBm+)r9GoJwtKbbjo>y%d!?R&>wW`F7oUmsDO5TR? zD0kG5jGh-{F`VclgPh#^)rhj#rbsd5FrthCUI9B1iQ;N8ve(KKrL!hLUyd@Ricy^n z1^~{yJ=*}#5pEi|f`BFHQ!low5^k@{V!>5q(YG5?K#$&Z7WSyXO$IJB^{925rYMq> zXCyw4s%z8xjgb4YU}4$SJaEzw&~Vg5pU1Pl=n1>}px={YV>KX#YHq~*Xh03Nt1?Ai z-#%Qg;v%Q_3ljDUzDy=0i9+}gtA>i*+HuCI`|TvaW8gjiW^I$&FjZm zUnGpSi|#UcA?+&0Ugr9S0cvQ6b|FEH3|?@-ye)~VN36?+l8c8C4kYbWm1annxMv+y zXcHmo0+-FO^(E0aeETDcFqmS~?WG5hz!SwTTy_mA+)&;r&HZzDZxatm!pNq%HL_c5|JFb$|iiGigpIvJP(y9y#Ii?zWWYmu`uA56Gx`Z;0}po$COTyA=be&v^Hyj zf3KBDXf}WX&Ethpw|`qO%$E4IVqmVr71WCePAeJlkROqZF8##%vkjN?3P0~BS~$PX z^9IXKp&>rz{u8}D4bJI13;exV`l$M}QkUN3!iRS0#(-*POtUtX_k~ORV?Qei?q^GK zbj*DGBI`$|8a~t01wm;F2CVhZ5^Q6~CnSv5S}K(pwk<5~7wNt|JIE;dir^05j`TFK zXhE^SX^99<-k|>2+QC6761QfVwuz|k$+7@C&3Z~cehT|F3tS?xIE;8AlYwv^zfuct7Gmj#S460`*j~F{`q+#Y ztrQ9IN^|f;Fq&WRAn4*ygD+!=#kqDuaqX;rRDNgN&p z4(D$0wnLYfR@FOg0sZ8G{`9Z&XC=l4CSAjfrKb3dk2M5q2-1hq{Q=Rvi z^p26ibSZ*D$QsSjqD88nXE!rvyB@<>P`z%!^LC_~9}=2|-Z#m!4O?r=*o!7g4EQL` zWNyq12(vFA()PhJ`_sd*GaZqxSDQVN(npqmOcKD=qkxvV@y$h`V8kj7Co!Z5ASWg{ zCW(G-!#c8q?Z*NG8IYnA}&4p!+4@pPR>-korp zu1o;yu!ZKfL3RRgXO6?y<5ZaR>}(>SSs2z?By^xx-xwwE;pU`tXX7K5Au~;-WEAfH zowC=Md>Z?JxxgRV=&(@n=R)}Ao$#kduY{`KaS80yL67;O2+sB%WRzmkC zIB0-0u`AwAs2#iyCur_mF>o)U$0X^pWjPzOc70J?_H%?;iU1l1#LRvy@q{zYmj74} z^SFANsgcE#w_CxM$Xabr$X|)AxGe+?W)sMfsqL&2jBI#R!4_>NWktlNi8O&z(`uo|B4fFFWkJbqX;^t}EXgny+ji*jB>ak*~c7oLi zj|4(|)M0SO@6A$mEbmSu((rsxi+yMVWy4w{9)CgQI3LwG$~K1zr2BQJ<+Ji9Y2iKI z`Z>Gm8H^co!)xwasAzL}g0&;QS!~sB3HkuC z*<7;Kx$eO3f9gk43KZpPl~CPE6ga$R^+S3@&cBkk=M&| z_vM~*Ri3<5#_<{ym0SqctIw;5_FIY*3F))v^f)zkE%UNCytyRA>==Pn5*@-bJN8FQ9vn*wR zQ~zIwSdQXAICRp?XL1NR**7?+73!_q4;XCxK%wVO^1X@i85e_?E3l4>ZR#p>Em??A zq~kdN<6LI+?ujzLFqlHBenkAn%zeCEgzJ*_BseFkbn5DP^rpwDP^$ka$f}wgT5GAb z>M@Cu8u@~Y(qTC|;Un%wXAWoTGkE#G^!3PmFT^>R`XwjfIMnp4?8og@+Ii;oIB5=1 zNZn05iFf>a8#&@-BZk0$>$Uk}1l<<=zQ@(YeSCZkaw<5XQ#eV6x!e(gJUh+KQkpMF zwxKmnMnf2KfGF|oK6dS2Z;FJ$0AdyEzl`yMLcm1iW zd^Ptcm|C61F&}r!CLGL1_S~!*t)gYnuoe7Zqal}`ZTGfmFNZT0Sk_~rt}u7a5;2=Q zxw%V`AdggGKh9%3gsVa#NqNeTCz-tkr!*WtZ%Yp!L#=!DQtuebF@vS&xJ|bC4@>4y z(wlD!n{oV~hTZI%GKn&AJ(##Vthm$4Aj;Rfl6=`2 zvKmZ$S>|mZQ1ib-=%A7XF&u_taKxoMMpxj2mf=E=`SIlhYc)p-jos%m86N5j%*gau zOG5DSeZGwm`ARqIou}h;3l!y@Em%&!b4Tt96V<6^%$n6+#p6bTA;-UuR5A#6@{Ukc zPqw|-;RG2UT5{N(%!F@Lc6B)^=EFZDfwWfdb45kK*Y2v7h3_AKHhhp@j4)N{ShKo6 zG>A;PIGpw+^^)1)nO>d8^*FwF8*aQ>wriRAyHPFCzXW%s8S=;I~ORm`;{iI?p z^6kPhE2ghe0ZyL+i~ud%R5Vb>-c9-9dU=4D04ek~kX1gsDP3+5nez4f7%@W6VbKJ9 zUvxhGsC1s-N7Vvy4IPG9f=VtUzdMVo-aDW^na0}~{P})^$)fwcAikI|%t2C>9E@z@ z4sbB2MHD$zs)YoUyNM=w2RxEy^RT}oL;-<7wCNgyAer=5{JbX4j^$xOa$d3;ON~Ql zf&Vr51%9Q@n?=X|eAj`T!YB?;2(EfLA0gVEE|7$E5yB-m6s*jEh;rzIuEzK1IwI4t zJJ~tGdLb9GRVsrA1jEw$BF5R(&4QbWaR+JhD})cbp(^G}&Q1wYnq)8t-Ta4sXI%!QWQ=`m@jueSc zfP=+!oE~j3t}zI~BqzH!iJaod^unp5P+UyZ`^*UOok6TmnNwC$-v>3LU_`MShw&m9 
z9y(ZlP*9!HViw&rDtsa-zIKD?3ogpR^Q?_Xn7a21Wh&ZEVle&)A+|vWp^sd$h{O#l zb&B4NgAApKpgTx}-yKfqr#lnVpk^y&Mx2uoov@WmIupN^x5cZdQJMY~KZZ{|(Nd%*-6jOfY@+M@# zdRC8_ujxX@fuyGhh-27{izHYXItjt^<7Sq{X_swJ&ixKKplo`7Wbqts!uk@N4qwUM zCfVAm*jLzZ@iZo9pwG0{f>6rL)r|U3`MU6j*d3_7kCD$`a|;;xA>Fc|7}x;DC# zviK&g2_7u>>%61$Yz`t!s}=AHcf=g;QxV}Kg-x0YH5QI;h{kBxkihm%LqC*4Nofw&7 z#H{jdEQ3r5m(?e(rBOYi$9A}cTFC&k1hGWQ0YTMVQU}#jpOA(^suZ#3bRNi zpLnYL((T(g)Bx)26yU3BHN1@NO*OUmynXrvjlkD1tD*x03>|2l3281o4wqW6fMdoi znI6FI+^-}(QdxUvX(`j40^h(=UwqsB5C=3=*)6x?0R4laK#LPMRQQ`L2hMPy)D%|4 zG)5-1G(ro`=vs(lMVGbz`HSTDvqyFNkf@*wLW|*BpM3;$rJx76huIjZ6FAmqua_E+ zt5Tw|@*(lm9L<~XUmj)2N*@^Mi{LR-b^V=(ipI~(ql$ac&FToG8IHj z*#w$Mrq4HtFN|VXl0@!?h`Hxr#qOxiI| zNHV@dg!bvCBBBlzOg^e2{3zecO3Wd(-@Bcj4kcjHpg=10n!CBE-&v&iW)kK|!Fx}H z&6m(mSrgMHWFyE>vc_we3>u#ohr^8M#FQIKVonk0^grDQ?LAks|B~$5W%jEG;YqE< zc1NR0pQz}8vhYFv~ znc~vfE%~r`^h`0qic`bUa?&${%m6~+;gih6%m2tDB>b=}FbVa*SvW_2n7zD5pZUFW ziBDn!iy!`~@SG$1A!4Jl!|{n4njhQo+`hVK_}{dHWfF>30xIt7ubG$DD&|}orR|PM zaZ~)*RZwqr2q1*Fzvoba`Jr1~o$h9sUjC^0w{R_2H_&O0g_!d({;cR7@@y7nasFu* z-*@L%M_WauWmmU|RtE;oD|uYJh)Y5Z5;0R6gta)0dqQPto#|-N%W`?#P@t>y#TI|S zYx_%ir3{-kh@xz!+Duy)vNdq`iDg~&t>>-G?S!|n#i{unoN&XHrYiKy@>efqaey}ba`dO({TjPOTo3a3Kc}^Rmm-kC_6y{iIku{)Z~D=)(MKL zB<9@s3R>*6y@blVPLt}DC^!#&hqe5w*nDT4-=4XLTzb1;99MV7x&xl>E`PNmio?iV z$SHxOmwVsnH;sg3fNiNX7^LTKI<=fzOSHp5T*QL&tD^Ks^g!Y&bILVnT&Ls(CG_+q z{ab|7eF1T(+K_38B>)O7{FN(Aj83f>Jk4L^dNh9+IvP{>5gJLPVanb@(~27rhIIF6JwfB)M(i_ZoQ4~Li75CmPgI}uVb zc?oGuf8>6HtuM|0%&MP?a)Lt3#xZaGkbErC!kg?>pguv-<6kajnlkXXv&DL=sL(Zo>XJ2S1qqZpys0TI>zeaZ;6Ij!md(?em>({Ki<=qndZ2qZ7 z6R@H7(4R}ws-lb0|I-2hU^#!XgYI3nggqLikdur{PP1Wq;qj42OP@KfWNA96u0@9n z+4q}_ipO8T?!7&Fs^ux~v6T87Vlt;>@q=;J&KJsXAxW15z%=j*lf3<0lf+wuPrgZc zo$&66ekN9Wek|@`ZM<&9pOSSDSv~uBLYEcak~a-MFtHlH0R6GN7gaxE)LDRQ#Oi4ryLCvKFHN{u`a`z&lmUeX3Lu-=k8H2xvq4 zD#6k2v1y^+KQx;LseHh6R|q{y%%`@z!w&=bOEX7#FRzi|evf+XDC1(I%wxIh{M|I^ z=pldNH?>Eh{bXIe>wD^_A?&i02^rgK`>@Lq6yJz8OPHF+#E8Di$}BfoK{~)}dR*&x z(nZNyJKZ-`&7bq_1YmXoIFQ~DcmPQFE1Q3+#n}>|@#gQpz5p`T=6<=;*L#&rNR4C+GWqXZGBOyAp{!Bwe<^MM5XZwy zEdLBjm2vDxHnO^VnIApzm-rLB7aP|pk&y&oum=5A?whI;eP)A6Da?1 z^nuU6_Hp!K*485e)t1$st`)5?^xoK{XrELO1nCV0jLP1#V`>X$GD*?02t;F9-bp$N zbDu=a%;ZG?{)&pk9qi*YEqC(Mk^S(IeYfq{?^T*u^}hXN({;WF{LZ#4?CPefxS)0E z52&OaP?C`QA8jTS+|vm0jvn)ef;WLZ!dHlY8HWrq>%BD%v})%>5JHQ@_|C5EXW4ud zrlwBCZ%E;+&yi9^kwHX@>po>?XmEmQ%%;-zg^dD(1tFPeMF{q#4Y1z3Up~x%SO}ok zhM@R{NeQtm9dhU5PZx>Cqh3d0v~8VA=||*v>uz*W21j?YWvpUC(Rtq31Exdst~`bf zNNeBKmZRIQSu$T}{pd7dLwestvo#A@Z#{9nlz?8ge(HD^vx|AjD@$-mjxp(&S!b<3 zi{9=}Yg?8uxwmO|3Uyt(YcM$6P9?;Z&ElbH=XJ=~5 zc;#HDdg{7Je)Dw$3L-gc-?x`L8 zZ~^gc9F+~U3p3@HHtnG^{lcDhvnmVAa0!WlwY- zhorVgAal0f$o?j53l`*JR`5w zt>|8(i2^QeJaJi$;}3m)Q5My7s4fM2@a(p;NgB=PqT6bD|Cc%aPiwf{-HA^+0=@xT zzQ@PpH+SM?d)}BpMktfKRDLph;=b!S;Yw$g=zRgCps`hH*N=vM8LiQebM#x^$NBT+ zXV-?hsG)|<4v?8nL75Y6W@ru*DDorA0n1WUEjcT#SVW;Op|N_%vQ83eFG9a_xlTS| zk19*;iaB8`1SC}4g+Y8oi{xnh?Al$r&dbLsHeIMb@K zq2$JL7rS!yVrLwlWAj1G951MWv!y|aotDo`*!)+FO$#SIkyfgF7jwhk(H#@ci&!mM zii-MT>dnCv^fYgYDp;32mXs!Xx9`~$S5y6CPwm8OWx50lN-vR2{BeaYDfWxY<5|@8Z;>`rr7P8_L2vYV2d^uOn8hVYJ$9vu0u&1H0UMF0H8l7C+Gb3HuREq5sgx| z%n%-%sed2}+esnx&inqH6cBMh#lnIDbbW4=+kbhYIh~3H9+MhH94QoQEme2DR`^Ls zNR;=TzKdgwW+BKnk3vJ%RR*AKcU+FrAA0M0GAZYYxP68@dPidifG;TnxMC_L-RnY{ z^wmCC`-jU>e+6QhT9t754{%uH4sIQ62(sIS+DlWE6KwKwocf#l+!P)`-Pc*pBQx;Y zo5(Yz)gdn_Mja&L*0vCS>kYd0x`wiIj&E!eEW8`$$+K%&uX;kf>ncLL)z5aHUT#sc z)@o4H`nGPKqOGP;ouK^(H_5z(+ z2XN7p3feE`lQh6;_9E{j5P$55GP)?2+6_WkB70F5&aYE}l0PwQphH26$xsD*uvzLm%@P5faC-8&5qw$!H(z-Khh zpH`cjMwaS2^+2lz$R1mL$in@DG5Gm^%H$OlL+k77AB&T*v9ZJ6UZ0R*VZRvD#C?T? 
zCnE!0QSG|M%J#k~cauWqTzfeKe38fVLI6W~IAO9QS(}=gD#lldvsgm7sb|3(FI(PPU4MD>CCw z#hIX_B5aRj65B76fSNRY6=PFx6o-@aW}HbCzgkklIgO;Qo2`YbKjHeBs)yFm(UM{% z)S?;hxk-p_8K90r)y1JH^Y1X4bsCIvNyZI@Y)WS8U9XI{P>?H9ow<$fkQw4uOAan$ z$9X3{ml8f_5}noVfgWl^ihQ(5+zoiON}MfeZz7XSR<{L{snJhZe?+*hVeiIXHJi)8 zUV_0{f5~`bA~L;fzGQ4%dru_h5001Wl|%a;cQYMO@bROh;)u$7!i`&iB1YNXo-V2M zif>3cw3>E98(hh&15j%5_q|V`j^nf#b7rvoImiled%RSL^ipyi&VdaJB<_ajA`;h=|ia@U$_!0)s+hnKb6kgHf z2T-zLv8lFDgi#>Yr?+OOY``TY2PdXude>DofXCbdP z=k#OWY)?k|!7jjO=fHK(6NyECZ-sp4$p_db8q``^yBMB;#?86@Z%qzsZGL*B3Dfsx zBn<$&VCdh?|B@vF@L^_iG%hY^ond!*Ah-Z_8h}^M(GCzDMxR-7lX{PfB>g|nn56{y z0Dzmn1@3?FZOSAP?_*Te&ETvS%H||?#pkeyi~s(2SA{0N_#L*RYESfkG_TBn=Shc_ zAKQxk=XsrL4za5EK*!=$-Xe@-d%Ik2mwvK%Nws^Z?bFZi?uu0nK)Gyo)3W2Ls2O$T zficYY4$=BYCl&((wq^V4?=1+-2wj0ugVEw z`H5T-Z)AX0vwa&QEuhtq_Wyl`BK0p|Bz2FU&iSY-2RsGf z%(6dJbJzdMkSYks7Xg95JD*PZZ|He=79BAS|I4mX2MdD(fynI(i);f4oNW3`i3(JE zOso!7Mo3r2@F9>qbw97sypIpV3>aU!xFZbiRVnidqsA-Y>)ticMzC|+1qf)j!54f( zbvX<%;I|arM+)A2_}}3nRUrV1Nie`0FJ{=F|JV5)b^=F252mrSm%10`J%r%1fhoum zUn3b-aoAz7WdN2%X@aD`9X?eTkc!-OHtcn%N|55gud@9v}~a|EY;=rzsaU-17M zh@&=O#7N#Y_3nKhK_uR}!rngw|5??4P?JY6i*zt5pm=;#r*oPOo9xh~vf1-8?PhG3 zz{WllDAI3&J?+10zf;Q<1_;6ekm#IWP)DG^21qW4wP?V-{sj=>>fRC8Co9;10ECP; zbk%?Mtlj&bUC9Lv*?n+8;o0?UbgjG(D}P|`{_mu9e-j;Me|tr4l*6i3?6Xrp%s;vd zczYQm>6d&}^3pcg6DTLRD-3vfM`6ECqS{J>i3Ps*=}#!@EGLPM|Fob3J)FLHQZWSX zzqbDwC(8RczmO6Bz-<@Zu(F<JC$dMd<%cCop+Oqa^)xIXW76)y{RY8AJIZ^T6}j zk0w0D;k0D|-YLe_-sBJvbQm-!#{NU|^++182pzMkj^uPWL$9pBxD~JHII|NjzV?xG+Z(*Wfe`DT2-p zA7+*oe^&O!JxA1s92Mw1G=$}y*IfT?fGt|d@4 z1niZRnlCDIL|A?XY8~}eX_@|W`>y07A;w#7W)KwV2+V3dda0R9;h$gN*&ZL)p$SEo zBo(HLlIhWQ{9%6;PnD<$`EZ&*3ak?cxg7)}a0zO#=I;wxv1Z6=&j@GNNItlGP}4$D z(qT?a9~yKf1#JF$AY_AS$wUcZ8*tWJO(G572?x3%ymH^f!MxD{>}^Lj>U&pNp zB%j?pP=+w7k^I-sGXTt^#GP#cBT0y4dQGizjqg<`-^L8ywJUV1=Z||&e{qC>qO!Ul2 zu6MuV2=i`LPI!d%sNQkGd=`C+beV805Vf>#=!60%_@S(ej}HRJ%zvTJq`WU~PFGYr z;SCPf&|h@DN#kyb}+5C!n`xN zNwjGv8^H$^JU$Lge(pZsk(y-_EY9A7mgaAi^7tEwV=P*s zLkl<3teCkk&Q|kmuY{wni|iG!t^N8fr7)Sr+G>3?;ciYE!&?N8D8-Y$(w{H)ZeBaTFDyV zNGIf{o0{!>f8FzvnFnvIgWH>sO$RqocrAP({MIlf%!bQQRVCBx469u~@OkX?)hw`pKV1E0s<|I@xfDGT(6CI8egc@Ev)sx_C9BoNR8~`z zjqs2f_QU4%vspl1R^r@V=wnc`EadUg9fq-ev%V>6bEJ>s%pRW~kz5G*F+1oD=8{Qw z7>`bhX~}mv9+7qJOZLJ4=-c5u4#bvYr0La+9e;i^r^BWhAm z;Bg?~8xVxgtqRs2ja?J!Qrf3HhL%xPYfdY=KNDZ(Ofo|cRj0hstQgj}|3?Vi%| z@;+bDn&JOtHfA9JO!>R=E-Q@@WFR1imo3xB~K zi!K+L-$Dg%2`DC`#y)jc2)`8Pu5p~0H3;lm5m5l3D)N-c{*aZwa|i^?f^wd;6UJoa ziby?C)$6@qRVo5=BTi#XSjF^bxV~=EVx(~4xZ00cWIu02#!nDFJHWw8RM?6^!J;O! 
z^c8*DaujtZ=-XK)>33ZJ4KO8IP}6E%7~1udvpKkWOzOXTTl-F?HxL-K)M?*NhI!J5 z78Hm=r-;c3cL(>mYJ{o8!GjQvtZ+8>m-Bpo&LNTkoGj|E>&xmSC%Lv@y zZq2Se24&a(i+xF;3#2$=xdvRZ?`Lqg*wRm-`L`DSeWco0Bz;EK#7=0z?KU{Uf2spI z!Gs*ugtJm0c=kdngNc3(RFb}340!l&eQvN?2b+VVg`^-wRisJNTFQetoReoefz9WS zf3(BlDMA8CN~3(e6=P}ASAFc0#SMkB4uK6Fql972krF2kW=JFvpU_1(DnA`J{VSZFYd~#)QBolh93>8H&!Py<|Kb@BJB{9{O>fZOn?Nbr}A$_yA*9TC%vA@ZWEX;v|5Z z|59(5PEMh@`*i;){Y>mi*YI_ceyQ&n=Ut%!s| z%M4c#mT!VzK`jkRGB5we9UneJo{hBoeSk3^k*a~?|(y)GO8l7mq4<5)LkPML}4?V07&389;bpH6?sDo zE|>_OTgnQqrI7XNmjJv6)zP*FW3oqv)wnNpG^)M}5}ROnrUR42d0B{|!9p%CME?n& zkKB&3<`1LxknbC+NyB1*CVm29)9K8QEmGhpa{mcA{%EOpI7x!B=K8FKp@It;V0UP% zn^@BAKL(vonVQ@>;8Z37a&40hjjTdq-RW3&Hiztn6b^(&;-SJ_sG|2dzkP zZ@lbCZIoa4#~YAMeW`!tY|$|UAW2lWH&Rp0l`vUXG4kLBA95Me!uM9I{(td#@?M7h z`Y-V4d&gp931Yv$H^7rAJqHujgEsz)%=0{ZiQ!fAi-(=3H~LkOUzRu~-<+U%77@+B zjsCr8oxDgEilE^tP&#`^;V|?NvJkcX3u>njUR>3gZ}gf0D<1U%UD~yu6oy=DboDG3 zcn8JtK?wxuvZvA?yvF{GpD`)}`uqrKQ~bkd+~GezuY~6MJ;DOw0TSZk5O^F`0s3uj zxP-JfP3f>8vR}{Poy^w*hE}#1!ToA{hAN>WVi;o$(4h)dHCN5q zt?+_SK1SPJvjH1;2?$mi6UxD*tYYi<6Qeh&xd|3wnByD9TTJ#zp7~svtIMnPa9%EF zfdKEuxOyq(b#l!!fLq7c{~4aqB^5MAqbxZ#NG7&~4-qIVCITs%_Z4K0rGASR831Aw z6DdJZSt1cL8@>-D6BFga7%ee(Is5k3a+b-hu*{(^lUcMUq?EyZ#&h>)QX%QqZ{0bz z{6yX_Vut#xP_dE^?|``eh{8A3j~CL`zPL-VD8_CrpB~D+;xh{>DbqFIYx7ok6aKeC zBn@tkMYkU2ndKc51Z!WV!357-Q_(4*#iIojORd;}YkvKj&x-s(G31})GO7_T1WT>s zn$ka@*C2pP#S-*fAIuor+S0Ff&wSTmAH%_k`wGhfMg`=P+AyqzBPLqFe;R>9CWs8e zhn9oFwVnM5$Y(C>*P+#*InQMl@6-wOMNUQ! zxb1*8HEUX~nXlcaVt$SQ?PHwe%YtG0SnyLZ@J7H4h zP#}^K0hFBS&hJHd!x<=OVa|HH1!F_&Wdx^Upkcb0@c1X~4OR3vc%HZup*dCPlsG+x z;BE5fPcGRPd%8@~yM4v73MHOnmSsZT(Z>+Jk|ih~KcJ>V|MZ<#E}MrwCqLS&>5}0}9V6SG+S* zaQOgWxyezCNc&-wmneXx2L6B(`@Y(Aq!5JrOp{Fs1gNl!i+w$k3*edpz2w2FCLf_K zy(_>IJ0)groElbodSHLH$`^Hsdn>Wspep(1Hw6B*D#2PDKLZm{oR9uP=HRdc7qivAgog_&m{+vo~2Eu zT?fmys1jXhKDoc9w~`7Le~HQ2S;-qJhsN2E*+l6uj%;bR>c~AMS`VnEzBgIH{Hy!f zhi!q_G^{a9{cGs$AAxqfE$^5TEA%N{quZY%2-c}|6~U`K!RG7ok{isa#q3byFOXYd zE}@lG+>F^hpzz?4V?VR!77GJ0RxQ$PF+$qsg6iWQZ|Aszt8f{dwcisARGYW+EjX@a zTt6AlGNPoRH5AG@Zho45z~JN!D4;rUzQBIWnWMC~L{=C0`JmA6m+fmlsXAymWI9P9 z3$4>BeCwxc=({X@L~U{zQi2(iNPaClNu5(PlWw6y=X zLR3bdz!`#ZQknHE!J#P6kC)hIzw4ln7UYS>)U#3<6+o~Vu*~X{_lt-1*q6hYHHcuK zp^-`WK3~r$0elsje*G^A2?>e&q)d$JWhfEqNc-zu!T{0W+vgk|G*Du}`Lzz3Y^qbw zM5FKTUL`%4h_*Vgl#CKoLmR}wU4>UGwIx(1cR$f$FJjSfY@yGU8xLK=1>6#;9EkT( zWYRWUr&Uu{0*;(s6Mw={vu*yqCKTha7;{2BJEC5-d6S*K3b|bRA!}v&hyJ({4$wb0 za5><|#A(A;DrubNwZH#H~`TMwCQ6fn%n0 z)*lx;Ub<%gmK!g&nMIrNIr;3#YBN6zCGHLBWT5FtsiNSJM%C^-4CnW#TwSRaOa!uI zckqPc$uz{|-DzUs#4+v5+I1j}t0}gm3ZB?hMZ3uKdy#LhBu}I z!mY|f^6jtJ`!Mad6R2b4>gbmGH7N-+BZoH!dd7YC0gVP`G;8cSRxrMWdt=-NC+uy3((vxs3fuSJwcwen4wY+D+QgDIer`AR;CPcGu;Ia| zd>lm$E--9Ka1<@*%~;%3kCKb!gY9yLvei88CPkz&jgX=35g)o&y^DcxV;a1K|8iX9 zeZmAouns}psTcerv23z7ga~$#vzi_*G=awIFrq>WuF_6D6!ZMSp}EqYCC|kOn(K4# zrkH456dAbMmWS#-t_$%Ux=IF36NT&^TbaY7R=;#Q^ zFXD^=jl5c?ElEK6n|Af$`|)rhN`dh{9W3eRX&6%OCIr!_IDfKyQ)LArrb z59S;X{h2wga2Ib2Sr63 z8NgkfcxnBK$X)Tc0CFGtTr&dFDPhY6M?dUe((sQvtY74% z7NN8I+gBIn%2RupU*$PdU&)+`;DziBAz?Z~Vr*{{VU&q&7w)8JC1CQyW5IGJQCNJy zkOq=_$02j&%3*?#8GA~Pf;Lujo!0XlG!CBet~$> zVj6;yJt;MyDTTc0Y{(<}dl*AY+!UfVtKriYJk9XUA&B^Zzf}8D3_>}kTue2F7R`8* z$r!HK7-Q^N^-!GfCTqIHYINMt{G!vibZvj8nZV!^|JS8V=AP_BBStHX7{;-ABkr6=z{A=hk4rAAAV{zV0FJue3J^kSP)X4^?TP*SCHV~OV75NE8V6h z`HF9r%FNXM1&dG&dMeaeG9%D#iw&m3m)BvzSD+y*{={C%)2YX=jyAZBizU2s_q3zf z3XwZok5F6uAZQ8WZ5IT~68KdVP=EiJNOHMgcsT(NX*w$)3k32sku@|UQVIu8X9szb zA&CCXytcMBmoS&unDY?%ZUop*$oEKh>vo}hoZI|f6OJ&)bx>fnA{-{e(9PIB(12!^ zgo_JDVxoFw0^VMP0zW@LAS;F==yfeJSD`UOFtIk)#$0%QN=-hMmu35nX_hjWgciwI 
zMaI#|Nqt4S^#XpB`xz$ptSiWglXx>qwqclo@J9@oDRhC}S(TN+*vIfkXrY}Sf5VSpbV4sgWA@+Qg=AG8G_4HGUg-=;7ExRtbSKk_&~Rl zK>+tI81RUc=P7QIw>le$qe(OJ;;{RIJC(S(+dX|p+nu_7xecnZ5h3JUpP{!jx`nl5 zC<33|Ig1Ff)kQF@DwKp_jEw5tox)ofyNL#I9y%(U5zj?0&e_m#sd4m@S2Q(Tey10D z;%GDwKR*q*cd>ia4^aqCQa?W?vB$V07zNFz$dO zf#aeffg^6X#Kbz0h@$!u(yv?HwP5buMoO0Oq@;8pBu9N?YpGB&#ZiMvWaSP%)-kx z=ck7LgaT_-)AGmIlp|SD%3y>{8F~7#v!#_MY?E0F&sST4ln|VgSqqAA#Izvx6I#Bw z;H(*XYLi<`ecGC2U7G7PqIJ5l+*%uyrA3r=XW9tZ{ojuhcAo}H*}&e7?!5|lPYp8x zw>(HSew!W;2qT}y0tSE-hVC!65X}-MSisP!&d<8KTV1EOBvzq#gl)m#NvQ<%mz)&P zKtfcc+oy!(Yhfs!>}ViyFNOp(IT%unir=e$TdaCOFt~hlS|DjOSW+38`I^Q#m%spnsgI3}qOkp!iU5 z&L6p4C|b$4xDpf`UxDw$ohS7T`#>9JWFV7_n0W%9G#G2ja+MOyh@m}MUGbtxCQCb# zEoi9Fa$h%k^@Ii%;&7{fn}Lx+N%)RxUXclkSe7!GD<;{3^soY%9UFvaB@R98e)RE5 zQ?c82ny;xsC`3}Xq$oU$=cenb`C)n56f|NgmUI6 z#ANCyGGrRnz}vP|=3*6ma-=3A@w}Jt@_iqrb2V5FStu$@L+kN$iC}dv3aRn^izX&m zY<8*d-MH2qTvm(s3N9X%xEbnE0%+4 z`F`>GA^9#7W#t$tfu`;TEK6T5KRFu^kfqL(E+tOwig__%Fn5U}Or&wEK*~Ystyh+uTnexB8tYI!Jwq`p>0d!Db2j6WtswOl~gu;Qhze%)|pi;-iPp{iuvU z5dCEN&Fp-vtW&}5CR%9$fv~}0l>^A_UXTcQ6oQKDp0X+*oqz-x$K5X-9bO#C?WdJ$ z7vDG|=X_5@EKZFb&nsdkv?nea4F40MO~V`bm6D+7%6V?>zB{#i?ro0O-)m-B-#K3| zUV;vO@GDwfimcP_b)oN7cyDTSRh<`K(I1iDgg>Ppe(f9+B7b6^y(K=zi{pk88k(Z6 zRAzVmtmP{RPili<(U{;4k}lH}57KVX434jgn>TrWy`~LL9@s>j(ZBe6ua65yYGT~_ zg$gu`g2t$Ohz1HSBaMJw21%dSHr~55#`9)PwM!_pYgW=l{t=QsXqFaGfh8pyr6_W9 z_EG-J^zF<)UKbvO8Tb|JGtHrFaCpG0JvhO!^}wHHtZMG6k9r#7;chSCmfAbq(EMdo zGBy-nWnn!3M#On}_lZ7U(ZkZ1KpR&{muEOWxpX^svW+Xh~ zgT)iABd6AfX*Oj&Z&qv7;!q00eLP8n!_Qh0aMiHWJb*9=Mis9H4pRR-{~Z^L zQ8!r)-G}f$_JIP11-k#-4KW_j4IJe%?))O=cl(a(F~-CeXor@EhpKuFjXA8@k36fO zPxC7Zvd_-T?)OZR2^lJ)H%7e@=swB}gFgRz8Eyl95^D)A{+*L5k(P&ykqsBsL6tmVFzn5ng`IiEqQ(L8=q7MBsR zoYP;zQ=F!PCtVg8-_bDesiL*68D!QD|Mq1B&ou@bb!STG+LG?4FD&_BR|E89LD$n|6pX$A)p(pk!?$60BT%!MJdIZZ9m`RE!2Hn$>8` zQe9_KM>d0*`{;?_ps`%)Q6b8SI_6;g^c9CLw)23&Cc?!x&RzCKUh)xL7&+Z=ddqa5 zDME^!HGoVgR9P_A%hbZe!?&fnIWu|LdS28O5tY_8jmSUgbu}o(e_5@f#?8j*hGFh= zx}UV7La3lKEnP9&MdFdOnMHN70V&1j6NF+S=yTNKibD1c861Hoc?G73}D{;g9t9}$zwVNdu2!Af~UfmnyNuWjb*{m?%uwj9u9 zVv1p2BU7Kp-=o#CvPV zjJi6eM*l6bu{kiiANP&X91M_Ma%pVWW|^IH$T?P9l{tD@l1C^<52`M6WDQm9?YFS^ zoc8r|NlZXVoKM{vihIAxXR*_lA0#GL+{enws>=6cd)w?FmdDU=y&={7p090S+QA-I zA!Mr1>AZ(-RwWferguQ!u&__$ZPsrsGY%bISVH3A~dA<`8Jn`xzGy^L zXJ4JSgci@O#bh|r6$e%&4lBVX=*W+kiE$c(#c%0=PNptDMWu~$5jv^8Cb#F4)7*j~vPezaV>NJq>9) z=p>5)MbPIS<>~gMZ+9Y-6atMDZfIbjXKqeod7(feN?s~apH#fMhk`DH2rI(OmcQ8s zLs836jQ3VT#Wf*zXn@sMsIsV;CV0k)UQddT$#PxGkp6T0i)BK>X|8G*DIA@ykf9}1KqGfxyWQ5mO03vU6Q1)U)kEep0m<+*MW5*0E9EGoe; zI|L68lyi{j60V|lG@C|H5(JAIQx6~0ZcjubF;qCcBNK7&SeG?5R@2a&2*GBw${sYz z_agMLznYN-2gmMVBOCQ|L+`j~nicq#N;bE@F~_(VCwRRSgA?$yxe}I~byj(N_)YLQ zLoko&e7`9m8MhR!=hDzZQLwV#RBEaS1a7lCE?DtGX_CuRK|JR+4RIFChTNL%A}@KG zvzTf%VsMs@*IJ-@JLT}{N_pj$?JBVvZR}WXJ-(E<7 z0bKndCM|6ApHJllBJ!7CvDIeao*H_pzn%HYqj|5FjFgPYeZ)61u=d_)4Nv_j|{P;?xxRCJ*UK!V6`j< zdA1shONpyCLIU8pl_U9{Z{iDKQb}BU8xYW|RMSi253y#!QpzDNwwDV+mf4g8L-AcB zMG}z>L6_jZzg{9zU-CnqqVJ`jzh^6tQ{fcSM#`x@qnd;Y5bYuOj?Au`c?z_qQ;;(W z!OqWf)o3Des(JH2Meke-^SD)1%)HAKjp~D$cX;!4Vmk_fO^k#NVp-yQC%;zTV2XAc(d{Kd>G&V z+C2?~E0Y8E@hx^+@XcR)ya38wm{ZSJHqqL?r2@w=aH0Aj))Q>9BvJdhlL#S?wyPr-`m*)wD@M=q2#|aPjk*znwSBs zg*i{5T?9~N1_VN&3`_tqR{e3bZBKesLs25|$ZEZo?lG>-8KD?EBm7^qCi#@w7iB+Nn!$>Asy9Adl|Db*uTVAO z9vliw1S1@n*Rtyr%So4obE{U{=HMy}?FxP^d}X0$u*T$M9RSce#XmXWC)uT$!l1`pp1mtew~*6$N53$JnWMVSlNaK|WIafG>-v}35 z=f6RBLr{QA0^=OTYB3rH!DP%VFm}_u6+Q)y4d6pxNw1md2dPYJr)5K8(^+Rd3Ley!%!xH@p)ooakHz6%(F~M z0>)BNAav}7JoLxjxSoNH<|E4^mz0HJD4+RHz>Si9OG#O z0uCtU%P?>rUs<|EQtHx;iWtYI(Vrf*rs9uDtQ2_ zDI4z+OBkbbk22G@&aOFSyVFWSTvYem$YP}uU^|s-dzn|Enxwg-0x!y!t|JsfsTrT- 
z6pJ^;m+yXt-Ij!La&Zw?Rz?A?H#2`F@qhrPGL7IJ@FM+nSkIv&gx7gDEzAEH<=-A% zd_P6yGX6OUQ75wjY=Qg3%4)~`-}!b1Bc<$M z5ox{!T=P|D$P+uUD^ICLv%3%sy8KuMnvxKiN+r(qBFXrvIxr)Mf}QifAsB9)Cw^xw z5}~t5fww&5M8(F2mX=b#Bf0^eK;{-8hYn0XP$DsRJ%bYy6WcA-L1y#2bA60klK8hS z#=umW;KbU6z~V%U+?}qn0{Sk%)h!c7TuUhE!C zsDG=7B~K#o2}>jrBcV{_7~pZX6V_8Qln7cx%7*B&_}1pp3ko;(kC+tlL6XWOv)b9Q z*Y2~MD1%^lbe<^NmS1T`5uIN*BkganjObve0K>2BEju|3E|00~sZ0wZRj6kKKIk$& zfTo10K=F-|v0`%}gzyNt=1tn;()|7{7ffCT~l=>9yOB^Sq^>y3-{S3pMh_ zJ6r|*-11G|WYA2Vl27^u==BH-J?{ZaZc{^=PGTq?+9uWO=hVl`BZ!F=! zKZXZhT_Uxythvzv-);CBkbTf%i4s?304a3;0RtIPKEn(pDU_oZ$Ty18-+o@I!Hm33 z)kDjRP<7|petS>^zMpL`MJh!Qd_DyJ?;O365hHSR`B{2L)Po%`>GHANfBV9ws`UAj zuA&G{-h;q)F!1Jgt6~BBh|c4*2|+cpdOs9L%w1y+q(^L|A_EsJ{z4-D8@;O>wg~56 zjj5ciEW&hPSnFX;-D5xjfJ6JrxgJx8l6a=pjg*EGSb%|=2#Oru_sU76F#%Q8(gQCs z3lae`Y?VeE4Ff#+6b6CR($@9`V=!!sX_hq2Uv%HmXwf!PGCUwS^!u zSn%>@w-AJ0D)BC2@IFKN%8WL{rIOB;yb7}6kT znQ3#G{S5d&$UlQGddjSEJHtzGT|<38C^ivW_lWxrIT-JI@CDD51omU$+PBX#sHAU& z;qkoPc99t$viH_CgsA+4_&|S#a}94laMHNXcc6Fg|84b^=@{ksDfE(j|NB#Q@kOTN z7Ni(@P8LD)AV?Z1?}8Fb3Ya?vd)ZP+0zMam;p492znu~KK5?Yhb&OQ;AHl2l6=~_|>5Gbrwl@w5KB2i1IogmI z;xyx1?SB}zm4d%(pyx>lMo>lGO#)9k)5qpD08@S~JJ)RM~flijR+`XnL~B>t@b(cOsOB{5pnY z`eLZ?jQRn%+x#tGH*j=ZS|p?HIc2i;S<&w=1vmbRUWNpJo1Yz0xs<3k)ZbEghAf+o z8Q-}QJ@Zm9Gzc%`jh-$Dk+$Q)q-Zq%voZNpT4u!8|pCt;AEHz1P&bmz^QL93~ zQ7HKR6*;hGh5`e2eg8C_km@?jR27XRG9MWiRFGUdnWkf2!C?}5U&=Pf8xe(2%fvISTDKY5;oHpTQ2Pq74?SGU*E1TMG#;io2;g~ zkQnm}DOn3bvweQWHJq=Xi4IB(r1)7Tu3FQzMTR>i9D7tAZ=fodWgV{%7tk4U`nD(x zRhB*be@y#CoF;~fSN*K!E2#9!N%HnfB2oH_*qevwmbFOlNK5=IP0~ko>>^cqJp6J( zpyJ6#OW@Kk-v5R-Yp81&8P9AwoEo5RIBQB_*iLIg(Y5mSDg{!ojdXm78k9R8&x?TU z%kx%JS^kms-c_`a+&HLb@$gC(O?spt`CpSaCtQCp< zE6poWZj3ZFpevPSL4VX~p#&o%DeHo;zZwntsYqjI;PCgm*kAT^;O;F&i61JHOilW;4-LRk>-ODv%A^OgmN(8m@dlUhL)d2-Z=M=_Cm*<{OTq{#6R|4aCTRsjpr zdzdkr4D?DeMG}v7lv!Cdb&gq!Q7#%a$*#hK;Eq&3oVqT^|+b^VO{JT@VtJoNVpe+otNDLWt z=ytKr<05CT-#3RPc$3Cxy32IN6o)~cZKx<8>{g(%1k#&c@ba2w_Nwu_;WAk;!5O}8 z_sRE9S*nXeNQ|iPfod6v$T)L${~x-(GN9@BTc1(}2#l6yW58%Br6k5i*9Z|oP(lHb zkW{2}z!=>~r-UM{bT=ZM(jlRwbpF5iyZ8R@oBMXJ_Wi^;&w0*so=^F&udf7Z&o&00 zZ>Ph)JU1MHBhw2O(N!gfoSSle;|Ck~zE@gNp0ucZ+31+c04fx$^y`mFpL_R=Gn{iM zeTZ=>BX7s1AB6LsAsdZt08#`$nlUX}0s^BK$@5s~E~CSa-Oe_-dJUia0)dA_qD>YL zEU|8Opw%*W<;_l~4$IxWPd_{-pWKj@G3afWT)u;RTt|HN#!VqzRLX((={@jC7k<0l zV4w7jTt@6;{*}MCcw*tYO_FC3MJ6*jR z#b7{?1oeZe+HASV+%kA;WFiUvEPexD4Rm5GbE@?B*g^iumb~95vYnJH?(@4*RSIMe zqB$}l7y;iPJ+aLkaf8@*A!8|iL~nFyu@80m)URyr4mxW+g5;0~Q*NgW3u3VQ4+z~R z23U&Q{OE5+uw=Yj2vS|v9pz&zX9lem-gkgt;8bkQ;^BVOfIF&cB$aJ(S0EVSK$(k8 zINZ~1^TZ9wIX|xMfPGoc1`p*?j#~0+96Y3t7Mljx)I0nHh)@uv&J~C=({SA9@fl`d zzz;ds8G&zNa`IG#EYrf|_1N@}bvftMk2a9g(W9wInoKM-sUJ$0@#}@PO!)n z53&V#Qv4^0bct_qmOXMJV-<^7Xta&2d}w&*sA5CDes&gwq@?IAcVso-|Jfy&KEIX5 zA?eEEKh!r1Ti_%WS-rc|>|Udmxp`}ZZG_K=?C=d$+e}lRPGHMQ?+n~H_)<q^hy)%Nv5JR#t|E{15*Iou;iZl@?n zQ~A9am^S+B&!Uw%(xuDt5Si6(^T;&>nBL>YU<0lyw8|+_erOegfp;-{c<)p2>AlGf zF6D7h2q!`ootH(eygiu3B*>!1`*owBE$H=BrJBQ4KF%%^^*PC7i0n>M12QJVrRf7{ zhHvCXsR1WSjQn;gXNO=Ie{7bnI=)t?Zu7BYfpU)$U%WQK`<*i-oWZ1I%oa2)-j08R zI+*f|*UlP7LRF!VI4&4_{&FG`Q&8P>w!P}MJMOW2%D~Ko2-ky^-60$$vFj?q6d)j*EPublO z1PuD~5gj5=Jy)dkso})K!6UYRoApk?$>|R8%3dHbr46h9KLpv-c3qGz(zI=f;J>FC zpv4D*>kIZ72z+!V3|ikypSn zbi2I^j^bSYmj5AZjX7aVZ;l;3?z&lFmJ?>miI1_r7)<&T9jTWh)ZKgdyEyhu=(&_j zpk10lPj6baFt_d<5H=ql)i%IopEJXtmTtnuEBcu>xL`5U9xz->$1>>x55;~!-3sxN zceWDY@o+BudtLpWWezm(@X#b4_5E|a+?uaS zRMd)W{Vbu+k8%m&B32STmHR~SizvBd5~IVV^^Yf@m@j`ZCRds8(afiTgLK94fRXZg z=AGqlF|SnE`W9w?5<48^7h}9U%bsxI>Sjo~iMf~74V_6@uioCCu8Wfiwxn{iqG9{g zp}TlN1%GQtVqs#P!o<(Xt}k@%_X7WSKBdk?3!}rOi|%wQMez6$r`6fpy2#T!jxQfC 
zbT?9coCLId2T0f}C+;bBKNtkvcSlQ-1%1`mwbhk@S-;ol$!_)Z6lZFm-sbe2c`)Ig zf?w=Y1h+edroFt?HteDp{W2j3r7?GloyKO8F6uXh~X)`?4gllGa6A-YGI{PT*x z$ie#k*8k1y-@HZ!_)p7txg%K#u8H`eT#ZtX?b$!3@n93v{=zC~=l!sP zi@qG+nmo5SbB#s3QBd%7CN4T-rsgo$OzSFn9Sm#R)t$OhBdq=!`06rg*iUC^S?V<2CaPC1Okw9)&XxUi7UGWz&HYXNj0(pPEd25tx zi`EZ>UA@(6%*LzCF4f#nN%>iNkKjh#{XD9-Y){Y|a*os*ccXk~Uw?jweZ!#=j^}ll zbJ}8ZUF?MUsiL^H>}Vp+Do%OQ+EXtRgkJz)!#9oZnH9_)4h`}YO6yD~G-eh|kE6V1 zs)*NK59InlklMTZ`Ny;)Y-8HKt^~OyH9EYXTe?=M?+0Z$iO-}D=8-`YTwAkUDmb?8 zg||h+Fplh6A!}**b!%RdH3R9^CR6%Tf*Iet6eJT|kmr>bLoKy`Ge+|#3SkyPY;CQ6 zTqWMVq^7?m{Jf3%WhuW;eDpqNuR1@mk6qiQ93SuD&rvL13-%*UByYYrvqK3E>P z3o6_<-Q8aQlHf+H*X3U>|K3!+u-Ct}LrRonq z50%nv|CEEobGs;p{A#hh-v;H6$wI@PIRo-{g=9ZC zLE3}m0b1F%uc}WC8tTq>GH%>1ef(Yy5Sx?r*-)T$lbLC{@UL4Nczw5o2Kh0tX@wnd zO{o%=`a`afUTX5R7&z_zhAOAd>k9S+p6@Eg>MvFbwO<^t=-k{_UOv-6S85nzwoOBh zaod>&_ql4I{`9>vAIHXZb30xcOm=<$XtepMO77=kt)-vO zA59gULb7fk4paQ~`yozF4>m&^u*nVCXj6gN1aAQmRc_z=wIk6w*xGj)I)SamOOoQK zh-EL&H8JpRg^p3_er)Z}{nOjNDSDO{@kvblyFGU?rta_0Y6~hKQiu099~frtoXX)H z2CTduvJxh34Bj_YxBLA4bi@k!l@8KoDU^GkpFhh3S#%rn=zx3IS`t2MY*%a(ew8gH zwB1PA9(~lkG^85$asLybpcvO4{G7KKcOWO4X#Udu!I%QQLF&PnQRS^3v@ zZjkwqd!MpujXtnETIl@;nEp{3+#EDdAecE4O%8St5{YbjPe0y*QZ#X3q|wEI`)tBP z`+sr1^D7n}RVg3{W$u(sRA}bpu(P+~euZ@z!m%t?LOroB=_N^Ly?7#2z9UrG2>Y|Q zfCd>BhJKeNJYB8}e$0iDRQ0%&QJd`?wzvp0Y;67d%!Rhn=q7Cd*5ztobsRLZmk{F5 zGuw#GPc{NyNrIvpFN9Fw{A}JX2e6=lMiwERM*qk!n4|12D~>_WBlg*+7td*5OtXZu z<dl5+$o;nEZ^pzaWMt7znOKObzp0i7vB(;`!cnpY& zeu64fLM)$(+MAH_;P|#j8|6L9Z-a$Ydwjrt3#BPd51(OAy!fFx73O{Dl{4cKeG*90 zA5;%-(YwtL`yc3W0+{nKzE52ZvF)LMZn%G3V^gmj&8SvR5_v7Bq0OGE%hzfmQkF9b0fan*4f$KB{A37@Kkp4bJDqtRWAZ{c(jl4hG~! zy|ur-gUs83K3^J5qU>u?;GtNzL;l0@0)1j&?rQXPipReaHB=D*yOz4v$=4jazrTN# z%PK{~#oLo!!O~gj=AqeVYo@KCUBiM{c zq0vA!Iu^+UN56m8q&=h@y@UJwa56gM$;!>3M_g2;UzoS_A83lN1r8b3Ju%9a&ARA> zCsiG z`=YtQ6`FS3udDLbhE!yzok^~`#6wanLZ5o zji0<$SmX42669_+s=GEzQCN#Oips}Cy2^c|Lx`@62{IUiq|^@ypYQ}T@u49(%sof> z5iM#h92X^Dm7YqQb>-53b`Z)q36IV;D1%_?&I`rN^lAk9Dfl2r9rkcGXd%z1KcYvI zrLL+qM@5((KY3)200eiw>mWw>$XXw`Pq#ywQlwm*=9}2lVuBNUp6)CQ6>ZcO}Nm(wuP6bn*G8-7n}mlQG!2X#H%~ zyeb0FjvQ^bv@$v2s%Mp8>AJcK_R+#_0@YyZk5b%+IFcNoHKWSJLBFTh2qu7;WiHOR zjAvrDDdBlSPJ)zjTb_6hU#7^9)?*PpO2H}MN)Fg4VRtOS25D|VA=oL_&KBN84Sww; zwy1H}|3x2cI+Ira?xU}qRMOSVX|bmSl`0;Hr*Uh#nt(h@EcwUbIQY$=|B!0#n0WB) z@1TryjSYcgHWiCLtw(jmbuheebvuDVCFkG`Th&G7$+LBa9tjMX%Prphv}6^Oxo?if zC1&4xe30VleGjpJjE!l@o64^?!|FtNDx;+$iYc zej52=bdqQW%7iDZusUDGer3mIFRl^)X!_!&Ga)x8G%8xKBQdJYE66bEd2oLF@^ALL z4Lu}i^brGvha^Hw_p~2R1T7 z(Bv6`G)an#2Zd4)MLVFMb7Nag)#Ug|B<&uXt$+aoX*ZD&A8lo-|7T#@M^WvecmRct z&gS?7sr(u{3eAa50-)gn?oHYHjsZI#c~9*9sKC>vqIrBVKUO6q*r5X1=hkX@mR)lALuBm?<%iZtP@ z#g%X?N9d%6SXi*pgQ$UWlH7|2twkSYqLqZ_Npevn*?)T?oAmX9nh3QZInnpCj(n66 zz4GwCCw8$rt_mX!YE|EOKtJB+KYU|CT|Zp0{8N7YKm;A9_c?T%plak(aH3nP^Y(~I z{`o;(iLkOiCgJPip#>Er8pxC0NGx9w<7uYFQG7|IT*U8kBaTm|Lu8OF2hix-53=lE zUtY`)W^;XB+FqDsLf>u>s*6V#QUA;{5U8i5)7VySIBlXo0-v7PB>5U9iBNZCc~B@1 zxWBnL0Yrxy!!Awils=pf3Yqvn96bFCllfy7WVohyA;&PNUC(Xz3AuG~Q`jP@St*-a zBZ>F>pay_FFk!+L1SiV#z+=}ovvpFy(?o&0OP!xCjzjHCe2!*3Kp;@$nCb}eeRad_5=a4>9r04lz(uf_eoEV3O5$&Z?f1pxnci(g)mn%pBwH{vy*D@5f&?#;&vbI2*JN&qx*Oc33 z9(&jw4wXEAVj_M^cs#82JtwWLkv_c`>3Zb{zco&NstWDa@uZ7+iD}})AJWKmOrGcE z5{%lstD!KCaXrE)hb<`KQHqOp4SiaMIVW}-9I442$XM69E3 zRC%jd+d4I*Mj5MGpPN$+)c{JwrG~4wNR99T%7|~e&J(JVUR`K~j7KmXdU7sjq+D~g zZwXpFbMwpT?4o2ui5PC(k$&{UgmC3Utf4zMWoh#`{a{8+?N|o;XpKM zvmU1{sUMgc^BmY+L|E}tf)kx5Ef|ujwlQ3}d$^~d4VypWz9Inc_G4OG2 z4k_uI#nDB&6U`(6Zfm4HA8gb0vpAy=GGsqDs)Qi)-Og%os}~V1C^u&BxJU&z6fP^W z<7xff80+Mcvdkoy?Ven&TeumSN%2~BgxW4I2DStDv>Ucph{6Iu)%(>FeePDaiV`;pgE{&5XAT(n&w`UE`!{}H>Bs=(c! 
zD)D*!&5U4-+#ga%hRQhL_am~G2rPr z2-vvs8jigvt`FQ8{j>yV*78-Ze&YTx{)e>dxUQu|*2`XUM+P0+X-0Mg29Cb&)#H38Y{O9=5qOGMHm3X8sUX+Vzk0au__lOaz6@VsE0~ z3V>ySexjro0&6Wgk7kTgV98B8waPd3G&7Xxz}7Q1xe~M#(5tuo{WkP?z$(`3QzAHK^F%cU~!;=N5tP({mZ+3*UCR4IGCji->o*PUb9{fSA?RCLelE z$Z$V-cip~jRul2l0OAB5G`TBLp=Ly%G2L!m&oF^hnV{fWY}K+aiIbaZbhQD*JrPjg$Ka+l{a6_;=ZcX=P(y8jTaY4mNY-u&w!C@N{_9|d8#LnocTVfh>1Fq>3S~)A0MBIY8M`Rd;1)fSFc0?fz-Ww z_hbP{;$o#uCA`dZgbIjT*O%qy^<`0vdkNsxNC|>*AEJiby|~9~gD>me_9a?3eSYef z-GkJFmpJj=@d>EQ?u2nTk><{zR}{y*WJ!0c*q!PYF_Ar`SNBqb21|5BW3LZ%S z(5i6Qc4D94nX>;(ABn%$ksgW?b)FIF%3GO~FY)*Ew?Y^b@LAOM)gnNG{?G8PqmS`( z2&@r!m*hqm5KseCop)2)!bp~H;6vWpKWb!R9{&=HiCD8a^}VlE{>?+-N>S;K4c0M< z^E?_z^GY-}dc`%FT1j>H=@FVfxCuq>C(T4c=%QHZMaq=1CR=KS9rl6c$4xTeDoLDK zBhf4`{-1qr{3WjpJ?NdH-1F0g*BL;?)VZ`KDY&r()4xztC;$`gC*2$o)|UDHS;CRx zdva&MV4wZeUi)Kb;yCP{E)nIfX6BmRpwdVj)uKu}?qre8<67$Hy)u}k@9N63F)}}= zr13@`U{ck}yDdemud|MWd)pCOcSk?gJsbPPL6d`$Bt_C4^xH^S#iomY3d)G~G+pgiGc zKcc#x>a;h#(1Qj2a+W0^jpIbmi=(1UM}5yj#;qXBcY`??F3Ojq)zZXmzylx2?>J?s z)pTmF(BPwU0N|th_0urBC}QpU%S_)9p{}Q^6K?@WMDrQntqs*NOr%u~a(M3kgS&8h zlaY)IV7OXW_gW!S_2JhxXKSgIh7{*czdS?9eDaB;&mT%&btJ70Ly<+hx};0I!`9+; zlLrkltz)}IEMo#3JVZjgATQBYoY2qLxovA3$w$Cxi0mg463oIzHu|@b`&ITNOlZLT zMlwN>Pv$4B<~e(wkJb>+XB{&sQ?dDWp*ekAiyD>~)R0tEkcWhScF?yH$HnL(-yd7< z^a}U&Iv$Rd5FrplDzCAwMoAVyWiN})MH$C#e7*i73!t5rn1zQ)(Ycxdkr*$K@g1|QS0p81kO7O_?xk3wa%~i&;}=Rc>7l+m+^6z(XC*&UA8~JM-`u@j#>Qw zu9xnVPSMBh#IfL$OXyaY!SwlWt&y~eunsjJ;q6f3yrHTUG=oFYY=BAaN4Q1YuZK5< zS582Fozv;MHXiJvgduSf&Q|F*Yv$8rN)%oBo}*U73m%8td6;HRI?c-Iu`kf=eEuv|{ngHd_|CO#H8r5RE4+ z`(dTRdc*Y@LbYe+sqnNazuCcpU2O}2V5>vP;~T#rJ`SW3$K}-MYOw6BU;432vNC@%9P%%rn<{ z?y{(arzses`D6YqF&D7Jiq0R!0NI5!tS&nPSQV5O51$+hJG;4VV9J3rpKS@7UQ=T9 z<=?4CQZsR?VO&rtrpVjWnw|XcXS}QfS;Yl-r&stZ+8gq-ahFd-0XGFloI9f&i=>M( zqolkEpZ_P<7bVa)ApGSXx(sK_3^eD-U=GQJ54@2hp47jyorshk-^{b@`^gVY(CY)6 zdzZ;O_X>;BZ!jxCbTK4XbMwCx`IKLY=FarA*m-H6^&@;yYlMntg(x`pzdj<)34{%@IqKUASq#JMf$X z7~td;B!RT6K4eVedWstUnrmNvef^jJbNvH(oc7nA_JC)8>;z6ifwKst`YJ~P;Hvi~ z6eZvB*Gj(o^y>0rqAE&H)_Is42wk%=e&(HcBDB@$a+=)wtgLy4#*Er^2ST8MByr*I zVzBg{K(-_r6SYgrRdxj#NJCP2ch0%r)qyOxlV#W$&Y!rvG&}D(Cz18~C>YhYZ>2!QMB~x??jjd9K71sgc22cw zG=^bFpN+MnHG2^L%ZI)B_t%!MOSe1^UgWZ(5G}|%D9pt14}}FWv7Ok<*eqdcob>Md z<@tk(RTcq;{ava1#wfeA;ncBC*o3Ra=jH2Cm2l8QZ5*>`w(Y^*Ar@r z_nVp6FWv`kmh^UT*nBa7-AV8X>%kIV+y3^B)hg=LNb($T*+Ul=;sQT~Tq;BL` zXZla4h3<2}N7(5@WpB@M)b|=Ltb&UHX(A4K6+Rvg>b}#%bt^Mwu9>EjDqlh*8_rl@ za-y=2GTkFbj#kcbK2`Io`+P*m_f;VHIiWP?dheVrX)=*t;Ic`ykXW&VtSt7t_eAdOhg zR-tnxVW?js51;+f9eSk@e`bc^Phq=Tc+_ituh&V+a|U%fx;%5=)tr78ZQ-MX26<*#wY z2tP?8UKBn@TC{IFL898Sb9rKE)esZ#3!j%T23xEs>qbT-w;V8fyNSIs#q+1?feT{a zKiBgWA82LuGH}s)X;-#{nJx7%EXtZCWjY$%;4LnA?3J~bwY{)rfx6J``O?k9JF18G zCq%ZLCS4Ac+wK$5Emxw}5Z>vss(}crRc$mpESZ-05V9gZ8yBi)jI>9de1#)|U2JFF%*#v9)V z$4-dQM(Ch3XZ+^o|0JCpQ@P)~rxK=O+UJu${f+Rmg;1}h!+g-A?Am7BX_byQsDtZ` z-Wxu>BGa>d#hy~}f>Kdh1YSW}m$a2gsv!Y&x^hcR1-}_eRs~nY{~1-HGofEsCah(B z&=PsuG5{*1odhEtxeK7OWQR*RQ5KD_ZU5jS&o!d)_zHk!A@i?4h`z`8!7%5_?M)s7 zzF6V)4}7llI$q-$b~eK6RW{l%ihObE*FhW4pLJGh-(6rJXC}}{AaHE3ZP6}!Ax$!! 
z`Q~0yR?r0fUw9h@*JxZq!VittJU|S&5dx$Y<6m!o(;(WgDxrVRp1h=DJNW@HlW95I0YjH|;LH+W+xD-D7b+<6oGJ9P_Syb_ebr zb*+!(Xd=^jtthp9)};xG;*_;&ZstC}BjhWW+!o+%uq=DP4PIfPN`1=Ahly-NTaOA% z@G>b+JuSfC5mOaq(rS!PthzdiID9^3{VI{1onQaTnHKuvg3wd9ko(FE?rOfzc^0tb zQ&m|i6ih%knuSOG4$eCEMAcgti3KwHhR$(@AQ^7(HhsrHcu z6hU|GFJ-U;#;T8p1MN)iKhO#%!9EWM9+SEJ0ir?uP>Xf888FSRs{Nk7 zjA4cAbHOeW3%oX+`PLg;Rm$2})0Ny1h?RWin(ZKyEda1a%lIT2{XL4}CZB!wZ)o8k z+3WEIP@w+zL8hjn1~i#B9)mte)0Jcn73hCVTs5U9D%MFQ2xpCH2LN*I-4zW-lDY$W z-9|axLIcH&CQD!fq6v?vtwn#JJ`cY6;5**f_g>8J55u0bCBd(yQc;e+UW_4gW*yd^ zU!kuIsGutQBIm0rZg5dBqD|^&_^c76S$&TQDw65ai@4Se*UaSSqqxQS1qi zwVrwlKWR~A8P{%m4>?6;zOsrmB*pUajb)iADE3#w!#rTL2xs+@l)4ZCmo3gwk@ zdakW7u@_$DTbO>=h_!X$u`wq=Uyn&zuL#g1$`HZ*0RIXkSGMcwQ6>+F0spL*fc^!^ zpQ#&vCJ8@3vi^}>A}|-TzGXnQ31Pz>L!h#cDx-mAvrl+KTx?CArUGcB70ker-j{F8 zTrFRDkFdzyMky)nthGjfrw&+S{tDlt*dz&SD~1@P3%my=kSFNkvjy(hNbr5-5W09G&+i{BOigR; z948Gq$Hlkm>cO>QRf}_TI)-TP!$NG^ZdbD@zZ}W2n5`-MKyZpA^zNj+l1MW|U@HNJ zejI7Y!e+2)rNEBF89Z=vd?tFS4oX1c6qmP~41AaJpPc)PmAsgtp*Zv?7s7~V5$rJi z<(1W{+oKVAjK3=GQY1n;`<)wDsxlJI{&fM&ntyB1zgtYN36$|m+CsopUg;gvifp=m za}sFsS`2^G;$CB|wGt(wrq16YnaN^gcN7((Q&q9B)W1Oq@xd481xlT-07#~+B7|n_A$QDN)fp9ITPDCJ%-ff2%Q?kV0^pLl(7A45xeZ0 z3XI$NeOVJRrc9&XEg`30g|j#}0!>vJsGv{{`lE{NL%m-m*{`pzC&hy6-6x04^|o5fPMeZ!?l8 z0WwjUNZJ)o?#fiVICfB~Zf=LDrKN&XPRLxFP8M@K`1!-anN4~o*`IVBKF zz&A{>eucOrM~!D+hdiSs9G-f_H2Cgb8UrkgL~%LU(N9M%3KgXu_dQ-|3j1+`eUAacBVeE(TO+9mvFc7ees1iiL+zfpR8O(qUIU{tfSU>>UFg`ydip76QN*!88=VPGWcfJ(c;i;!brIF}VD_-UuDb zG`;)~mxdJ@IbuqUhH51z%|wdPAVir4K4Da0`dz4}COouvEuLtqyTbIkND9(#L21ka z9yf=Cpq*?d?^HT@?!PwOV+|4=E}#fvxVW?~;NlFxWiXWSsYeJrj$BX+W*~67SH)R) zm>+k^Ld}aT{9@ta`~HP7KoGM9uwX4u#iCA*Y1ZDHe)lIQm6=e3Mp1~>^%DK#Ow@ql z$Vp(58p!9@onL@PnVz%&>ZP}n(!fu@l;;m$W>~7sj|sZ`ot8bPgDy9}Z`yWEC<5O{ zX1Z{-$6=520sp0rW-~xq&I2YDNFoe@eI>FV#gzUAZp3?QhXnX~$) z2{bpfnD@Pow-nw85XkMsQ!MmX(%vXxp4jD_=b|N7=MtL>awz{8`*3?YcTOJ%8jN#) zu^H5)0>dJs!HaOsbdIaQyls76?MS5MLrJ>Zjfkyp|0qFS#eFsR^DeU$hbBOMS3$Bl zrIp_MxPIB?6y&iSxNEQ5VK*asTfyizI4;i(;G1}MXGJJ6K{e!`-(#SE`wsZE427O9m`T7rL zuV+(4@%kb!=I+ez5M&6Dg!0Q}!tYW`ISf7?wl20KgN&xgAxB4xui z83(fSYY(YeaFMJyOsyoYLo=(>s^+gR#pEL;_qS_eBJp20e@B9;ZW&DRkU`d=2(~Cy zy>Mj2B`sAMzNcD?Mz!_`aWFHP>HtWFyn;qfSG|a}H9tMU)rUkny)9v8E@tN1mcSz4 z^I}If%-^Baw=>vmxZt8&`393wv29@SYm2FJ`=r_6e&?vW8gIXnDO$yP&Lz5+6G4#R z@WvgQts9?pb3bIUvd!o^AK#Hopj*rguDmt$;6q}7=$v092USxISs7Xww9;!85r%l$8^-qBT} z&u<+ta%?lm(<%Of9vEAN2>h@i;e&mog^F0S1#_F%0X8Au)QGC{7d#3jW_uh7poKah zXs_xt^*gL*RrSg@Bk8kmv=AhlDM4;Du9Kgi`Qaq}sFc}kH_=|mHRH_L50F0(%ea8^ zl;|e~Q0i?}V1$vD_E zcH^{Or=S8iOm?cNnF{Q@i(YwG+bOfKy^oKbfi8;K8YA5?zOgq-(@?QNT0f0<-+5^ zM0$zqc1C0n5uDWW#GV=qx6Asb?irdJ``{2}gRRixRMQ=xMIf!(H(%3Q@1hDLzWEXr z?rR9X@p&RTNse~)Z=+}@ z%~UOVBcqyP;$j^_ppV{}N8x3sAjSnxlSp%}*y&KkfQ;!~J4-qxYxE7OC^?Os@2A+= zPbU*)LKqkaFO+fY2kmA~c5WML(XdPc$V!=!;Y)QPB#8w6u^`+R7k9;lw@$!xW2@rx z|0_Y&1cR|zBlouq&LEPk{+}%?_>@CzxE?7AQ}q?`SLT23Y11 zEw$~JHYKFb#8eU{`415dJdr4RnP7G9>&dP6o^ zS+Pcz(ovvYhTPt)=eliydX~#8BpFBz##O`9FP{~o7<&u4s<)NZ%~9UiiicU&Y9OlB zqFgzfT`%}!nIeYVff{83(BxIN#3)my0Y)1(O$%AL89VknMLw9lqWXV0#htskBTHED zJBweNJ#@z4Nk!hVXWzTNwQLyEAff@S)ctoM-`^kA3W3-}X(`ON0!_H6siQ*I-v0$x z?0VQD)OF^<^vo<8hyGV9AQGE5y3aZ+J(y#bGd&m98*`-`xBa~N2T{Y`Quz1gJ~vTc zE#0r=HO-qvN4j+nr21xV&W_y+It^cc#Yx{!D6e$DRFRKTaw>ES4K7Ko35?p#^wwnG^q5a_}F zaBBLubJ_VJ8U>Qiwzvl~UnDU>vYruFa)zkFA(@J{_wzv~dbkPri=ru|_0MuS0W)Ei z`9i9A9Pa$>efJ*Ukjsap=g}in1qyU5f*ai5wME9m0^xnha{70p*k}3D5e4zxJmmQf z*jEJii!-tG>{Qmumso7<-PH>HH?}D~$w1ArqVeRw?g@4M_PN^e>G@w-H#vR;wrF3;8MwD%?aLAO?o!>Rd4(>Yk1eR}Yjvtw(v6yM^f$H5qReZ3X-rOdRZY+j$K&p;Xr-?`mmmdx7IiiLj%fo zz46teEL*Vv2_!&`%_aY}I@00ac4PF{lR)}t8oy519m#0F1&cqwZWURTB1ZFWn=ahK 
z(3GkPsVgSE&+6?mI`;74hflSCw7!c7Kc_etaw69=LUA!IiqMaES<`ksVXvmesSJN{jK7(OWg0%rOsr*<;>pDuOTvrpVlE(OYpkTEOExk{ z4xbYP8TGq${J#2aWonQ9_U43!9$}W2Y2hCL4j^LxH|APQ|8L1=|7Xdd0%O3-+yH~a zOU6h=$vI8}Dh^ud^-q;?@rFxD`w>mW(z-Cn18k|WnAUkdno}jHCG{WZOl;y;pYNst zVnz+!@#%~5Rj>#*rVc9?(5T`aC^~$Yl`j+Qu4c;y`2s?1%ClzOSm3 zrwDk_xpev`cc}i$%Z@CABkd9+WX$t#;ETVMScAz|`Ev}pbHB+FqH&-k3}(32>G>tW zYlr;zLzeQ0(weKrVd*|q*Aa#i5hm{+i?xwGc`f1KVBmcdc9-t# zAjk#1Vf6jw?yHAhql)KAHG5OKLe?=^t> z$T&ENB7tWi7y9LSiHI2hfV7k2w;u%8t`7=&#dXEj+B#e(r>#4|xUdWH!5jr8SGTAx zo2ST%T_Z?9T|wn9Jg<%1! z-p;voE;7yC+*DwWM{2l-amn42?jABs-d*X;9{NeQ0~Ilrz+6^uvGa0(4x*&7_DGj6mM}AHb(4&_Y%1*eWYQLOK*Q2vR@W~_yT*`eLCHE+( z%rO?#Gn&4$9h>X4c_^h`XM6+O@Q(O*`>!hngEMD5do{l*F;6_U4B>j!j@o~Dakl+eID%7U zs4c>3DiVz_lWNNPwnkp!RXyWQ3>I=fVIqq%_e~w0u_AD!v{6f00RdKi|%~&Rgt4=7x zA@>SNh3FadBn@#=Gl+eW+c*^hozS?;kiqi0ik6;DC@W{WO|TK)b3_)exN$pJ#x+fK zQ4vK=8B1O3E%3!rFr1H{yG2MHrXi2)9y(YCLF8IAAgFBb#V7?+GiN%wXTqGBsAKC$ zNn|xYo5W)rpZNCbl|ysAFD`Sx*7mhXbbenh-O=bmqh7WTr*I(Np8cBQ>#%oJ@+3W0!SUP9WV=`$axoyB+Iw=Ht!ght?9EB7qOB3%m-7m zeT+Hkm)pCHn#8A6licu6G$TXj#)Zk=D?_7M?fC!xPJZ(fdvTby=f&kC5Kv?nlBWTO znJnv4m(1#W{b<|#5c+}{!c)itL6&F_rRYyfnKH^T1?1L24&NwQs-5#X_H7PT%n}YK z^1pcbg%dE?;OqqosQ)S(70jT3GT^ll5Lo{1S^uV2P*}cs0z9QUP~~h%cAK0MGERb> z=s7&}0~7D*LrQh{mkm>;Z5LG}45OFM#m&>MojR`TYk2|$h>8vR&9RpaTJ-3HRXBjh zI2 zUs-S?W4Xy=;nya)jFEMWnYH>-BvW+&5~op7Do=HYZ~=1#?iZnFAxN1QD>=%M2HvDw38uWp&nI^rXc4r)%hY%djm5Oy6~X^ zYwjeHW%ITf#Gh*U9RWV--8?|#xyNBE;CTEA@rEnaOqGqbIpq~B^S`|SfZ=yp)W@hi zthiK<(bWr%zIBGGXjO|KSdRMm<`bh`@~Wt#&?1{+&%&y4{};g(DM&S6mR2zf{$JMK z>Y%%gO_^^jC!`8OIq7w-I1~e9?d;yJ2VALo4x}VQ`K+FKTPt??UD$5!7+{}8gNJa) zPKaQhA}~^deH*dx{E^%a(EsF?43N=!(DS(Q(;?=%_ds+JrAwr9~=01e_r84 zO#SO#09k-^nR>03bWA{GEiGb0sZuwC7gIH3?ziK{l-Xdg85g#@!OqYcP>O$TpG!D9 ziv_<5Ya=l7Mt}lbUd1BGUqWE(-$_lA>eH0g3+WPeVZ$@RF*b#yxCy*h`&w^Ap56V;micWGXwuQ*COJMgt7>=sTO zW2hQ_-NsSg^5c{`8;B9{RBX6Cw#fF|!s$y&C@8g>(M)OcHMee}#QVb+u^ll2q>!4= znOxtk1%MH^glHKQfyFO#0E1CDX@ek7Jx~J@LYf@D)cMQ5LR%Vo9XCr^%=TCOB-s|=vA~ecE{KesnVSnMdIcDIIu3V3mBm_d&_9 zwgEyFr)?IxdKEao>&8RtpaIG1O8#X|%KX6R5Mq%nu7&u1$pNEx#AfY({(g`EM(9C9 zCyIZE+f;&w=tW*k&Hti%xy>Lbv`m%N`=}VaN6+L-_Cr9w%H@M!W1~FMMnGP^(UgtE z!;y;U(YKo&VcD{2MFJL*AK2RDuX2LfgM$IzECotjdTJkersh%az5jOaC?O(fIYWD5 zJw3bLsnRRSG0`BFwGMk;4LOLIKVDEea+BsxO1>;!A7LsSt~Ok?w%b?giF|Ajp@IdO zNF*bq4p6xfUQKs*MYpt%Zw+oK@urM;l^E{%XVAztZc?fwNXpJg+_>7T4IDCeP%2fw zK~?@51a*I`)Lp89Uaq8$ZM7tBSuOTg`evLzoy%s1?8z)eO7ij$QaqIoGxq7>4o5@)&FMyq6$RE9PJ=ON5Wayej&rQK5-^PUyu z>bu-pK`$BuVoh?De__C7^xvKftc**BEViy@JrvZ*nU=)#&QX6-PG35BD)=PsRmfiU zes<2EBHe{gicUh>FDZnZ!orrhM1^kW7RqS~%VOX1yg@wd?#+HnH)gSrOxCX~Dh?|G|h|4?xTt1Ab9dz=u^;g{;7=9zzGoB*`^IoHW{(DP0( zq6{4#k9+fKc&%0R&f8g>D*J9?JbDit#y|2B;fQu|W{-28sxRVTU!T{E=C^g=5mP>^qdCl$SG=1+B zA|~1M)JX2;@R)BY(edvC>;C@IM}6Kn6s~xCRYwm9zl&`gRI#+gIF@YpBAzQLyXGBp zX#TJs4|?}r|J|r-mA{Oa(NmkNNIugaS6(0%_vZ-hHtxqE+n5Ua_GHxD`G4f5*9qPJ zgd20hv|rPnv&H(o)-^$Vp8ECM&9Y+7Gd=Ekqdn_X5g7ikDQODn5nP%QS@Gl`5(Fjk zt+*V&b$7-Ce|N}V9ycg!lyP(HVk|{F`gAUJ$yMa)afh0E=^*-&z~}R*SwpvZ@2lvc z8L$4;yI&GQ##u_jFrw*YhH}VH;U1*j2UIYXPx;TR#dx_UAJNZ=X?m$-8r&OqyLpH! 
znPARfGP+g}(@I$ws{i+5U;S-Dm$cv@`~?;*CVwSSSqaj?Nl_(eIw_S=|FOi!PfZt< zK=`v=A~M|9+XC{Qqxef)siKo!imRMQ;04tk>Drg}B{pm`dw#(K3gAN8`1eA3p%AW^ zVDWHX6uyGvcP|3@Zo&)z0Dnaw%rDr`A8T(%M_{68XsA$VUAe&{i(=@wxawx34{rw6 zUsdnm|9#(!9msq*WO!(hyG6k)*A6VU5+2i z6@o7L_Hz2^1$$|~j+L7PzgyPcuqdP^*BeV@&3(pV+O8NYjl;xL>&}YBe#?APKeS&& zQrOcwg+?o2iuv3749XVv{dGKQD)`%iG-x=}P`0qe?Zu<)6!<2!u)* zMk8gcl$^*?jFhp_-W#s@w$EeFSi=Hi1*`JJz)WfbTJ8-QQX>SOD<?7U+InV2 zs21uTjRgsK$vlo zU0+ERl?Hc`t(I1j8SK}i5k6e45e@zs+43{lSf!vKv){*mhlS_A8UE6 zJVG4b=3*viq37-Th;?vE+8blrt{c7skfI-AyUGL9^z)7X$e2@K_pP|uoXwO9lk+vb zam!+kd%mUwS;I`0@cjcO>)!{ACw7ewf(sn%r1Z}#=Kp@pnoWGJr;mGubE|`)tvb^F zMDJb9R$C6-{TUZ-V`_|{E72EaS`2D8tR_a+62ZXC?7e;oC=pgkofk))X`IJs;3}gs zCo6Rb}7twCoF8YI@Zxkyc&r>qn5LRn@$)I>%L}g<8-t z(V^$tI#B*!=#wAx;sC@iW&Mg97>z)t%iE0uX9MHN)!LNF_43%-b8ilu&Sf%G@od%2 zHa#>!&HsQ9a6TddT5P&Nq{*-PvhnL3b1(^en%DECzfphxJml2@&%hUR zJePlh)*#B}Ed1PX@~2d+8bb`pfO=iR({#3J5o}fBq18tkA4$JQ&q2DH! zFH3B%>tjovQr7>9?t8Z^{rRPTt^NFPB-tKK3hsE5O2qv#Abu3F1M+8k_{&Zr>`LP% zA{N&3uGmNH9B#g1%pPD+*KDQ>sz%Q4fwV7Tx`44XnqZKY;p)J@sp7{MG-%AvX0Omg zbqX=^!XcDMoVF|z=h6SIO*Sy*Q%8Ti7X3gRtM;NAG19mtnh{EidNj!9S~plA8sz`o zGEt@5G2l)29=rWiT_h5V`hpe2B$Y@)g^^>p;B6=Z}6Ph$%YdYZK?*- zMn{vKZ)|7tyiJvqkB*m37Q10dX_Gk~2>AVo93cF@n*apPf9fCd6}qfYymUk;i^j?M zV=*M04|zS z@vIW2X8<3o;QNUq5DTSecv#?U9_OE91$?9zl&C%jL(7nun9lXy(CBFUy}C!thp|MT zlrJw9>g{Fd!gly;B1s|P#w}nu7;1p^&*%9AR%k%Jj9;MYTTDzdx*jZX}ho;TqM zB_Fk5IZ3gnax*Q2?Ng9DaqzVT2AA@?%iN8V+C$R3T+c{kp<7p5{QLlb41q&?TZAbGtUzy-)vyJF%f8V;W{%P)Klr3?1N)?8I4<7&}g`@{2 zQe}&49u1nlG^o{YF46n5W4Bn%l(j{QXaa@bdN8jodF5K+hO|cM3i*$-$UrtH=HCg` z3rRKS7~f62xFEmqbV8ToS8Dgc=qC+|kjXplz%1)np3dqX5x zvR(B(-%r6*v@s7Qy`C);=VN8mWsTJV_%4m0>yoS5>OV^+kbs|`0anYcZ~Sr()PuL zYpy=`>Y|HBd&qt=_9g>r$Dcb<`6SweS1cMvo0TIpMnh1RcahQnJtIH&uK(&eqb31z zEQm>~;4_QLQCd1KSM{$rWQ0_X7mMc1ueW-iDlj`REbbaVmkRodP(D5BY(t9!QG1fN zL#g#5pC7lWXyi&?1_<%w^Cy~~hlPye>9G3KVH!1dPskUnN#?RqXGd<#Z666mo0E5h zAdEg~5~j$8andxeW!ci5P)t3Pqp<+B|E7ZPVJjpFvaISdKh(Y`)^wP#BgZ&%KorKc z)R^-o{CZWjz1)XpF%`0#t0Z;pc9FYCg%GVppug&#&ZWVIn3ep-QfgN_C(J@WSo1h6 z_VMd6n@EK@m8o)%Nom1I}$_RJZjDi#V~ztFd{z6chic&);`aX^QW%=(N1Eh2o?aGKJc zfW$iJF@wmxqRwKfigBO3Ms?`WF9vB>H0GL~$G16K%y0BiN8ozwWpl z*kHzNmMEjGP-a28S>h}I>-_uPE!0sya~%=~m?dY_V$KWTLTnU9Xzp(n6I5Qu5@j{q zc1d9m=Al%S{UxQ4YIdfhDg5+D$~nmAAJO{~`1hHPe^%Qs)6s|wEYq1Z7al}c|5MQJ zZ`d>q;1UjXX-{sG@jw4~b99WA5gJJ}_raD>*pRain2Y3eh65y_DNzIrK}2eQsC;o+ zS;0pVgZ%o=E$EPte4>|G4Xf+|t0k}nQg@CTf9P1)e9qg|evMbF#?oD4TA6C_P282O z#*H=DZ#OCLgM5BG?zgwDv=yR+q1~%RzKuVqausHg{s&@QrYiE0Pj^!kB8Ree>BYDS zFliKCqcCVm$B>f-V8k`!!^GKu5MmDa5=B%}IziYTE=dy4vDKWeB})5AMN*sxmBmK3 z`9&(6G?tA1_2GSD#COXiAq5~k zVr#u5KvvA_+@^C@u8?dYivd-ZvZ1^gEs#LCVbrc1x_5nf@I?+ zJfZ?P7qDgKPx+_B*t2*AWKVwb(%^(5v+D`FX@jWM9tlGRi^Hydcv ziSjvkU%E4emz`ZSAQHm?4HFVK`!O+qd7`GHYvIN#fTvbWCp#>1mI5juV6z_2o;2Mk zC1T_=iX$|-i`e>bmT%=mN1|sDBE3(SE$~CwtH#9l+}FRr$UaYEaLaAmTs9eV08`)x zkxwLuC^oN!O(7Of4ac4|^-&UN=tG^|SMkB@9pW+eg0VJ+fap!y>%9}eBgeaRE0_vc z!w`I5bidS4vMoC-=+kk)CGYzL#bJ%094InMi+A&jWZ3u7260JQ_YkTT8+RNEu~E9tBR5PtG|Mk*=0%`SRH!rEmTlQUWL+WZjE-v7Iv8t94C$q$oB zIYa$^A5V?=0#H~#)<~8xv6hw=FyAbncxqqC00>oxhvGr&>ZYU#6-e7Q9Y8fcomZ_W zaHJzkJV75BtT?Lav?*DvsiQXZCt|fXU@%~f+>g8P8x@%!?QEC&s{1xk*0oCg3|%o! 
z?GNy@Qz%JYg;79=uU`YZ27m%3)^`hwo8k|WP&(|6-(E$6c&UlR z^2egqhcccCkpUk6sIQ$p4!3w*ZrI>RQStGjmCTy-?5YCb8msXroF_YlewJ0h0z2{7 zpA%FT`X`t@4yLH;?JO>-wOKY7pb(_7W+Z#_Lw1=oF|lX17+uUktlx%6U_1osP_bmE zZHMqy&tC>_?S74{0B5p3!rqWydMjL=uvvt% zYm}(v5CqGFowMP!Plk+qH=Z%Z_W4@&a;p@B>ZHc5|Xpgo=bp5!& zqkXOIitJRCb<7K|Q|)gRI10M<1jQF*8pGgq3T~E}_T^|8Ur;QRH}&EsTAX#};F%8B z2dCnt0#Efu7)8K41+3Z4^Dq}D1Uf*YfQ|gZL{?U#px}0cEYLy_ZPkMgP|VOm1lR2 zUs!EJSxuNzFb{r6`S~LvVa+f0uBCKCu9iN=>_fOs#=q~Xi&O_?!rrC)NMw~bX>tjo zpC@uE?J;G_MCnWi`0}prs=F;j;>i+ageLR>QzFW`lhunIs7@uQncgTfti~gw1)bZ3 zVZVNC1r)!;U_Q%bDV}P6OLQF)2x0BYFLmYu<(j3xlvF9?Wtp0SK9`At;1Igf$KZ^K+~2AFPGh4Xm9vj z#gw(xlg)!vUQorb{$Mq{!ekK1D>Z>S@8@taafe853&b#-9OxJHWSK}&?N^NNRjAqN zwBQl#N3C^^^J>p=8VYG`VNGBsFhy49I})=Rr=6a@Bf)t5fVQoqV6nwzeZJSpKp+{4 z)u~VoN^xDo3>5X}a=^e=GN)g-O73%%%XC|fQ5K7cmL$q zJbRv=mo@y12*#Qre2c$3)%Z|(F1stMmSS;gMj8&XaOE5llSR1LW-)TVmB3ZV546AX z_^59)$LLCD0wjx2(Y`X(VCYVPT5SykO2uU%Sc|cOupYpkWgYQW%Gi z6J7!Uead)#0unjG0u|`_p$I&-o>D zJl4wcK8N>W(SJ~mRb2>3K2xMS3dL)8ARIBk(#P}@-Gc3WN_@^=51f>Yn*j}~i2O%7dblAnaAMb6_GwAfT8 zdMJxG+kKfQp!k7@KhWTTXsHO6gKW}H^JdS8F^y!_W3Gl49|BhOS=kJQ^^xuJ`ecRh z*|wapy7uX;{mfGdP!P;Igm=E=93`CtybU8Hy}Z24^Y}4l=wQ2>3E)1e({Dh~W0~CN zgksC(I1lA68Aa6p;4FJ7mvx)?agnT*!e{%Q!Cl@aHn5+Si#qYGVa6KmJ!IEK6x9cW zEw)Iq5XQG1GF20DXKu`0t}(JLyxCCy=K2LSwcE*xZxL&SfIN;>&$yZ2w2xiL7n%q zlXQP?2NY>LTyI1ub!&~O?Oi>y`rhGV@v|QVfi#`q)*xbHj#tptm@i=3oy{;fsj@T6 zaJQ?J8gDck$j#I);9ecbY8N7T`?YS6rKbb`D4i?j)KKatab^4Gf<;-+ zvx6|GcYRh02qp~k)=UJw{up2AO7y}XYEQD|xuf}Rq0GI??gT>d`$U@GKWExy9_yhs z_3pWXkpZVymkoTq;AbEsjK+j$xD?$3{&i>u>j+BXE+z@;|&5_SqMe0;Be zH6|?Cw>tne?SmjQypkExZg|uYKdR@9Pu<{Ee@H=%cYEgJ1+J9RtL#(Cl8wgFGoIpzy25O1lCIV#jXzEi$eb< zyZ8~Iy6Ye{Fcp2d0oJUpS+Y63CMj~D@iX(xi`FOMKM{7mHYeiU#{T8On1Yprv^(w- zDm?B}6w(M+=5)%FA%gi&2sYbdV?(=DhL&m8-CT+S<($GXr^Ff4R3A%OV2U&Twg`Od z>K-A%k`|KoJuLqbR9CP#;smMc0Z47_^%8+G{MRtjjU5@6aBgHk&a%)Xal{lRpX@{DdDVYH7IH-0te*pLg?S0rtyJvd9sN4+U ztj|(=K!kL;CZf5T5PB~44l=~+M?o>(`6a&FJLy*bZxxnUS{03Vy4yh7*%M@Ivi@)- z_&Cw**NOC#M4H#m7X$yr9$Sa*dtthdAaemZS}<21HUOeAPFRTg`5;T=dCj3Q!FvzL zjyG%ARq`WBXd#;#^B-iNX-<&9z9)nD79!xF<844>UHocxK%|@)hFD*7+W_-qO8j}J z3r{n6jX9)wY-GI@5?B@Vo z{RbkRNUVIkaoRthgZahF*xIdPgqC6BAX>kN3tIT|An;aGVp*sAG?9P|#Wt z5*HfQ?0mriNmI>M#eUkM)2tXmVi9Y{(&fgcFi{c_!SM-eLhigj4aV*Iclh$}+u@ii z3`lKKNe9}}uGR^2SI8+gM47yT>SQ%$;pj=2yZ1=$rz@Im2+1+6;t#NeEY`dr_BM&) zr4&3mIv>82`a23SZlE&oocuh@h$&Rwl67T(dWgwAE~O|v@8|US^+|>k4Yo12PTr@U zLW@D%t!vPWLj`;6`CczBzbF8~h)N&I1?(T#W&u+7#l4;$V|7#W5fJ@?zR>dxUs%!({YIblH6>5 z97As}xL{k|K~P`Ff+iSAAa@8-Sd)exCnMBATdZSp*~z}KgcS!0zL~7+2#`~`-s@`; z2wJ&La!5}Txg^o_d5RfG%qMk|%p9eWRZ1j4(49>nFCd%TTR%wu%G_4+!|3|FX} z{+*c`G)8iZ7{Z;*y&WLiB}XK+oM-hDc=*Yj7;1RY(H)Zj!~h;Q8Soulx^fZ^Z~_0U zQyKI81%Q1t!&&t`Fh%vr@9oDA!vM&Bef4w1;R(q~=_iB7C?3G2U_2u%lZrc(Y@TkoSgXg@t3!`26|&;s zzhLBA=tNtr?_Yy8xZVG^U)IiQpX`XoS>OoK{a4Zb$g5N`*^M(@ z7FLLOWFBf{xqptqokBibL;)d@gnZxG_r(5uNdo)CNpU^WVk#Q``qAd8mQs@xVdJ>C z+&1m#<}c-6T0}C-5MEg}Q^x7a90l`QgNItk%?3bG6QvI{Q{91|(y*=4fn74sWZUe0 z&xl#88(BaYS?oQ`{IaD4#Phh(k^SpCL0qINomuw{7boxLlCbd2%-yZKDYBALgUq}V z{v-NubzleD#|8(>(Q75H8#TGt&8mE`3B3SAIzIjTq<(?gBqJ5rM_+9<@=k^7GPsF9 zs&%2_xT%dk247jWBK=*Q)hmhaM`L|x>`>LOmP8it3dN5L9eb-yZEW|}_iq4h;s1Z- z#KO$jWIxy2PcbGT#UwKVMZ&T`(j=ZKi(Z?M7FX&2?J*NG|Hl zu~45kZes(`o#asfXKgNFJd9qa0#I^$Df}j)_ri5P7S38w%B%XOYB7ymmz8$Am3Hx~ zlbu&Tex$50BdLHnQB8K$GSLAkv~+utAByZqk`@+L0yLh^i&cwST%zOdN^0C1#X4m0KE{fRU{@?!Z{uV?Lm z$d~8dxa9YmkvO{cdNLA;_gvI=ZqD#$69mvt59o<)eW|YvYl|bUtQ()6bg_mop6erk z0&flS&(_Gg6l%>lvn2EW|i@Dwx- zw9A6Y_cvB@o8~SU{4~qsp1xHhPm~}18NM!P)xl^Yr5W9RXzWXlU;fEvWlZHGUAKV74Js?p?1v3MBu>hAnizpEmK>?Khbc(RE_ebT7`L!&?^YsQ==fKLDHODA_gSD3R0kKdf=<1jf}sSH0Q 
z*nk5OtQ<&Xq%gX?q9uhLe?JCv>Q84A6yD*YC(=nKev!Dw12)ERr;|^0&xv`vdaalO4c0tsb+FzTwf7Z^|{$Ad)45508deDA7`F-vlaqs(EHo$+& zMiq0IY!<~VXwi|Qm!#vV0Q+Q437lb|k)@nN9K}T=pz7-9Pk)Qiq_xA_U^>caj`z== z!gssIcdsMtj6c-qy*naH{XtApt}^p908Y-5wsPbNR`A z3kPs&Ovi2WNN+~x&;ow*f&1TlgVj9`ix9lVwysDpT%h0)u8%Jk_&0$T;N3f|(EZQQ z;ra-WEJp+53MPyTl_qn5L97E#`>p7xPji_!oto0KD<||>&}x!`R9mGp!kVYA;Iq`x zM=3$x7=x?IXCQ-;0PZbaswqP9b8w*nPxH);h{nBa{?MI*1eOAIrFW&JZe5YuY^C^w zMc3M~PeszvNT?8uB{gSO=afWxcj&i5mu)>DDpj8$Kgjj+Tar#EXmmi(O->?b0!fPEZ=%I`)%K|wu0(68;>3BlATzQZOrJsZ@ zuIQN=AFw=3Ul1R69B`d3O*uOseIYsBL!XYiL^XOn^Mk_AnlaK+B|2MZ)(NE;&#|!GBq)U_47B1bG|&3r6{?8!Zd>S_ zYnHJv4aDMQZbQa6C0RSZRT4;WTK^m#iEM2;1JKSk^)W(BxslfBlr7P z4K2FQWJYP|*EI&b4Aq%+YfNF5?dzQ*Rll0jGq-bUQ zb`AbqjS%_rD<7oZ9>Mn7$yZD?O5EGWU+#r6^6r6fUBrk+glP=jWVoJR^} z72tihnZ*!xXyO}x?89o_Ah0m8gMUnJc)B5aA!NmfBS#g?EmXYJi?ZS^qzpBwZemy^ z^&wqUs)>b)w&MybAfC+Y9{`QHpT{U*agq76ufPVingfMY`gme;7yDKI(qbE zh|KZCrtjl_n-cd7DTi|E$HP;^Zv@%XF$<<{iHK}T8&JmZr^*Im(g&ww@WrnJX=|9I z`*dg_v27ly8sjRZ!u1u4@o=PR{ix`ON`Qr=M!reC)M^hyuJppx!ot%db`KD_m@m&3 zo*e=huF2>T18OEOB?(MNsU+vr47(%Pi%CXf3}bB|W8H3pCBPXGmvk1C*z{q4!se1M z^+Ou5t|ZkB+Kqqtoe5J`|Lz#tCd&!?QVNlw<+T6Fz7#B02MV5S=2uSv@!8AkFB#ur ze8o8Z+1cps%~>SDzFrSUE6>hqL)qigJEVf59ra4UF2R!}lnAp7ve3iC18j zeFq1dBEzOg&|)XjyUtTs;Ol(x)kBu5ukvuDo(Expo0onS^H5yrw;am}v7=z4cw0$| z>1jp5C$zL8zNe%GJggruHCB7!_|+X(9cC z3|3D}3T)dJZwW7IxNDH^@2E)|2_AZfpxp?X&7{3on^(=D@*j^WzfoO&)ZVSj9msm} z)gsqPu!z)GwX6S+Jc5B!yBEfJ$H@t@Q^x~qbY*IBFF9EALli6iASeTxlj zIIxX8*2HK7l9DRMu_UdH^~cCV4xE!ji!r>%QLs*yQMeMYmYPeb_G=}ufRK4Hqn|Y zp%4>SDLYUXjZv5z?-QEw7MJ&|%Jw#Vl+R`_gw5x9d$sG&n1+lVxI?MYjlm2Wh$wRS zJX{xz8yQiqCo(F&2k@82B8OjzLOg|wyyR^iFer@&(0)xWMa>WW3W^Fu^{wKk z0dU!0cW>ZG3-a0kJSZCem-(L^WuDc<->wCf7s@bTj#RR1jrBs?R0SE;e(CNJKK3T& zVAU5 zyzCeKWI+;59OuvRnQ33wOhIRfKw5=m=*~AqDLcg^N$!05TgEvAHH1XX42PHL%77ZR zRXw~rnh~K0Mceu+bPPEp4AKKp-DR@1ha@RW9r7rHG@xBRj1N4^vG+=vCr*sYG}@Ic zR37rpMYi%NB4U^)N7?N!@-bfwl1LE%e@U(a#Y;G%KK&^NRi1@*dJ{!?a}|6u^uK+01g>@g$MJb4 zefB?g)`CCcy*r>pA#mF9&>^N2OOMC$+#U~uHgEy*D+T#Rt6h=%5=TJ{^L+Y&?wxUH zlUXISVA*c`k5g)e9@^L0@EW&=4_{3Y!34U9?0qqPMXA%ebatZTm093RJ7HY6%W&~q z543H->TPd3r_xJ{n@FtZyZiqkWH};Xutwnv_c_rTBDHCzh|z!<8)4QgJr|skBKVix zhJ7CD#;X^jx@+$y(Yzm{pxv_Mdp~?S=zxRYKNj=k$cb9(y|anXD+L4>i`V&3%uwPr z>=<6UUfx^|fvYkAF&n~s`z4RLRGG9oLFsiq{NxHYhHMdPqHm3`!iGc#q=y+;t0c91 z+}w?E;ZleCF%}r#U;g=`!(k1PC8p1Dyf>CWlcyB{-cf{{q=c`rTg%Z6(EWP#?$%@g z06S(TlK%Lpznc6(HrKy0Hhyq!jg+r?%ggmYb4wf*Eh+u5On^jvJ2+Wo|J#g__s>2S zk7+|j$5qe}+ZkC5k&b@$le@@h`Ks{FRmp+H$R~JC6|bVPzWs^p`~K&|hVe$H=rW~b zpvg}vh={FAX}VOlz|dWK+J%3vKz?srk_sjvLUp?X*N;NtAxWBw~e}rEReOx~5&)-|I@A8Y9Ub)>kg( zjB<;B=~fwO!4~E}PDo;V3dt{N24s&&vXKrIN{fxJL)#7R_zs#-|52jTpQ;>J-e|2% z>P2qL0|9wGZ8@xXUyZ%)D9qVU&rpmOPfQiw-fs2?2f#Pct-FKnA(y10ufU{F8GH8z z;7lV0?Su}n-^lwO%aY2fze8nKeX!6N5@~)o{qK;$8w6mOrhQSu9kvq8pq1Ae6K^vy zLPW$ydddgP7(puF_Q}tzglIMz%Tmf55uiedA}yn-1f%(t=5pf4xQ@ygmC#&W_Y1WySn8x5sb*HBGEcD zQ9SJX%;s#$z=y;r}51z*(tISW{xl=MHq{+Q!EbvIAfBtPikM5$H0&aoTUyT;6Y(6z9 z%k|B0Ys3){<3xSPGX z|AyQ*Cma0{*Sh1^0fjhP9zuZ0{j0YLjU+}O9=v_>uvm@Z3p&oz*HcU_Fhx9UNJ@89 z6zCWQ-_#eWYKS+@%KU{fhT!3jJdRI9+9p$7euX(?lN_6-KwJ?kgL6wOJz?BsO&2Pg zx`zXSiA|()n0!NdrwApCc@pLrg-8R&Y=vv-+=+qP``0fWHQdFef=<=Xpt72C#e#rVhn&i(t+EHE!GGt8+>M zllc!(`a}`XAuQZoyqI)N_4uIPQtWnyNNN2e5FAAfQ!#egB>a;URRGgUxVHSlP`db_ z%4(x7rUYxH{(+-DdIhJyXC`9oPFxdyN3xxnb0xnei(NZYg-Pq88+n|6XhBmF^6)^U zNXLg_CwiZ6KyD_QG=QNpHH(IVi97wLjI|4$!twh&YKSd=27~N*TR|=|5j$C^!~P#UA%&M0%Ha_ zD>lXXTrix*ydMnF@*A%tce&diduY8pZ8Y6u@cDPJXbpkx565 z%E}E{8siA#{D}IcrOpZK*%V*l1(|^z1*wBavojtCv{LLFRxd$gy(&>qXWpiyUa=GJ7JIVW39& zHP0`g(=Oi1pm7B9q@#`Y8_fY6bv9H106;?t!qUOg+Geet@1H+COkFqg@i-6tkWiI) 
znjT`9Ybi$(7;-#JWq~`-zysKq7>yT4AY4q<9_c{?(mp-f6O?zYzEp7Wm}J-t zK6sU~PYB5r+!ku7Hm=5k)RG{I=wn2mDRNLKqQ-#)CnKlw9h;dhAD^0Jyic(mqnlE^ z$(`C_j1QDIrH3>Q6hz8`Wlo)jYe4zK0+?oifjYVO864F81b=q2FTj!R1`4C7q7Joq zn6uU)qf%>rWGZm!z-{zIvy}nw`JhL`V+armDo{>i|6{i|E7<3|NJCz*srpzOVPf@%nswou$7zg1u@o-QZ!VIgBIF3w#{Hgo-cBvaU~ zC@0+f@x2zmj)xcjyssfTFU3$gKjU!}9{B^$9n`?zqr>y-U;lkJntGw8bD1$mf z;}K%4BPRQQl=I`15unUlYo8QyX<;sSf3yTdmPp-Blh<)7x-X@bllB$)u_O^;T5&=k zUgQuCby^zn=9yM5=V)?0epx(3%W90;5UWi8&GD;O$caXXtAQ>d9T8M9*Z#RuyOg=p zcruWzYC6Z=ntbe!L@*|ViOEJyWTj08N$B_>;0b{0uIuVnG7NIc#*J8deG)K(-B}$L z_}QQh5D>2S(?TdgAH0p4J?(+$H;f`>>);Prz?~fLVTy&4a`3K?n?GuO)6Cw|@rk*3 z;9Fc?fgI8{^NQmgV?U#NhaLAn34s6Yezml>Jn*-7sT&ml1dqVz*`WyX&W%D@&nqXo z{2Uf@jj4}wO4;<+2RG3<%g&G(=q-$JZGFma@60}Kml-5ufX+Ky*O4{h(ouvY@c_h& zU#rZ~n`SU7Lqt}nwb^BRZ3Vs}+eLCFS>IIQXldO~awg%_} zM59a|MDLlH>;K+@QfJsBU5#S4|9S!B5LOyv-<(lN`5k}a3Db}1%#(@c@!Hk7xnlR@ zv`=kZ38s38L(*Am*)Q;3d$XQD9Yl7H*HErI-aN)_h1$dhKcoZ%7J!*9u=P#;->t8W zYWPTcUqYp`pvP8Qs51QHM~5#+kBKCn>R<-E(u=qLfyMVLqqR}L_ew1@G>>HQ`g|p9 zc5)dlP27%_=HFt2%XmoEn`*z1dh7^H)N0ajx`v7|GQW^O{jG%83SjZV@Rw(|E1{b_2OPzXWMV!yXo)Ely1C zV;G*_!G-k6@5=D(E%wLh{9aaI#%FIheiU!ptyVRLD!mH5yXCY63}F8uE?%JeB}L5&I}x*C2C!h6D7?>9F0EmN5*Jc+N58*F7CdisHmG@l0_0lGcpGAA6^sXT z!4Re#fysa&m~4++N$311w$enB`Ier6i)>H4Ccx-{GqPI7V2We4PpmZnYBz;>aVQ-f zj(ZYE#(liC{}B*YHWVXqSK)x{8955CO4kaJ{=wRuz7mP%IG z>S6TqhmMiVR{O|UrW|r4rLOZ@7V3rbI!Z2%!_^h|&i^B?^FwrwWORNX?M!9}Z408M z6uN&`Qinz1Q&|8BVRqxWDDO>LFu1w7Q#qfLrgNL=Q2`4gZ>~lw@G&F@vuUI>I3-!T z4MRQkJ)%Yw%AI69WFnOGy@Iq;d|3n*##e#lcczU8QRzUbQld=)spnNgjF9nv7MfHM2eG)B-fn4o>*`|!6r$`=@ znI+@K(5rl)o?Td2lzNi-A5RgWz97X>OVm*gTG8BJVHhd0_k;9s7%d1p3}=( zUT$vEIyxlcW*d0y?ChVKo9}fArDh%N0-1pf@NpT-huM<-xT2y}x^*ipm;^s(yLOKl zu43fF6q1mul2~pzw;DesN*?}R<=axJay+6-=v#wW*29sFPv1+&y|#z+%SMd*V6Jvz z7Bnn8S-N2`;>=+%32;6L74j$Ns+1vwpA|;G6Nu%Hl>b5 zCvb{KlK$i^vh^Zf9LsLn96&R;K9%##k|gS_U->xC|K^(>BfN5doNR`SRzBkK`;592 zRp^)wk(&fPxFqZ1)hm1|pb5cgFmwT=J(a$rHbDLVkZ0Wu+-X*__cd(*Tia435 zFIR~emyUxl>3?qwyKhDW(n^DcO+amuf5Go35p+7E|Aya|z^L91qQ(t{qK>{5>^}WD z%jALtnEPD)YEvv6oRr<)x&y2Wu~>aDMjp!qJJ`Y>Z{HoSFdw<#Fb?v*{DLWsTgRs8m<<%QLubaU@bUi_i|S zilf5Ydw?$(`sc}M&+(Kvuf06sTv@eEU>8iW@1=Wr6wtv#ofd+DXB?---ykONeSL;E zZJZa3@4I3-`~1TFftP(Ue`kh{ktGz5HoCZh?+;bUKrwJeJs*Ca4wdFZgWMsqjRdHS-EjudGb_i{;Mwm)nwpXB}y|Qcc=rE#Ij~2nkYF);$+MCcZX&7b<|&lTo9WazF!gDt&uGw)UaIa+4BAMTlmo3rz@i7#ok+T zAjjhE(?B$q{IJI8lUN`Xaq{D9p-;YO`8}@}evMmqDo$4!D(hDJA^JO1B>EFlBv^fg zA9NsW8`>Ab((soOPEdM`u|D4kzjEDg-tMLgYEJLUk(7jG#>i;G<-J!6q*8Sf^mnxm zXPVO5{64MUr0`-bQ!;0q+DYOEDO@XOo<{94DMY>K&lZ~IAPd9){Uq0_afe*2QsTXB ztZx2|{#A*|5eetv=J{*>P?_nK3b#Ap-OlQ6I_zfcf}o z*?C%^IiMgKNW_8yTmVZ{YshjG(x%-`r?SxWn(ZX4WBO~uMe$fBxpCRs<$!n-D{v2a zo|vhmGxau5mV|_oG~m%+{-b;Zyn(1iG3p%^L=hwsA3b-BY4Lo%psT9TGcv-Lmw&M` zC5p?o5A-EasVvj~NuhVWIrScQBQ{G$&Wjn3b!M@mB&*HAxt?#FpdC;R-FsEJ{kD0u$pX4-`@ z->x8J6-iK&BpMyGfd-negEFI^gfcW>=RYAUQ;X2TUEKmF@&E4Mzzgc(4kINO`iQ-r zoz>f_ySN7MX(CVD6x=t}rzuH7X+?-BwckI)seT)_S-^jEJN=COlRdE*ZrcM+5oZ^w z@S-^JL#+W>gGdaE{plCelwvdqRC@BBa~LlTs4G9x{T&%6i{qN=?`pH-gPy7OzOP!N zcxgA~i$-ZFYCmYVmZZm&{kiD~r1Pnz^97y7R4_0k$~qqmZSJ@xwFowgYY@Od`ks7F zJkT05((q+|=cIJvhLfAW4d{;NSU8tyynQ zQrnan;!!WHHbKGJ-cuXU7M3t$bb4K4XBxpb*Rcvt@AuBUGw(mUGt4lv z&wXF_b*^)r^PKxL9w%h!3O9n}_m)^kPzsfa*wW=}^ZP&sdR6nwd-;YawAMYR5A_#m zLLdictI7;?E_mRT92NXXU_^JqC)aFQ`jF`q$0`N4`!(V2b*T`tm|+**X7tn8t*?OT zb2#e+70uiPpUb89<=e7n2ArqBTrmkZm;6|OM1R}=`Rj%8zOL;B(rH7`3_zls30t>= zesdwq<(2jj=5DOM%Qae>y8nH5P1hD}d}Sk||*Rs|7Yknkj6lS@zk@ z3OV=xrO#_2U0o;|+uSJ-f{ALIS_LP%r^Y|fAM@G<2i#vtAEP_dCD1&3rbH-}$dphg zlXkxp@UUJFQw3&WPG>+u_JFD22&zNQrNIetYu6*%NFJN;3DfR`T@CCXHO+^&F?I3N 
z`DK%Sf@dAsu+MT_ccl-&+ovV+5^`dffB3)h72=C5J}^UzFh0tO_Yf0uHI5LSzU^N` zz}C$?`BG<3iQfcb-iY+x>ZI7@3WoEgbzgLO2EP`GQhPIH_G)U%$?vV>6RhH>A4bh! zeCQYbl^zY^CSTHwgL&!SA-yBnPiE+yyB#_-OGRs*ICxa79=1Jlu0JDFr&NEG?v0C} zk8uv>kt&kEt+Rn7SHQ%7FxJ;)!}<2Nkpx0dKht}qLhus~b2oo)obg3AZ2LQYsZ4-X z?Zk?T7`{-=G?PPjzjEgu>9z8#4#^p_c5NrjZq=`o4Qc;;(IepP_~HG*x7TH^QkcL% z3)=UtQ$ThiIGMj20SxOOGuH!x>lVWy7j3wy(^OgdmNeFQNFQgDzXY)gi+V4bhvV-B zGt9#jM~@F4?xRJ#w~3NKyXUdNUYQ=^>iCqBnd-FAs5PBRIsCu+2oXzoc$6b;z`Q#Y z4;P$e_vk@)2?FFiH!x?9Q~YwANt|q^)lXx){HS;`)Iy~D@#q{Ea zf6JBKi=$9p4qCduByB5u{CTRmycKSx%taTRx2~xYfxgE68+e5j?@vz8im5eOnu2I} zGVi{exQs4EVdhuN(EX=k?Mgg}{2zw-36&?cMqrXWnN`YAM*N^mna>3WKI7vTQ(9A< zY1yNms%~id)QegdY)Xye!Z}>(lqY}dvY^A{W&)@*y2*<-!SW#;ro43cpVDH| z;~}39d_sTHp`)e}kH`*zb37Z&r+c5w-vtF{NQuUOU1hkCXyXk*$rYDr(5qrp4E1As z_uI>wiy>C3w;6|F9}1^eH!}znAjIVwj{J0`vBTy6fHG|e$A=NZ0ghnDGSoduK`->2 z$HVR(w3~8z@FwfP3zaQ3SB^QA9auk4TjjpLKe={f@p=$2>DsUO;-s&Nd1?s#e39s> z>}?3mzBWx{e9eiAyNeM)T@vfw9sDKZC=Rt!f^Zs-FZaeZ>g7q@~6D8 zzfi8HrnN0Jy+bnMnRzBXCNs*dbqlZzsDvcys9qh4U|xHlEu$_co_~2tJ(O7R??SP^ z9cs7ep?FCTq8VQ@5Hq)wuK0id(v=?pi6fz;jIkQdED}0BJL_Jv`KHJcOdK8)vbU`! zb8M5(Zcfjb>-kHLDLy`M^Xv~@1mA2{+ZkzM+L#~d?z+7KL`QLv^4Blr-4edf2d9V> zf5(pd283dpeTIp2Q^%xa%d*b-v+6l(z(?X1uYo_yEY-q>|2g^dr{UtU9Dare z(%;|m@t}bZRSShz5|~|KZ80KW2w(r1mE;MJkW_t$C@gc~mC1q6y!Pao7Wk~c(CD%s zKHXIBzcKwVBSa+dapxhfZGO|#VErkSB#E_w_GePb2Nx287y3yhc^V(^wAvzxlX9OV z^=X((j3S?e(AGtYMp@vyU$rfsWE|}8?(Pl^486R$_w+p%T3U5&Y2`L+&Dz|&^IvU` zTQbgl3u^7|O0qT_7EubPREek3VK<96q4-s4^&t>e=_E@jF*eGud%|B0@mlJk4JLgy zrMIdytuVr0`U)mq#_iBKBM_-wN`j*}gIVC$itEKj3ZJ~!*^kojPpL7T$Jz1Q=6n7a z+-e;ghZil2^)wSBnEs%n;NqpJ^GD6xm#fYbg)ztL&wkJSrn6r(Dymzey%9s@t-W)- zzwf`e=}-NL1u3e(VQ5sBKR9P0aWcmfS4Bny#aDZ<3S1Beqi^~-PqKpHL&RezGn%MJ z57%q&l~*>i0)}cId%kc|rjhY=A?(?|C6SMP@~S4B_VRj&uTObO8{6imuy^ls0qOUr zvkBeK_s0yoaWCUu^Djmw1k(sEXkX>Qy2gc`V62lv##f%Qa&h5cCV7@Zwr4mxa?^?SQfgUcCNJiT~Y5!_S-|HnbIdf-usjqIuPJSR( zSg#>6LldZYTqlVQmE@Bv0K*j6ddq*(`-anBAY^FW6&umyCvm&N9jTPD++sa33hr+) zKiJ&BSR`-v_#l;CYnN6KsR3X5FGH+23Y6XL6Gf9C&S{=(2KU8jO0XsyA|%3Yq}Y&o zP3Dpk`^C9g5Sk%Zk2M(h zhlDGE;>R>=Ly}t(PUb=J(K~h_zT3c~;Q8;xYH}&`UPum22gU2}L<_LTJ@cm4P?*wg z?ps1&w#w5v@czT=Mz{0Fb+MYjj{n>E!&;D004@%-1Nv(ZGv~;Zw{ zn)t$b+_l@okCxe?6soKovxA!Yy$k7Xqfbp~DI7mTJ;FOxi0>@@u6T`WX?c$T2ejw{ z>WOW}4721Y|MBQ-#Os)BqEMfB^P-}&N!V!hcW zF0@K1IX+yF)%)o3q)iO;@g|o^hO-{DNoaRx@9H5kM7?nsDyf%VzCh_p!EmbTF}+GU zD3cc5jkBYBW_n;n_w_XD&RXaV<>Sfd>PDB6@1D<#RQkzmaB5*adnWc2xXBJogu4MG z!RR-XF7p6XV&9JD7Na~>Obaf-8GC6v!KBft2o^@tj7?x-p08wxKD5W-glYzA(l8RO zaK2q~xOKUv*EP^vG`;$5nKGtd4x^8G(ntplr(S9LACjEd&(@0$-k=+l>Oq z#U_B}*}~VI@6)PFSzy)BdFNir=@uV;q|7tl))k*kn&9%XV~3e&Rt7nmqKz1v9u4F3 zu)dQMQu^Aqsg*x}Y!go3*V;gXM`F2QwEJoHtA2eWLwkmy&qLdSB{l>~swFO7)2XyT z-&Qu7SFxJ`&84v8NVF;KP}U;uHowSWspjwXRFe~h&#y&ZLXzaB(Ov3|>$T)?7aa_J zscs(K;k$}zn^GaxZ+f!Tmdcr)amudXn~a|lT)HpZe>mawR{r8sVTrdQOvWkh-_-42 zH783^D(HSeI6x^f`Xbb}KJ@+J!$;sD3ZCujgvNz6f8grLe;2yEkVoa>B>yV!B91 zmydX-N0-x&#cSk?Uoo@+c(~{4^8pyOyfomPKZo6B*Y-_kQXtaqvS7Z1OZ%>BJ=7WE1EfrWbI;>~2E;aA8Nwij^ z&>irXB%BP8cU|npOf!#<8BmmCasfn(_k!T)(Tb;}$QrADJQb)pK2gVtZD&9W28QM- zA)0hglH{GkD*Y*j)=8;%ld{8{` zHt1YjOr|~WqR<2kr}|M4(y4>W>6J_d-*Z(eG^G8?T ztb1HiGeOvkGw*tk%+pUXs-#qjqE)Qdh2%(E<3fv!1^wvH9^UXz7A=g)@^x{>>(*a+ z|FhtQpiJhGA|f)2$DaFw!+%D{iV^WFUp;5)!r2UGb0lT%pNmK*px&u^XX@{HcX=Ej z>kGp2o?u@*(!qrPE|1<^l|LXHIC)Wn@xWxP9pWp||G3eIDBJ?q-UyDyZy4+Sg{CWk z<^E+pO;?+@PYEv=zNo6YgCzuZXjms5$Pju|V>gA@RC#5J$V9}OH`3hvo@m=TTJ^?| zbd1rXm0z}=2R~_sMa_20X|DvzWIcR|D(zYF4H-^fjM4Lo5t4<63)xzn*j2PqY1iV! 
zCzYoCS8f1b?c89g=Oq}7?VjRp;bWV+dp`kA3ZJA0Dcsx9(^#Z_|VhKvlN zQpwHZt+E}x`&Musg#)C6;IdX8D(+iw5MacueGS|Wep>DLa*4N?*GCpp%=!ZD-G~1)e=ae&Dqz z4rGMAV)4I=qty0YbNs-r4Kd(0Q4q!7M+F#F5j@)A0X8qV)ysVbmb%037@2WkbOhT@r{T*t(_FYD1do{)+a) z{Wj*O`?o^nAJ|9+UY`3b5eHuyxV+&oCx%_<{ctPOfUQWe7NqYt1FG;|iWP!&`?60C zpE%fDl9!$697^wWrUW#4tgNnf`j=+nji^Cr_j>i^UiBVKaXjnO@paV_nqW8N`l1&F zk$K8x*+-%qgVTI3g;T0YB zwIl-nfB)p&r8a8d^|%`C;NUysmt@)eG~qMNSQ1J^k)<}Hq$E&MK4lB95*U5|n2nek zQnM^DP6R|B-}v;OM*WXsQ9!$U?w zE8~e*Jep*QQ_tZZ6+5r^@J#FE8G641cZ<+FQZtlB= zbkx0c00E%fwkjW?RuO-6t*eSRGf>Z?jTiM@h;37Hm;GagQtQPYI#;=eErMpc{!`b-LMWS5ro9@dh_C&bG3|H-J)}qlMw25=sp5Q3`^d2m}sF zmUnkw%HzHJGG~}+V8jafQSM>byJrV1-KDO{<{2x*x%@SUVGa#F*+JyFED6NrZ)BI% zs#sp!;$`|E#81^y{^CBOE&NCH9sH)BoT&v-K2OyN3!xFa$AH%Czgff)KEXL$jmw#I z-K+Q?Ov0L>r~A0-rI6-hxx@3p&yx6HCl*_Kr``hv9S!PoV`3i&F#0ByN z65!Wt-5^K~V?e-x%PkSAVSzYHmn}B{V)bwydoO|oMhNu;iJ1pt*FU+b zLJin+r;7d~J^&4Z*nldeq5!q(wdlf)SSmXb+}MSSW+M1e>(WCI><`4V!!BH6$@Y!272wn92*?dWJBbO;k!Bps*&4J`?E2jjO_*tg%&Z?6ZgV#B zQlniD$^l18jE3}Whzn1DffOp{AQ?Zklw9YNC_`w<3-hKtOWOze(XbzZ_!rikHFS~xv&-2~ zZIbB*BF|%4<5j7oS0slOAX(XM1H-*VTp9cLIS1F-M?esk&!eGt^*b$PiPr$O*<3^L zAW03j@{;{d=Y0??pt$T!5eWW|5<`XU=PcPo-N{ZKFNDa2Kyyd=jdBcTB0ZZ7>DC4k zIzRvXj$K(;cP)QAxi3=pE{_!*Ym{G;$z2GJ8Xk?lvMNz|F>CcdXPCy5Bs>ZYN&atR#koL;S}W1w^q0Hj?^tT?8qWddbE?a!reqX5g?_;g1KQ z^F3};f8lGU!<8rmDLh=}3$Obd@C6zi4+1t4y}aDpXM!D73f@l2c!ASHk7H%Nq~5%K zslc<*Mk!;N(|`wKn{PfUZLgT)KBz!e5`1;1)1iIE9nag==x(^boszOd z6@iov)(VyR`loIQBaH`ZiStjNZ*CyE*iudZm%z4<-hCGpst zAe0GRHbLD8{T+yq$H2HRN8I}7Yb)4=Ug;Ozvapk}P|Dkt(Y&437?IZZ@M^(`{j72y)Z6y1 zMR}}sJ>|z!mZp0s%+5MOE-=7lQ8c<%+1~fDrzWEpW!w;jg_Ps``w6$91^Kzg7=2?z zdhhX_b(v{n&umkgZVXDYD!1%wdmo%RKK7Rzhg7HRvf-TF0*?0?L#;SA{i|$tS@vi1 zJ@;o{|IkkjKBf6XWFOe)kJ4BHXeu4eY#TNIu*0T8_BLvsB2YQgR}-|u3ANjRJ&pyr zOUMf~bHCwnr^KywVa0p!sTXlIc&y8c!8k-%?Eu@3)^XtmMy)nNV@ z2qo2<;nc_I{6l}{c;D?;V^q=&3YJSD?)lJ3J}K~whrFr&S!lN_sc1Ul-0GVvdLwgK z^+CmC=EhC1n`YxVANP6O$#oSy`S{E*oY?97MYbW;k?V}}-G1rxhxwNHLh=MVUV1O1R1ZjYm*T0t;tV%! zm*$Yg34A2m@<3#Q2!Us7=ni)~4+=3m9IRi^JZCu^7HxMOcFqAk!j&o3|M-mL5$f5F zp1a?Xv{?9a^UG)rdC?q*{b3C)4NKWIW2&KpxfO$yIeJdR4VvKOV-KY8b-QySkvVoZ?~_dF=kSRdD# zKMtV$aWSBbf^2-8xKH~&#$H}YdVF81T*D*VqH6?*ID`Pp;qtpsjc^nKfP3h`p?D0b zo1c-lSIF17WLkFUD($xq1Funs;{cVWFHt7;*N{Z2s3XK=m=<|~PwPegy&8NHvL8PN z2pQxKJ-7n|tls|(i1BXZtL_fnQ@!|l6Si9Av^`VBVe^8n+H4Z;U7sxDPbY1R!<(vD z(4Dn7TNV$XPhLWK9B`fpu3Y5Tho1x(Xh1o1;2Lcx~cCxN(+HZWoYp(O0UDihPrf+v%*^%f5d`QH4a^56~ zUZzv3`KbK#DmyTq7crmm37xB0@ni}e{!}~uJ!y)@iNNj_QMuv#`)iYDBo72Ui(@K>ASIYo) z(&y-Kmw*8u(}~Xk0PJW!k!;OPK{7==odn`kdy_T5N?G}=eL2-KPn7vnHL7%@^wx=k z|ElZKznL6jn&}^heEBbjP_u9tx1&sy={<@Qy|2gD!^$jviKNjYBWqa=I#8AEp~7gF}fu$v;rU#R8UUb5nK*(+F4OFy>)|@g z6&4*5sLt;g4&HqdM0-I6hNC7KQE2`3!D);Ek%^UhO2rHKXmnTAP5^jm2KTA6A54Dy z&IH2t=fR&u(wu+lvcA>~`t@~&#VZ!vOV%mwaMbKm*0;Q}`9S#1Eq^~4j;~c7ipgqd zHREv>Cl1p}8runyLjFub9<*5*y$GqePku4_8P1%%j zCLX;Ih%nL!2ahK(G!Nefh$Dh*jw+EA>AGebyQWvOelaJOI0-m&+3UTS6)=&*>+l;X1!NVx5g3CX06xTi2S!{9n=d2G_h^GRL z)Z?faqR<^Sc_V8AT25^|h0on#F&gVgFrtRarpAeQSr|+;gYxBrgFW{w-}e~Z;MAX6 zKw6av6@Cy#c$9da{n{10-xaoPIC!|pLhZ;`#_W<~3X~O-LFt~#{|_f-11T7YsTEU^ z6>@|SW-o>YSAjCcVdOXIKJL0Je!h#R4%p~kTVZcgRX6e6o*9wzvl)f+leO2f{>Chr zi0@Ra1T;Bwsv*@AXsHoCkY*h&czhqF-V4b9;UU8HO(HM}2bYT}wgNIoOoZ5(b@5E? 
zPp{hN_oLrA2T#J2JmF@ZyPJ32hDy8qtI4uYH|)6OInH$5dMmBTRyA+Gn^k|}7@mSZ z`B5E%i8QG$PMv)+eMsfxigo_mH5dQe1qb4g7aiWMGe>5;dxtY&h?zPc#HsL^d*Ahi z3L^XHdbM4!aXt;9V%kAW#3o}jjvkVQxcqMRt^U}XzU&oH53`W7nRKoj-7n5)-;=ze zMn(P6We`kP4I%IIfAi6y{_-(**1l}v_cl?Tnr!#vOGeK5=Xldi!-?mL##Lc7c~T~V ziD|nx=?=a{DSudjAk+60+I;@l>C=*;V_SwIMrmi)@6KD%N_?8OI{LGn-iHE9@(Q$5 zEiswE;3>=hY{A#RTi}aUHQgK(&ju~%2FO(t2urVe9?p*FJa0E+JPQoXgj>!q6&TP7 z4&mDyB=U!zHIK>kGu}r!JPo07faa%QrhrP zeq2of162Yne-hv@#HR{y@U}|fuoLc=V87CPPX06EuBdqt9ZPQmhB93$m%)cc#>I6e z{HfX*>0|*z_6duFxh2`nP%iL#7D1q7Dz&RdQe=lxV~LAu7MPerN@h1@lI-zhW!8HA zq^th9@ptoFjo9s2K3jrEApdm4s-i!GB>)Kvz6ZLV_&8MWtUr0Ys`Np4B8-{I|`({T72UKw^1(|QGEO|Sb-$xz`e zPXjFQk1vlfb3rNFB1J0j<-{(n5n@$n7ZIuc@*OKo%+oldx{exwgZhd}*K>`rzE5o& z!>upmDEj34i#1=V^^6*4guzTp?pY2M7P>*t%lozq(dONPs#DIOM|5^1OL(>J2u z-Ll@9d#FTiQZj6pSicTTrVQD`aET;UBXoPI*zTsgu$KL6@b=|tfp+KX7;lAtMZsRE z?>AE&r-73b4q-yi-E8_uFY4Si= zP~|?AE2fLPLuQ>zKn2QZS-u_GY7+0D-U5xHQ1K^4pL_oUp#VTS25$h$j4cZsEEEA2x(Deo8{oSL>OS5bb z$MpB#FH`{oZ-Hnp;otJys{qR*s($WI0W42N(Ii<1^ogJgIxrox`*&xUgeS306HqHG zqiQUGUTPpWGttT$Fg(w7+W1Oh{@1aa(M0aeA+?Oar0hRy34_<#8EVsR4ZUyPi$mI#TZWwQmeG#$!F8T{755*15Mp8hv5q;ty#oGZaHQ}7=+$3-1hC1C0{wZLySZ+S)USY_q$z#}%SaYoV zze}eAuF4NYSIOc6Yqz|xTd(=Rviu4a3|B%Q8~^tZ!9@VOiJ$ZU)y2V^{Fd_s5cMd2 znRW;S2kTcJj9&dfKlNgcG}xS6qzjE53R-Xnq1iP&*Cvcwv&^tc zuJ>Si9fV>PoDM#7`M2l)pnoWX8bNY=wb?HItZ!t^2%N73{+WW==&m^Dpn${S@$B$) z9N5QdD+ya=F&UYKXM2FH0^r(E4M6Es-l?4=V3@Jg@saLA71rb4Vto*i)Qk~F=L0mkZL{3xAHKC+_gt!?yFmDhl{7v<@K4+Vvnmq`C zI&%Dw237Jf&b_zBP86$8Ak4`FpZkm8lXn~KRokU;Q?gZH_%@rk3{!F&wi{3XnCNLE zKkkJVGbuc)DgYl%Etmb5NxQGHl*j6sZR3Ki-5z%uT8Y3A--dwK57OAZBPyvj9~Xt} zsA4bsP7BzIxjpIL5ZbSc(Xn9sTZu=mXGIHW4yTAK3E3WcE4Ms>@C-#pWiUXJa9s`dbEo0e1u!_FIq~ItJ2;s6Z5CNAM zz?vC(qQlVvR1uKA&k4M%!%-UT8R|I<m-$JTvg?=yJU>Vau6+f@y;3={8Znd9~u3$UKIbKB6=K zj+o>nO6{tm)N$XBGAEHomIY(-Ud7u-FuD-dSBD^S>F%VI(Wu_w^O%2v$v5MF+XY++ zbOMR`HcwG4>f>faWZLcpN;3IRk!NNLbUOUrguzZY<=VmZJbWeY?;oRHVTEDvTTr;m z2Gveh;0Ir#wJj97@v}l9%Xa*aJrfTGJ*ZGV>&TX&wog@ngcdh8jX%A%BNioko~E)# z%#o4R0QZfPw;j4Tyu?C`NA~#RV6AU`M-LtCxnFtPj_+!H_liTC^&S6pLQLs_tZ`9@ z188iKtd=oZZLD{H*UZk}0prI*L>-3O!kt3 z`$*>)Ws})c$P7_$Jta6A@A;M_!a2I#_~qfITlOdqQYGfqTU(K4&DQsU!J2qp zwuI*P**gXKMeit=KxA5r5V_U>j8z{toAGzfz|i)k1`a|27zX)L{4`NXiP8x1NUoO4bu zYQ+M}+%5HL=g=?(kn@kH+m_A?TZdwy`qLP%CU~Xpsg`c|k4`S>k>MfIr|5hzrQ6n# zuN+)CnGCgqI=b>L;1BEOM+3b!@NUq})yJ}~VTDY&(zvgtXkr`dMm96QrV5P$O*8sV zS+cZoKiJ7_>B4|-9>Y!hRXX>p0~sk8vO|VyU9LwMu9)u z9**yZhm*gtO*MiPxEiIzkcZx4l4WENuxARA$y6!Tf>=jZw9BO0gl{`E4 zaw+~Y$=V*5jv6Q1;Wz!%11Q1sP9v$X+UItYgu}o)0&uR6{g~Ez{iLp+Yc*ZVpR!pd zFaJ6J;yY+o*}t8}+A%~JTHt>4y@{`vIj}dssR$_cyLj2V&(5ePY^iYj zd9>~JagTr^>aMfU&`*}ZqAXFJ=%8cST1`JrPc9Dr>faeFrmd9 z+sR5#S40sbEry=>H@cbzYtl4t*gBTPlOUK9z3A}pcvntvu$t=nVm}wJjwPdL|F3%< z5ue)Z(cQYe%89XS78Ph0L1+Vrx2ss9oTwy96@9jRLk5$EUBwL7yvcNM1t;0u-Ef{R za_4zOib_tVwdzWxb$mKe4uOvE};Ea2uoN7zhU;Hsix*3{xa_Or{ZdB7*S2t!2BoA z8ikEnBKgMayTrM`CQ2lXg2^oQ7gNUlK?1{m%%F~+o?y|- z!1pt4_i3%{FBAQUIhLAg<2y``#IRJ2^A@b!uu&H&C2(T8*fr~vPAeBP-_OvejNAC} z2G2|$Gy6XhliI^UKz8;!hC?|zhce!{iT0a~-bTB2+{Whe!Ny(?>=!QaeL4FQM+_S| zx>%AT&AL9Golj;qt50(V+lFg|=}Y%(DWa+%^qnHov(0(~b8meGXXWa+&=^1}r}ti) z8!wL`Wy`vt79XDikunAiq+C)UowVx38zb2P>MJV3M;0cDfO`57Ghy-{-IWhuf1a34 z=9w0Zke*+>DF=F~ibS*nYunnKyFW=OAs@NI#zUdK)T^6X66ETnYX6vVgTW!3y_$pZ z^ttjxuyX@mM{aS9Yx)Z{}nK?Eu#pvTnF`^BU#z?T`N9nC7tlqi@=Z zVlt8X_nvT~wc5bW^E2>b#ZEc5OA?Ds-HM*kyowDJA8Zb8p+lHrE1A;@(nS#pK5O{O z`am8MW=<9J89Sf=0+U4xep&n)JMJ+U7#L?`U^-Jm>#$L*Re;^M1mJt^FTR`K%-Z}1 z-+vRVUP6pK5MZn&7*J?{Zz}>tO~S=t4@*V`?6HjYWW6Ptg9T^kb!udJIxEfli;)a!)LW661FFTY%%)9gUcy29ewAM7eTXH^>Fs&62g 
z!x}fosGDEU&8H@GV{3mgRc;|n((%!;t>$HM#kcn$hj;O5?a^9+iGJIfF+@v=E5JJm znyWq$u*yZ3)`P2t1W&jIHrb{kzZRYtWa**>g0(66rG~blefV%q!>#v>#|*h!iHx@; zFb)6neQCTvtxQ0{u03&x;s9{kLl{8Q>IFl5S)KEadEb1X1$`+OguMU zXl!Q_|M=qV7}ZqEZcgCcjjmM2kZQ!i;D=O!n(d(2_vvcsM6lyKKUXaAwqe{Yejj}H ztjLiL`KJO{my<^={@eVZHZfH@jPgp)`QTtQ?WL%TlW(;tD8WwLje=sE-n!)W-)=rt zMd$jTe0^F1!os?x`hAgTew5&d{leU37M)^alGMGAB><>%)DZ*s+r1w<*LhmF_d9%7 z{x)&<)9Hix!TjBTzUfRYFeD_6&*D=FgNex~&~bOZ@yG7p*{O>Amu?vX9bSLaElqG$ zY2b6<=)|)vkv@<_|IQN;fIr%f+zxu@z~Jn{cg|Ik&6?Z+9y5XsUwdIeUBM0S#<#Jj zpER^r9X3%IyHaWC{IGWD(wU+spC<}Dr^)&DFXuSlKE@{ti}TM#r9?CYw8{9#tv5q0DHtwmJ2hnv#;c z-?DBmN1|xNtQT*!=CZ3o(VC$_DyEv0>M^=dQ;S59Yjb7g_V4g^LZmzY;ucvVq^lpV zNSD_b_fbH2lc3^0>vE)g>1WB9i|Lz#lLzu=)>!X>-W3IhJUoeib^!n1j3(b$%%uTv zry2-maW&erpqC(^5`DBuIZ@kOfPxxP53!YpjV%o0ufLf^IcT+0NcMI_=&IGT(zM49 z;9tElUX5V?)Gj374{I$LAU>f)rC#{{Mnpo3p_#AKJS12g@(n7(T`Gj5B$Mqj{<9OE zs>-O@ljXF2opkjKmp}M>;9JOyDss(yIZpJ61flcXMqlKsr!}4IvM@(f2?qJlxjtVZ zs!=xX6m3vYMN4va=48`g`c3U~)ItNX@B_(*^?A-EenJd(byjUqg4eE=Z^-v{H^|?9 zPoR1u;~4$kff?dV8=pC32n9weqM~8ow6|O=QY@kctYFc1QFeb3I1t`qM#nm+i;5kR zhBluPE|8B>4%Iu0*dj^>`Jhj<+m|<9ZpFrGO1<093vkzs)1KEK?O4TOSKwNunUSJ@ zguzm>0(O}O#St-%q2X6vs$kYy+VGo6eg7L?$vP$I;n?ZV3I|mciRoZYCQ0!EJN=z0|?C#nmyvYO12$2L0R$L9RZkA>Q{Ie|{TNtjQ-g7asMEpNK@M=tF3z!V4t7)dAPM;<@c$>@H@syd4?!kw)@V zI%!*!p}Mu4KQ-XdxwZEyh;b@f?+7_Yj){nrLW zI%y#^tEfcrUcDNjyzAEqg+s+U?qWW#o-}|~9M<}u$9RH792F=a0bl{-qxbR7I9Z2} z+K(UsSKX0sabZVm8ciNq+4kwmYL)1pe2qYUxL2kL zTS;qu3L!9d`_mYCxtpQoY|v0_OSzKf>z%|B&j7eq=CZYckAT$jP%oQO1$e~3DV#Fkg=aFRRU8uri>%V(EOcHNAB6@2ah(9COdZvkBgX>kJ zIJA-%s&=2aS7QK#=%k1mL2V*VFypY0Mj9QyF2m6?HB*PbHGwrPuk|j`eUx1};{^tX zK@%wioNabG?-8JwqAlnOx;jRU6E@Wj>+|Q6n;{5Zo9D)4NXM1FjguzuX)!CQXzmZo z1=wA{!|f^IiPar$4ow&Dp1oMOj_KXVy)C6_WR9Lj`@ifMS?FJ=Di%)tYVxT%GF#R7 z%Efq+&3LAYXTb&YZ+8ow0c6i9$q*UZT?<>X_b|eKO5$j8`Ik6SOe@}Tq0xNXH z7BO#gHbe96)2zC__At_72jr+6vczo>2;ysJ0T1L(dH#3mGOGE}72eCi$lS~&%!EjpT(^sm4y5SVU&>ti$VGH4OdPF}P zQi+47k+AY6aR;s{Wz6iY@Q`e0_wke6~agUAu<7P(PWInI%+ixU($_FlS|qM>n(Z zA1ePqG*CT=>peNkYQoy_o!>Axl$)!nyXg35*#O{09;z)*SE@77?iaP*&tC4<*<;B# zO!Qrt?avTtSAqn5`26(7T74O3=#ey7)Xxz2W&tx#aEDVG%*^763CNS*MLiFA2^F5C zEboX%1jT;}U>92((C%8IjOuvVtFzymg!-O9s1ct~@iP82fJxYl7AiH!xv$mqJ3Qm? z$x6T8ora*Gqs{87Hof7)BdEv?RXN@zg!AQ4wt#`zjBfDSVk5d_*qQV*@6M*{b|-oh zu46Wb3YsqEEJ7=qSB*K7aQ{Z~Sp^lQcqd!x&-kQL=@{$2lhgp$zMm{00gAOn&AMKN z%uaV_Q)a)4H3~J$bPL}uoZT;a(j8uH2BO;x(Fg_s=b8(~s*ZP%@!e|QrZ>^iUrJxd{SqhEzC7^teNb0-0lr=dk(H?f0GxZlBGipF39CNaa`?&T^yMYI} z%B%K1_Bv0k_m+bwHWGoQx%8Uw5-qg4rA(JeQ``=k-wEkh$~+;ZAWqyl)aNb4k7>Fd zm%RG`J1SWcr#;V>NtaIewqYmm%>>1yONZDi4|-hDOb~IB56@Y#AoeYzJa&5MC_fk!Ol&x6A1J$@4umly=2 z#@bo61xgv(#$_Va#D(fbgnSP3;gN*5&!4EdRB=Xp_^{^YR{Ix=Y7Id*L4X1dGqts) zwsJI4h@2whsHa;{0XpN3Cb*OY+?Rj;VfYq^X)S0fJLvT(nVzz=-17Wcwomx0Y~I!A z>Ka3COA(H=shB-fg&0d_!zNpTk$eoJIfm`t;@S3jk)8<8dlkoCmpdJ44?BvvdfmO( z=j%6yh^0~csP8eDvU(ZFPW!e?EaPMOc7bgkwN1|U^yc#7%d5|GktS?fJ}4=|l|u=W zcArVnp1arRJ6sWUx=Z74Xn&3|ixMn4Mw{D{f0(QRtv!#1B@>h8eJ#41&?-jMvpQW@ zb0#pAJGEYd;!&DFLJCp%j>`6-O_?u5G-!mF}JlOQ7k6=Cq) zNqbv3n+8!suV2)7*@cDo{e;??DnjNq?tz1&(T{?Vj$RZhs)qpITUx_!?7AxriBh3t{u8tdA+T09-9!W}o@@G&w;3-9uFS>hWS^;tQq&Jx@m70zhPLEoGAUJ%?o@3Qcrp`E{)LI{7> z4sq2r9VLBsi+z1}=K`PMzgJq_)?gqWO(WP&*q=uYf4b9ffckuuvX&n*y7(M>h8pHt zXK!hOP6R+f63B+(NFuqV#PjV}X(Z+GDJ(ZeTlk~CC}5oxPD5cnCfvHfvd&5HYElJu zR@y*Me;<-aLb*QD{a( z>FF@etBh)H7fa`XF-EE;Qy_;ob0p?)__iF12%%W%=&!M6rV@Ee4EA*HuCmJfMpMk? 
zKF!Iy{{DLJ=KS>1@G&(_yOXpa=P1_C>Wt#<{)79ruu7merq^(0vkaT)Obe{j@I8L- z;DrqRw`J&i)Y@vG#}~~XOmt4asg+|<5S1dWE{m;$e*K%a(;$zGOy|KLk8j=HpUSZ2 zo(Wq!3dA)H)+exOFwNWw<}lCHJS-S#>{Yv*(9c;l`W;Z*h#`P`0oeep}Qi74JY-C1Pbbpl~tr&r?XTm#jkll;L5{fCW zF2yQ|Gs_UY%7JT;aVoq|dJ?+TO>xQceIdds;%0RH$%jo!f%xvcph3bqD=E9?YwAnu zE-cXU*b1V|%J^XNjYE*7m=11i1DNqG<4HTVh2}u_mUqo$1J4eR+8uohHuJ+YqZ5kQ z@DWhj+^CTkVd+LkS6;E$$mjlWyKF19|33t0NC#leev&krm?H;A4#RzD?U}q{XrWrR zz5bU-K835J`qZ}oB<82WpTSxg5)YO_g(x*&Z*lc zw}L~il{&7BhSMOF-V@<$YS$}`07_cgN6=RL4auDWR<8dUC;P!YHcdwx#)op=o|VR? z!oWjX{Y1NfdI-fiz?#N_to#=|Ein!ByItf+;4Zc2K?jr=!YLJ~kAhzOg&gL*UeUp1 zc!Qyoib0K1H~wXB1vzcMmPCiE4;9GHpDNfiRBxh7YLii<*!zCXrrPi7!q-N&L{ zsSa#3a-@ciwAK0}0zA|w@x*cPT0c&@9z?j3^Tbq-D}D+@&>$$dtLl!y*DvaG8e%UF zczgFOg=v^Sa%Qf`RdJ)o^onQ3T$`~ti*`PcHXjK~@Rq240f=i$0)V*xK-bWu1cqzS zK7oN$rQ-jz9fDXOsniVQ;50*kl@pts`v#{U?8vR3FE!&6aegkPa2Xgk>1mxdrdjYj zx+dkA7yHY^$}SFkT8YTwPmIb3!JR8OE&}LC9vjtW?s{q{y^s|&-qhSiIHym>NkeZK z17Er%H(qIFprg>4qx~e1~!$8-+`M)!z&Y(=xW$d}?Nd z;%ORNkkF1P_%w?ovYha2$>GR@YSY%MhgttJU{)BODB}4O*6D}o6PV$eAUh~>6Q}iK z)3?Qbqi{?z9gf725kguXCp zfxgrJjoI$&#jav^m}CszZXw>2zYGcZDx0pkVP$G2@XTDW%tlyhD1Ia>Iz5pTfwUSH z%^4*|=&vF%5xi?!AAb?beFURsVn3PKao0V1h7zV(RLdlHm`;wnzOG2G$UBKMVgCc6 zX*N0UL`#1t`!Bhb4>1W;#H0f#d=@C(|FIZ+^>=&npibToIC>F*h&r|9$gD+isVV1mAFajG;P|0Sr+Ax1e&a~ikA1P(3G)l3ubJoq&F7s{^&XpwawPkW5|A~&)K6RRx8Esv(y5{?>ru0fR z9sd-!8#>X!MN`GI7RrI8s0`))?otvwOMWd;6J!c7Ehgl=^BpRmYkzHXt!s@5^>ktt zz-P^j?op1-SBRWua!M41nLgE=`}gwq0Q~Iq`3^Ase+8>=tY&J~w_MM;i(a4UhQ_C5 zg~kuu*$CMc5_ygJQ6%x#s+Ms7Dl0t9R{c>ntjhFRIks<==hDn64E=g#b)Yh=r=*+z&r>t4PP(+}XJ(j{@Z( zmO?Z-!e=$>CsqT`xK!cSf(c=9M2%H7RECI?DR_lUr^PEagTcV6o-+G~&D0DLj-chj z?4N=6K6}u(cc{DxqG!53pq-|N0`8tbPw42ou|A=tU_hAm?Rso8t{9!*5p}^@7OJq_6_yATV`rs)}Orc z=6S{wY>M3sAK&FXvQ3B^Pnrj;9JHg0k<)E}ED-uOSgN4JZ|I6ZrqMkj-jNh84XZy^dzWCX14e`dKp(O zg_K14jy+4?9A07Ga>>BrQnToc@CDaJJH9%9JLdoSTX6h~rI8&In^fU9+-c@HJy{wz zi$2bz=L**7RJD_(grhbwYEmm&>Yrgw4?|-vOw}lg;JXwae-`ESs@LDSC;lkE>m9OU zoGPu89_QG`X`}yTh9qe{-AgYneD8FScEEpwnH2)!vJC3P5BhapLPF1CV!Cy1oPVAmN>@Zs+-(VwXX-4T@+5ynE% zzOT>Swra<4;7p-piP+2&4?F~Egp;(}oA(OrTh{`X6Lq4K{ME$lA6cH$Db75z*`6`Z zntG!NaL?`tE)Q2;`?T(?uS9z7N=iEr{r%TcOG23BslAj1?LVbR!g}Ie(;Cv2rFh)Z zbpIFd)^9O&V+^({NuT<0!cf<~f(Ju^)n==fe)O)1ya_k`X7tY*z9Vy!H}|MkV>CnE z*_Mqm8Z!AQwuQdVKXYE9_}r);pfl#svY&#=yI8nC2GQe74z);hN?y(JukI=)O)-SO zWian8jdJ_q7~s)W$E_Pf6GL&g|AS9=l6X%4&Q_7zf0+J1RUraIjlA@{|Gv9dh7B!?}(s{BgDD0mm5JHrH=^qNx6KJL*tLK3hRug z=oU%B$L}#&Yib}|_8yOVC{kWb=qV2BA@tF!S3}pzvzsYF0r2&vicoUa7dwuU{#w+@%~X05Yx&5^Nc#Q9~dIM-owUhmK6ksU7s1o3e2 zLEn!df!JV~@KtZnuUTSq9LLq|!~JSDmTb1pQ`HmbEKPXDC|uf6Vd3rnOv0#MLyo=) zVrQCfuAb1gg>n17q;a7t+OL@3W`yoWS)n%p6R?fE-#!dMkZo+`KR5ME*{p9a22C97 zB!>V;SEi}3Ip9*WvRAq#+<+mBgJ*HFj%I}m9j3;O3ruiuv78tbjWr+)!n}ns)fc-9 zJR)*rhscm(^;D%Z=OCn#x@&27?Aka36oQZb5$s}pDegRQtHmH6}e;Q8uRofv%c z@a4csuM-dHMi8GpHa1MeUp2zSJ60IC8f$xhX0-^sx7kv#)*GQ&_iV^eJxQQASmEGO z&H~x#?Oo|=g4WpRd)uk8AQ;1gr}nu`-y&zpCZqR4S~qW=`lf|^9M~#lo=5k58=%)( zLnhnJQ?GyE&tu$k;quvH_@uLd2ED29B(8}4l8~+OdY5zg!wrwJ`sF(eD1nYduJ6l> z2{YnKc`q*gTSV!xREmGvw+n|t0=mC`VXiW)aB|1&CoQpaSGu1a4V=q|X$HBtC2qx5 zE#;9Br*D2z8Ml@bd~7J&I6(QS0dHRzJ0|Vi&y;kQgQ@Tk+quN5_J8%-e)e)aQEs{uQ@EW&TQe zVXUh(U4*iY8I>YN1%ufIYE&LIxkkm)#`GD&zhW?O#hH4gj)`AxgsXA8m{Je3e(PKQ zjEK1Z3@1Poc^Yl%)wKW6uj-9ZxBP*&@%WzRt#!7t-ui9nb!^@*g{Z5RFT+@hIk#<` z1dX#DF^jEZYs-iH`!&i-q)nZjNk+b+iIYQ3W98|a=L~k`HExE164gm2I#t2(G3`|p z=M<>h;X?nP&Ix{LB5%RWV$OS7#^o*K^*s*8kD$b3QhZ#7`zA zoaHZSspWfzxu^aKD7QAhF1@=JrePTKArWA-oZS}mpCE^p+f#7KvXbZz2G&%La~0+% zcj3S_i-2FrP>t=Gv?m|CaX6BD1+ zbYqg*<84j4*Oq^>(4?o)!?h21T>g~DzvyZU#WU(q>TpNDYYOe`-@e;2<7j{e6ywY@ 
zaw+LT!?0$mvN*kSv8-lZAw(4sIl0ugNC}SE`9lCjTF~Mfq0q`dKO2B|5eKdkU@v(f-wc6BIz%@{r2dBDg04{47no7XrGu2KOb*ePGt!U zMT_EYKJM;=bRj{-NN_fvgbz!|`M~;!ek4Zob_phRz2YrgMz|%VNTytPE&!IcEI%2!UHbg{O3`dJzmf1%oOmek8;7_tN`D@4f>6FCoe(_2Xel9d zsaGJfB1pyvpy&*71r+<_hGTmmyP&dsnAYeZ7X{htQ6y znrusaGY^d2N;1P}u*l|JH2z}?kq&ohKmSm;d^w!MzMGrys!BR_&w&V%^%`DUbcIn& zHF{dr@I7k2rpoyj&LFUvM*>UWUTkRW)urhQ-1J+uxABRp7|unin(x(2B=N)+Gn{%_ z{=EF$d)w4o_q$a*bE)ZPs0aG2s-ViU4KH$`gqfME6q$@zcx;t;ps=|)olx>X7i0J|3HJRwo z|0^bE!+}2(mLUy{3jqt2Wfs&b2*Yx|Rk14k{u_qRq9~Fg@r>&&kp#5wM%w` zH%%8$@H}sA_et3%EEUh%IU~uHR?`|Tct_y&Xrmd zB)Y<4OFT-t3DrEd-TUFHT}vkS>JZT(EHIQMP(&94u|&o3t&5f!ibU(rzS@ha=j$$W z;cud6K8K?p-y&ZHN2ID#VEB7+F9$c^okTB%3v|Kf1WFcU%bXzNh zVG`^I%}@HQovd&4PCDV9Ot2s|)`h)83t5#-UZMM=XDOn#yl@Z!gvXQt@&BT9m`QOo zZ7k3NZ@Sxr8nkN>QAp;y7JgOEJvGiITR0?1wrhKN^c&0)eNURKikz1^E-EWG1|%eJ zn_vlqe;z&f9G7_!ecitPkKyml8r}WrCkTxEuK+$jMhF z0PpQsM^-7XjuRe%ih3M79SA;C(|rYJ_t$3Qh}j$3_lRB2{1K>}vPL~Tm<_P9mI%&G z-XXriU^FN@yKKMVEo`W2Ez5`~z&$(Dp&X_v(fF>DpQ`SL&61`0Gmv=F-rpvfq{cpW ze&%LD8VzdM)uLGV8!rlc@`9D-haLEfOw-drrdUa#Wt+@76+{e4Qk3p4HN2yg!{@g< ze`J!}(abU=s|pY~D1o^xy?&tx&R=X*C?L%X$M{h?KSb##zD;%# z70W<6J)y?NNNIUwF<|m`X?U-&4(gYbU%2HyQByuF?aae1)g-YVt(fa2>N?bfo}k)r z+LLDQb*$0I`KG+4(lOHT);mT!lPFEooauEsZtV!gSwd)B)Q)Cy9wDXl_gB3teWKOD z^-BcU4q2#-DD(2kLlw55Sz&Qi&pYPTVRVR3Rm<9FN%&3X|Kv~}gQ4z-nNOR=3&?&h z_f;t=4!gNOrcmfbfRqTT$T~Jv>}z-(=z2J|e9HHsPZ=?YP9!8Hdi$DYy6gndAo|+=|@~>Jas7P1Xw#{&10SS zyH0qm0l38nyJh0=duTm0p3t%ZsQxP+_yYV)tC#zpdt2M&!rOd&Bwn%ERQ}n4Ku?rb zPC|0Lnvz0{N zR=$OcJdT|SjtVRF7+DE<9+5Rx-U6ZljS`4AV72I5`|5&aBgPLA8ls`4NeNp|vP&KH zCCZ#A^JN7N^(&$v3|v2^glSWP0YmGSnA?wU z(uM8ocpB6|VOY-}OHhE@AIJ&B{_kc){)@lNV7d{A9|1w$cfABMyhG zgz_h^1U;@n2RAkYcICVpGNZ>dZi|v(Ao_5;ElFQ3R}|_ZnWpII4a2)rzV0ERP=wwH zVO-}#1noSTD=B)_%)bCprVr&tR~mzL)*(xH&i=#CWMASib#-rzOFPp?3tErx)iMT+ zr#MQ;M$`s^?SRRU!2HKwd`#O_;eo>Kbo9Xoc?`je&#KYjdTQ?cMCFd1rfgDGIRwU{ zWrrjak9vyfePn&CD;$Z4oeaWDX*U+Dsb_gbRQ>Ou^lgi>>tWcYc+kzEi5%6#PCPgH z#GXRZq_~&UWD&UcNHHlIN1vvUscr74nQli~^Mq)8HbH+UbFFGo_;U{TV9Bgyu9%4)nt? 
zw_l=nSkATUqEF%hGKlg^Pep$R{no{ksM~`MLa{ThWyYL)RJ+Tx7lc5ga1c1At#8yY z{{Fb}tIdzuqE{^wH4d6p`I>OtQ#Ak_FQ?bw4~N3uYZ8s&(onVzQv7y>_*VYv&AgnMd_w9@QFgGM4UHr@0PwWF$w-a z^;b@5Kdxd_ta(AjYhX0xjFfDO*?HEFaq=02h8NF|yoULDaXj%z|4hn2UHKIZZUq@; z3nKCkqcbxVm`1*y@6Z#^;h05%MdJqvp=qZo!N;@`c2jhK`zKWqRdCQb%k0y3cunGH zvG0FEy+MZc*w+f+@m3JZ8!ya24Mx`14{w|(PorU@Qi1Fukk*ao{#1vG(kVw?YP>xm z&toj|?b=*ff+X*Ygr5}y7D1lf-MElv^+`P6^TZbpLLS9rW0+1S<`k@%OPz@X{E}=V zEa?b;x~sbJRL4F4jN{gD(4BkdLG+8Rgt$irts&`MoG5e3;FZOrz`-GZy zJ(G8fl^g5=13p?Nzvw6^HlhzzPVZ*$9xPJI2mQLlXS2;vf)weZ-S;Il4{v5QWW7IN z4BIiQP=>TqJh6Nyk2t8_$C$`Gw$=Od`ig-an1N_u?82+Kj}k>>G<;X?`O|){9&cMS z=e&un)`?)k8*xz}y1+)CVMTPJV-_V~ZmXLjCg(SeWHrAm3A$Odq*}^=QG;`|7{0(L z&QSO}Knvdd6Q7>Rh5#p!S zf}racVvE&dWcJ3`JjhTMBPsv6H0kpM&ge} zv8OOsRIMm$VisV~NGdsa_``!&%IYKmiHXfi6r-55p4fu)W4S{=B9q4#T}p|wXIVxA zRCsjn4lq2M*tfq4Qczm3SVHQh&P6*`cR0mqHN0tq)&>l#C9c*-&*QSDTo~W~6w7n& z2W8CCtJJ5?53EZCV(UV;VYtujn_w}Sv{mluu3+yL&_ly=NZneC=ukhVO@G8=m=VCB z#kOu0v9wi7=~vMg6@#Fub> zs!OF_Y zh@fN3wh=WJb-SIiNOhPxTK*%Y!XiJn^W^9v{|X2lCE(+@Mc6U{slR5K=_~nN*R9Bk z#8B)eYKq*r+|?bbkJI`{f$iE9THwu!!G2m5;zrkCR~xEUSD+@wIar1M*D>%dRjTTjB8a8=@;{oZ!N{mAG8u zbb>g0rZ{;5yC1oL_(w_{>g9m5FLZP9o;qJ{dI-3lViV!L3iJ{!2`$C@Itu#7+?RpC z)7l^zuAlGn2vwCBaFx2xz#an#vGRT^K9!K_mns##fK1APXX3fI#C4zB}?BvBV*Yl@^8f z7x6ZCVPHNtE+|CnD8I@_1QPanM)+kwQF6CX-nXO+QX$bEzl1oBbQ0&{w7AT#w$}er z1qa?o-#a+3=k{2djGm|WFMpD219^KYG&1DJV!nPXHUa=hIQsD*QqG-4HX~_*LSf82 zh_Wb`b1e@B$6>(`ugw7)gBlW>xq+oUEHolN&Vqpb0xeM7P!D6i=f#3+V zt=Oxd%#HWOQD4Yk*#YayKlY3)xk>Ko&jWL}{XpUg>9D#LbdfKi!K=!_9ne zbFHF6o9EHe9f*(LaaS~)SzDS!BA$lP{rIv5N zdpcZL8_jfIp1=JbrQi#DZ!6*$^)eoDfRv@pTGxE=_T&cY-@l0Eg=Y#hjmDdrG=@KY z$q}S9vIylb;ij|#hVwgOPDDLheRV2lwR#H}mrv(}v z{`fHb@SM1ZNdj)*_R{1A6Fnv`Yrt1}2)4};S@?{a+X#rF)ksBQ^{eO@A(;~3pab^*WQd4&^=UQp^_vIfTvBN(UO z6EtWV8=94PGQ<^!5N%lc&@MDErX>rpgY5&UL>Dc2Fn8K>b3A%)vk0c$2%>&M_!oZU zAI-@F;V{vRC&HihJEd0>%-vd1h!TxO` zI>Pk_IlYGdRH5uLXLP#s;*^h7Zf1j(XJ^ztBWCH)sQM1G6 zZ5p&~*i{?qwo;kAv}J~|IB*)+aJXVGf+83#3E?0I{eM)z(PIf%(uYVZdJ;@1q;6o` zgxL1i5Zv7WbX2In-^ttN6SsJ_Sik4qvgFFR1Tu&+YMjcYMYGv%*?!=sGE8h5jD&Pk>NwyRR5Elwm^)DQ4|fL~PZE{|x< zNG8vj!Ou#5zDT_SCW^^(&dv{FtNGXldL^BNnO%Fgi-q2o`o@-n@CgZ91+oh6#g^kZ zU`Lhe`xC5nI^`1@F5jv|(!Vp9>Pr5&XGO|ZvCywWpt%GVKc8E(l~VNkKj>3;?aHm= z;4h;nV5Q-&PU}!=s)Mb!Y@eeJ*F$Q#-v7~eQZp#g^wm*9vNKRk%~e3d>^F)R%0~rN zj>yVcGoB@gbTfX~WXz&PK_Hl3PCi}RE4<&;F&s4v{7J!34(nXkEhQ^hRcd?_RA z3xJyyY6DSzQ~HS}q)_lMy)pZ}d39oRv7^mkc6}QRfJ-vHNcmyX4inS`axgYQOn5{o zg{=zK0zNp#AI77fzrc8SB+Tf$DrP<~%7qkJVrb*X`Iggg9YwrhUl@(qe>%`0kp>R zPuSeDf__p(A6DFynHHsc+7jEbgSt8s@ItTacjW#LV{aK0R~NL41_>D~3>I7lcef;1 z26qc?L4&(%fWci80>NE^Td?3R!JXjl?q`$tJLjIdRk!N?q5_q@*Iuhv_tV`^cZ-E& z&&Tg&Wh5Jf*~#uUiB8nCnaGmUg37^h*a;07G#NLDrYy_;ua;Wq(YQIe{l?E%%4=4H zk~Be575w%8qsyK(3k5ty-A-1km%W8CI&G74Cn1sV_l(Q$g1D97EQKoc{o5hzKvbO9ri=XbFVA5cIf{4bO23X%1!}IFFR#vMNQ{C`k#ZQ6KQZ0SvqSD`g zN|G3wrs6GC5&rIud$M~p9`!Pc0t048g3XauR(K8hzVZsgEwv?UYS=5U`#MjA(dnru zd2a7kxfdGJGRWnfj~Xrd6;cJpBa22L+G@)ysCh(7lj#4?%)FEOG^r`nS8G>C79_eYfNo(Ku^F3z zy*NGSq%I${4WiV>>*(!e+o4GR_tJ;FGa{|6r z0>BI3J>MIW)Q_ZC*w={gSEP5JPP+9m32=YxE{Y=s(E-KOQzK+w|IZtL?#^@`1&oAG zZ%^dK?!NT--&-p@0IGIvLCt5`Q0$;-{r~f-7S`oYo2UAcAf*csJ6kU>hMp3qoJ(S_ zyJob-9YJF>{`A(Z9Gn$hHEi_nke*dhGa}!IUzfuq+QWd_y#(Nq=-pO~#Ip{LdKUE_ znuOpEM8VkKcQI^e>CGq=6%|xo_)rk}s=lZ(mxqg)pO5a-RVee`ur(8Gdsyf6Kh*(9 z!1g;o^~{Qbs#Ti%tt!{-pCjDS(SY(B0`ORf=`gA`fE>O6s0bn&O`9G(jF`nT`+MvM0+bULHTJFg$!n*ca;GgV^ne}-#AL5`?jn9%MW*NF2J}X0eab+5IR0IA-PA=Qo_l8WX^hw|335+ z{4Df$+brsuR)XkVe_&;){XwY+!`_s@AjO@goK-vSd`|X4LbCI{+VUc7qsq zvl6Sc&2NTexV9F%%ulApT@tt|hH4N#PgbB+!R4dLp;^`?c#v*D3it1%T9NDZd;?Sz 
zmW)0x_ur!XON=}p?dXf=r~RfXM}B@0oC)wRy79k`&&w0u1-4u2M`W$eZ2g|=zK{!& zz5PjCUmi{O_3;U%A^rLM?aMXAV!|Hf>7)kZ_y42O6NRnXB{XE;M}JFCDc1Yz`tPwc zF^s^o`f&Fb=%3d@?hL#pXE>A0f@tbo<9Pb z&27{RAFgB^nN0_{0);WYdTy)datHzNF8JKGl5dQ_Y3qhmAh9osGm?)IFf4*SGV|0y}=RlBY z15v4g+}Fq^38RDfeWpb4n_U)10^q^3lENY63h@3)?PE+to`3H19y+oVE6Pp6o_x$( zHmnS6u=(t^m|x$Yy}4^G{XU;(_w?Upk<>YvfROV<-vB*uBP~?N8a`Bkvm90W z5e(sHrO6>H2%Dv|e!gS^0i|-7OeuoIH?ne-H6!~rsev*3G^u9?#n{ky=xHC)WDU#- zjbfM4O6fIkCsv}&r^XY)?-85x24SYNnUSO~4VfiGd$Q!EEQkD)JEj)g2tw{@!~r*h z%~f}M*lz4?CzCBwK-2na@AI|+%QHiKw)uvHGa)0?7*>FvF}Ylob{M!C-+#r>k;qcsTwH8?C=nE0!&4@Bv&J#8kq>McDv$ zIGodE(%O>l8_OQw@AAhgaDCKQi69*19`g8Zr({K(?tLqbEo(r;6(M|jQD(4N$Q>LA zCV2A(I>Ca$A{#9Hw21LnvFm%gA+B6L&3~4W%`f%YC6yoBY^jWVVN3j(NGsAtLp^@| z)MIMfq^Fx5E|U8A)*5VDwh$zYB|?L~6Pb6h&owo5j%Ea*5`fOR3fzbve?bmE-fyiI zLohO;yytTv*NT-0;UNYtOb63ORAk?@@1aI)gPtZR_QViE<+vAQiO_TS%5c0({o2UP zxx4K@-AM>sWiWaQm}hGoWOmwZe?Bk4w-!%!u=iQ6awLRS;*aHu0H1YQ)NTWD<@D9M zM}$*kS3fhh5oy?{6IUx zZqSBwiWI7>J>A<2ev?3EJiV()_bFN5nnaEXu<+QQ#W<~$^`5>PDBY@+gDH$Up^~?j zftCeW<*7b!tXsVPfuVl?k5O!p2b7OaIy?_KBgpJ07z=rNrK+OrgpHW!8}~|zF|=0> z3%a@S3w+w^O;1A#_a>H3g%eMd0DvN^b;0xh$qDdlD3`6g;55ZX)(^}7=?Y& z^xuaR;3bEOaQ|`{X@psR*L!v~MidWc^P3(WN5Td#2B9Glc&LzMZQffw&*LQtGu?_8 zx6|}_M|5uY|E@>mvuP8TM-_r{MsRG#fuil1G_qelF{KWIFzRk*D%%Jc z#|h^tEh|KqbZT79=$y#|fcyY!1^TSpzM%MFkO{4B6xY$ILOoYM0+)*;@ZE`c1?K(c zqUSkLBUp*Og1*}%Z3IW;)&*Mjnps6>JP00;X@ew(9`UZ*ksnJ!rRr~>#tRyto9c_~ zXxD~Cl;3|m{bo2b!USKZ_xv-QqbSeXwTr7Fb_cUJ1dMgyh;rr=E!wF3sA0d3$z3El=w$!o1z7@ZasL@oH-NX)6V?Zp(4aK^~=*sr`z~##zT2;Z3!ly7#q9)ZNOp0O!P$Xg;Y6O6L`m`#(q=5H{rR-_jMtI^LtS-E z#|&okpMnIt!jR5TRZIIuH#r>l)zIaFw%TQdcGw#!gBsveshy6L(etAg{LPx7rE1^! zN;sv%X$-w8ELw<|EPRp6DBJV0zHM^B4V5nC{wZUlM%!9gtk?@wWRG z?zoYjBftfhlY%$wpd?STW$XWpEON8u?Q;He2Bi1P93$$<`z11&AeDCSnwtq|Dt6(a z&{Sz1C=mX>BTO`KM`Zf!9MDu~-P^idNdj;K0C9B8B8cE`xr+i4!WM6jO{5OjGOsn$j7Vo)B zD9H?N?LNoR2}8vZ@PiS&dulr~KbT)whBy-C|K33v_5SN>rn*Gj!mA+duN26VT^`h= z35%I9I%Ln#QeoOvMfk9V@u^D+@_IH7%n{q})EvAA3!K3-gBsmMb-(d{u)JD@8ow=Z zCh+?@yi@(;KbP)|c0uBirO*)v^0?;paX9lwzexYiiw-y_1O5sC3i$+Ih&5Atg#UPb zSLvinrAWq|@m^Lg0*IC&<5h}-LOT!WtjCn?i7t#~@Hw`;_o3O*jG$ajg`#dC=LDY|k4u~K&#B#nq-2|Z&erG%B~ z<72z^k+QdMMFA{xJh7uI2IvFVen1m1H4I1%5S9E2bQWO9dc>3Rt=lqwT6oslfXXtC z08k^rT4xPcs<0_6X5~!kHytBC4%T|gQ(}3SGrn|h5B`xOyzZ8V;c;sED0?d}Ps5UE zRc3%PVp4(tNh81BP!TFN@YjVPym*7oNB_uM-U{yoLC^o{sqH< z&k6im&^yYkS=X6&6MDa-Hh#_d9N3!7vbxST=iA=LvZK=*nL%)@IYOf zxZ`-oakRnWLt#KZ-UvkoOn@*kvH7cO%5^(bk^)80;CZKVr2e)$vzE9Dz`o7!Y2cN2 zZg$0Q?>t#~-3!TCj3@mz0AwUC)m6eX*G53etup|8)p4bCiKuBCVwdUbaAzJU90T0Y`izS~a z-F3BL-TM7i@z~o*>Z|{WYNrFPY>Wb8Po4Q2zX(*w)!%B9cSWcqu*cuajH0hjTgoe# zQO-IcVjc6WIMxy)ZHOqg)VmL!C+}94Mjx@2@34`@*v zH`vfDiDjhL_++y}QjBs%jVRJ$%Bzl9kRfM^=gW)u)k_MYwuUvG@e}j;JR)QUk7l2B z;w@A(eg<8P9{S3S78ve^ZUU}`=m71rh4*tpzE(<$F=?&Ol)qp-v5vmnnHm{F(WHcR z#z;s~>d-)psoY^XKE2rb)NUO^nP1~1dPPq`aN-^%qU^aNR*3q+GGzuKg!Sl@chl0% zn&{0~Cr9#3DfLK+!BzoAtg3h3E>B-be)5hw_L$5hO2)XuL)mMcL~EdX{MQ z2mZ-le&ucdkc|KYKsITz)ju&f*8q%VKHl;xN3%CDCc^=LVf^*xSV!o|YMI!#KSt5w zV#OIs?7k61&2s$@N}1nDc0U#(dd~Be=zD+b#K0o#j|W4S@P_(4jWgQEq5jOvAEjtu z_j|QCa>nPB==|8&e(o;>c2Yc2>CmY}ayaL?R8+vPSKVaU-jcD6dRwP>V0HX(ECJF~Abl33JEL9xh?j@MAq2^L^E=hC=uUd88R^_#9CT=Y?GU*o77cnJ^6%2`W7cQykGEI z2+?C@;n10ndG>X}4&Q5i?|Cw6l-?E{#KpJ%<0+dfKcnlL;R&Pgqmvei;?*V44&QUP zx2TY?okph{6B`Tk`XG6(3@&MxAIy;8N)z+z5XKb`^1u|3Q?W|Eh$}z(|>84>L`CHyuXn3;;yGf zDLe~O{@5R<-az{&my59Y!``G9`r}|7s4DL*xm#u1EU_;O z)Tp+CFQQXdDk?3^2f*|0pjnXa%g=hSkyIqQ7==ssJOZ@}+<*6lCWa7jGL$JhHnhaZ zDT#VG;67-ALfihl)kqk~ zqTHz5W^NMhZ;j(;ryy>jt;qPEnmmYVJdGz{Zv~Z0mXO5OOhbLDV!{74gld$Ul-(Q& 
zMRpHRrLYABcE6L&tXSZrg6Xv1U&tsU+DZQxQCn_=S$TMO%O=~BG--2tY<9$l>td&hcgf})=MY0w`3pny3t?jH#sxhCUW z@apbvm}Zr!!CbXDzJN!yGV5F4@39xDXKMD*z=mxzz(FD+^vmCwyt;Lr_{dx^j(e_D0*-7V!?TsbK~AN%*(ocBwt9ollo34iO)= zs`NLNPjh^oM2WEEBinkMC9H>_IyrTa(reAHx4lAJ(&8Om-X3J>jIh*q&eABzu77{S z0d1$^3+BL3zQJKi5vb%jr`5K3bRgcBbV(3F78PtkXYEUr;b1DX`AY~IeB#JfR0~0dI98y#d|RKv zceeO$coD;m^H@l1I&u_Hq=w1Y;K*6K9F@61=tx%=6e>-}Z2pek;4bKfCB#|)giY7i zXhob4$N8R*c|SBO;jj0`LUY1sGn|fO4jthcT_8B_TQF6P593S8m1=kk=gzPrdNsE} zZ8t0Z0NuD?Nk%N_I|K(c9`r`T4^MNTvv*OU!|HvVS#ePE!QZ5(O; zEN{>9m(c*<=bTBO`$g~VyEE09x94p$QL3NtR|E9%{^+b{KdL*+<47cYz1~8BtQL*~ zO}PF7;{n*Xxf7vQ`}1`Q4dFU(M|wSmaXac&fcS({-0$Wd!Eepb%g&$ppK4eKvK&*o z%@ohZc?_%ImGc6@H!5o};iPfs$RqX>E0#^!mFszJN;dug|rYAPkex1m_}33Rf? z$WK~{O}Rrko4Z%XMzR`V>oFPM8G!+(=XT9Rn!7jY|A+Hth9}iu7YOc!x$??WlxUD7 zFkfm(gD|kfh~KNm{LFn6fxysuM6onVa>BUuPz=dmhjzGS~WMMGt$Xyh##;& zjM%R2v`!n?*hGbdfI9me0}DRp@w%U`$HrywnvDu$3GW}6yd8;~9J z2jI+I4|{_>t6OiQIwrLJ=u?cf$`=R9Uub3YlOgq0=o{Km9lJ7RfATFJ_u1bOX_-K% zh%$DC?Z`29d}LeXAQ@4GR<^j)4qng2Q(L}$CHDz>%$HaJSjHu1BtJ*FxTJt3qPtN+5atMPo1IwvH38?MMJKOR-uP-H0*48DdOJs?#;E<*khbXWT z{iC(k1I=H340Tz@v$z@wAx5q}(5rag^1~c%lc7Z-xj$z6VY`wLQ{9L+a{y zYprJef#FF|n15(Qgs30<%j0Q%-w!H0S{#*WxFOex&$}3d3k)uA4d_W8xV{^+7VYWG z5)38h6h9vb%Nw`u5F!VEwS5f#YP)2x?<$aUoBRe!XWJv*Kd7ok+{U^K&R zi&pHl+N{I$BCtMnTFj$TK<_YiCBqQa`?bmwYT;96u=?6XG*w#8YU0VE!d$tYJ#m*s zwdjYyaH{r`V=QDg6EVJ@dTtG#h%QPj81@O~<9OWKUV;4=`!Nb$I%5b}60L`?W+zb! z_GU;XwQHfXm0dfAbjSqj)`rEBAuL*w2Qkr+ak(Q|#yj20++qZ?6$|Vt$P=znBq2{t zWbHFE8n^p$rRvz&*z`Za1iqsRz8>WIwo$7r0o-JfZ?C~5w>0$8t*_=J* zZDfJJWB7D?wT(cNMq?^ems^7I7WX9d5yiXvE!7FeRr?>QJw?-73yoz0)@P(W@)3FG z5^~J*^M?n@JkJj$w!d_jWj+y8^5cqHr1Ekl8+SOH2Ol63=i6$Cw4Y1G{74tUK6d=_ z@t+gHPsDE&ggbuauGeG#^)(S1R$lrVtSlwtlvgg{93OnH%xJ8r&r)>~t)$>JuK(td zYf{l386#sA6YCCMou{A<@zft>vb`>dCQEI}=rF04kt*?p|jV_$=xFdO&h%3!Oax!vk zWZ>DP;f%`?C9W}cCV@jR_sX{JyyrWrQ}gbX-7+s(*W)70G%7qto7i-}du04qpMd02 zyW`o228TBSX-Ki$-KUF^hV6057Miu?)o64fgAjfkym#{5E4kx;G7_46Z_BQAqxH-C z?Td7pc3>tFZ;U>jIVkHFJ^{Ctb>EatuDKY^<-L zOCktDMaafo_r?A6RX3I)n8X&O5}$+q0J7jDMY6b)S#q&BqJ%C}ez&TnN#XR!nYERr zf>L08GP4mklB`tCwI-C2l%$%Z|K^Z-8di82n|W!%^LrFu&9v`Bkw)CgsKzStFOv5z zpB8^PgYA2}JTYB$I*DAJoI3O5Qa%z5XS%LRX~f2hQhmWKTh-@9>ASqydmD`Od+|A$ z=!gi=742_Nt%0;4R^otnCP$0o^x|_3#;|8! z_=->y2C__3r905!fl&vPj0+V+RPL4R`LjNh$poYN`{%m{--Vbo1d$5Hm(K_*z=%vV zD{-P2z*`kVo(-H1g0b5OqdsTGrEoaD(U3pgi;0;`}y6VBj|ZR(%9#m>r}6&$L2XlN=nKcnm7<9b&Vy( zx|$kUIxUQfe7j9dM@N8^wMgDiq%n_9mxj32`9Q*8gl=H)w>=x$tD@<=(cG%KVmFuG zCIJfbNA;|XbbhX+UUbeJA%=NXF-QtlIYY*|E_=VsrfSYUfoQ9Uzd2AcmPyTmZyuR^=)OmrlaY`7NVq?~C z$~9H`fA<2ARpgXr{HW7<&jEq)OB4EFeu<}STy0X2L8FN-8JjxSP269SbdfRG^w~y? 
z4{5x1+~w!D<;Nc1x)5zCc*XZE!XO}%15V5|n|}+avo>Dk6Fd|B0mcG@$=$v87aP_4 zBaKW=X##_>vx50xpA(MLc7cIXKfToDfauyet6p1E0=n$l(aK&s`Y7ORHYhBSMLzj& z?KPJx+^^9Sd%njFcFafRTO^C?2|1>@F_=8s)_O~PM{^Op=god9IJ3)+wu!O0!e@Yh zbl4PbqC2qSCqq{K9qd0Tk(42S!l&ax5LuXV*w>Y0R)1O5HDNS)96#?-O-B+;89rz5 zw%OI!4O1?hXtmGJ-i*hI$YXFh^3QABlTSy`2%Q^=uo%5|_>BOd5S`GA1vK$^LsR=l zB)mp?^YeeiKAH>@L?>;K5{S&tH|B-In`=7Xq9+j?fe%n$sdc$~wLFvY?oArn-$Xrq zq1!!o=loFlR6a;3neXEbtu^_Eh#TL-(l{iI6!%DgpMSJKDVxa;7YmC5nP8(o*Q>~C z@Edz5G#GjeE;B@sq%y@L;$Bs~$5wzp^%#z$Ul9kKhh9T?p`UZF%YsKfYW?7^=KVfV zqe6dow(op>+nVGc01Jw(Cn5F2d}z!tuApGJpkdO9-ts9k0J3m3Me3sAx7>-QrX4Q%+!Wj}+g*S^ME z*U`aZv&!(@o@%j=tiEU3nEdS@Hlkbd2m`Wj(p}dSgLcrREp;YF`bgL~BqH)!TIQB!M zql$h0IpZVPP|^2&P@}$S=W}6{s;Snb5=t%oSACB3PMz{t%biC{o;qgJNWM}xCc9G3 z6kaSVr;j9jtbz9Ni$R1WqmIXXGpd-a!4$Mer5|`vcEl|ex2#fTUCplr)4|I4`fj>M~>PabI(Wiy@L=&wWU@aNG4mCTaJr|oU1VQQo z$!Ybq;i?5yP;sZvf-SBCU=hmUKj5&89#Kdl3>d|;VbxCs_!9brJ;v_WJh!?&_K0V# z>A^O`cxKsS>O5~ug4$WSGA~<6!ooGLionpHIt~6nf8~zFMG}hp#NNp4YoDTp6pbe+ zXrbAYw?F3dM;qcDWJoBF*mB!+S4t5kbisab1i=~WDy(Bg8rln_KZtLmVBlZ0#x1n3 zBjZamX6kW{ z&mob1*^nvwfT`iKLxQBHdLQ|OB7^b~wsk+wC(th~z3t<@dc^l&q}{Ls45#T3ZaQdz zizin0@l}tA4g|kM_Q6FoB|Y2QbS-#_jYjwhzco^D#(06ti*uS_0VEWc=i9QKX4=o03J(bDyY0;Sw0 z2TW2SPrv)k8^xvq?$fdOk$wqN0cdCU&!b1o>RDfeC+#LOXNIld5xU;noH}l2!8k z_0cuQ{;aefMfj7b_8r)q@}7lShU{G>YDuKgaeDs#?TflSI)ZX-Ecbnh5{daIRf(n1 z{Q7t4SUuVdov0{n>k~MK`E;>OYqDH6nqT=v+K=xeP@~4MAIwAyTJl2K^r=oqO)vf! zLqoEx-USZCWd+bH=n!s&MSgNtWznhs^4ydj=xIF`fRdS+smy)ymZe?eaJK4|_wBio z>mKdb(ELLoF=5QruwF(Q#MpIzDN#M_kGt&G@zz)?g|K|ltl(I$_?8l2xDMBTV=G?i z#Vx)>%F|1nl*f6NY_&q;Wqm`b5U;iH>8rApu`q5od32||+Fj+|F^zOe90vK=a2hW9 zSCL>rtS(e)3`P@eEB!bLDWQ)_C%=_7zV35ZFem?}Yb*j)&`cS>TF7P3f5RX@;}9cC zu!2h&)!*;_1;;XhU$vjhrf|K68I^k4YL>pblwlw|J~gw1`-GEoWi{@we}DE6dAa>4 zx$c}T3cRwqX3};US4T-Yf!UsZ4E_m3lurlV-ag*k*?FCH;7`z-|8QwF2SL)PU^Dp; z7+vsqoV%BBU%03gUnW1LA1_aq)_fs{HXYMc0_l&@Tu0B3Ojan#A(eQl-(8>teev=i zQT=)<&Tt$o2}nJ7xy)g$hY&Xv7673Z(`8i2KLG(UJrIN zQmTIm`yP;49XN~03znMM<7;R<9rJ+^+PtcVOk3@@9)>ptU(&h5LWm)ib|P6V zw<5Kfp24RvbbzYg4P5KD9(5|NR!;s{2)(D1x zlH|;r`xjekSMg9x`a@eDb6PE56IyDzXtz~A%52j8YYCB@ALv6^>r8%xmYU~iPR9qxs6qN8Z|^Zw5|Vu3S$l_E#c;iXIL>( zB~iJjQaLBwY65L(Pjy$N+)vznZca^h%x?+`=ynVOZZGyk+}teMA!&c{mdWBgQx}de z#rkRm1Uottpdr#eXzl|jN4dkppA}bc!F;v{A%$r;yRz#eGxBNJ=n~^vbU!VE`9(i& zg9EQgG=XO`Oll{>1@<|=rA z2c=ZTvKAn1#&bCi1Jg_N%GZ19i;cIVoGlxQtX10Kk&-S$0`a7>z^f7~)l{WWRZy%J zvX39?X94@PQ|_O$3KXwMR9*}IetJytcfo%AU9Z|`pe~uvJ@!PGk4>E%9E(mk87-o} zQUeX03of9t!1Koupi1s;gGv(Go$o6j;PTr0iGn%TjGumtbH3)+`A2f%!5)=|tkX1j zON^^bljYhro`XMeL`R>SQek-8d9w$=m*qX=(l(o3|JnrC-~#BG}D?Q|N(xqS8bL7~W2)Gyzs{WbiZ((HLy-2G~0DoWCEQ zS(x*Pf^wi(%?Ahrj?%f6Yzur+oLS70hGB<@`P~%a%_*(tcfn>NPP+#YrdcehJ3G^) zbE^7ZM228oSzOt{UTg}Jr*@tb>uIWVYC1!WN=mp29G3_V&o-)7&Hf4+Q3{4Io*OhX zD~q`#Zt;g;iB*v%I9aBNi`bHZ5w{d2i0GfdV-(DrW>LXd^)fvv4Em6vHH25MGbEx% zFo|#kj34&4g1h5TAWmbiMiF}xX_uMCO#?JKq@l!C`do~-2sJ}&f%JlNe^LVf{pyvo>mwutsGH|JgW(S3AQo9YH;#Mz}w~LpxbRDR>iXV^S~WV%pCMH0 zCyrY{uFC9%-v}m@>$exr2XweD@$&C7J8uMh?(u;+FMc37dcm#M`%{^P7H@422f@V@ z$MW_qRLCit9DYR0!`$C+>EC+vh=~pw^s`@}KTCT0$;1lerHOsasHF$89}typ2$l3| z5#C|!hG9+JU;@oLK)12QYk%ILVfSs)jbBW#qN z$ru<;Ag?TYzXC_1}QM=Ab9u*N39{|KvNZga;e1Sf|D&*`x`v`^loejIGGM#tdnzVwUqX zr=wCUnw|9)4qB>=9tBnRAUXTyCgCfmR8LaJ1(po`>^L{4{m)J$HE=5YUTlT2y+5)a z0V0b4o;SmfwG(0*BzLw(5+SETg|lHJKp=#9kM9V^idw%krdYvZ^$t|$EUHi zXI9`KI&WS$@{~e{qa7OhP0C8C3jk7=)q?P0iF?A&7!Y zEqg8zOhgn8Or%)boh;^a-o>$5sE0e@D(t<2K*1pzfBS=bag}o7A7`Srbi<7X$1U=9RTPX7vG6zE(%7>uBY@B(RNo(^ zSJ)SVG}YilFa~>WY;QkU`=GiP6gQH5H8`xVWgz|V0mtQFL@xY9k@QlI&@M7<`@IVn zL!$DfMTYTO3Vr%eCM&)1f`TL(oWyFNWYW(u+ptPSESw~R>8YG!+o3#_>)NTxW9!fu 
zl?B?LIcSYZ76p6(3ANWx>_o$s(H#cpp_Hmjn5RdWzh2m=##+II+x{Yrr#UrkB z9)&jY2B~E5LPrwMyd@vRTEFbgq<#DKl-6`7XxG?GSlSS^pQNGB1ptag!E z(N1R6(Fv#VZHDwOn~{*3+};=X36cB8_Zpm0ok>Pg*(7xG<= zMD0h6od$1YY_{ag?T%zE3$_!a-<#>T=G>wrGemZ_y{xLxot3qt?K z{)2HI;YectW8LtoL7RtJDUSNP<0%_79g;7REZ9odvr6Y$ZrI^dBO-Det<&+E#Q-*J zq-w^kXe`rhr0F7fx;tt;84-Gtlu{Qvoh&bB^8!kR)zs9=tE*x0@z}&9B$M3nYbFH1 zh*cY0*WPEYckgpq@H+8c>MlJcQCo6_*m{9fS~?Bq)n|#t5-^mi5r~$ie*$$>ua3s^ z_K2TumeaCl(1*eSeYH@q@v!5+G!?@1u>Cs^+~rO@PcCqVizwd9YaItQ1XiM@|gCHP`jU5 zB|w5|cW9)23=I{sb54MH|K`t$csYX(`421N9WJU?ViHo>1Et;%}a_$PEY`Xb| zXZJ5Z713b1@Gp`xD`>P~4sglJmcIk)J*>UCDpAQu38Nk^C1>o)ZzS zgT<^b7(K=#Q=mj+2C`A`OMF+>i*2%Kc}r~@ux$C#FT#pg$RWiql=OnmerLKoODS+c zD69pC_D!wJw}~otX{{H6yu+V<^icjTfz#o+5?3nvdf#&HC))0OZMAR7kJf{vCf(}s zi<_dtwcTnzi?%ecrZUk%Sh}wB)ynEoT0IyvLB@ydIBy!b9ru$xT_19aNgVt>d+7Wo zp(G|lrbmW}pdKEd2b=SINJJR&^}IQX8va#V1@gs)vvNxgEi9)A;rQYrkzeB3o#XsG z{P*$UE>vMI6t)X&Ymfg+)QQFK|2IzwIPjOx2M&L3b;wag)HZ7+U(1zl0zk4UZ;&sX z{t=$r&6e4{vjrQCXO!B|%ed}lsjr5|o6C(LuT#8BYX^xs>#s_{#3AI294)UTXGB5Q z2UOe7EA$i&>xkpxzS-iDc9;9JC7g5O)c6`omlCeO#qiJ^qR4Gy{?IX}hnuBi zZ?j)9nU?AKiw8~+-2<7sUrC3HjkNAj!~z~T`3mXXE#7ydKXuA*;eJ(?Xb@76cVit#oakAsAaJFr+H?=DrBY|{gJmgyw}|vQXu;| ze!|J2IJ?f)=5PVhmU36K;H)c*ocb~eG0yAdcYe%`{PvMe?41TGZLgOxb|b}ezx)qF zU(>xNN`tcn-iKhzA7wl*tBL~WV+5-jePUN@^Q*0cS9bSlt1U?QLe;cIcvwYWbU=kd zXxb$4p5J1&IUJE{CAhoNR5=vBZmYXiSpOi3y6eLVn&;jNqYu#jtp&M+2?p-v>CVtz znD{Rt%&%@NPb+zEYgA(JeYjly$q(Wr#_9V@p^}h09eh-SJyX4it%vhNp3Qc5l!TIG zuY+NeJ}F}U3##&4cVLuaV1sU0+^ZoSw~3qAHX;YqHkD*Y5A(c`W$|r5gW4 z0fMj_=pXNkgaY76zLv`q#Gr1DT#LbTwf{8!`u@#FWFsUlC~l(#-CtLoH-s`)6Lr>p zN!*NC)urgtIMT{u6H8YMB;LL)VJJiI49(_IerwzAFZo39JXm&gO=2p!mKpYlyYe&5 z;~0?;X6WVGp^@!IY~r9;WB^ArNQoxJ6rO|bOPgA4{pBWru$^M}t&Wc%8tX(*v8AuZ zKwwNqO-sj5zezA_<*)Hr;moD9)CZEPg(k(!0k)DIDT=;%OS{znuOb146>2qfEx-8k zuAH4_rFV1T{4D&C^{-2D-bl*$KRc*?)l-MDZ~(nVn0^S#>ZL-XHG347Gih(Rm_jpx zeb;C!|Ky>%_d2nLy~lg>AFoBFTdbwLW(n&K% zHG1NqM#$mt={f<$^Y*FdN$!S{YFhJ#*BO4>R;&)HFZzzoT3tZeHX25!y>CJ@3%r1$ z9my+2ra9TZ=ApR2Ze>h?b)tuU6O+e|n3efYOSM9&NK;AeE?up^g5#sH70U_*+#M7v z&yy3F78N*XyP{EQ-mUf^TT8M@{cea<`RkO6Ra^gB1lfJa4TXKu`6n&X$PMzrntaCe z7%F~?8S=XUqYn}bLR5+Ls1RD!CWQn6Wd`G>=>I6|ePREHvYvZ&I;s&#$E8Mw5{Y{v znuNa?Iz#Wd8A60$dytK|i?Ob=fuQUG6dEBTrThkBtChxoQ}{;h;J7#{$ibj21;`0Z<;u!1`a6_9!l3Ww-lfX(C04c>Uu` zMrH_}3o3G=YpU%Zs=yT5g}|RjY^{bicq^*(sVC@S^?i|sDF1U&2>^<2@N|bR2KyYhA{&y=)S<10Bq9^kyET#REYWcpMtDo_|X5Ge;v34Els1E;|elx zfUG~)8ltN+!bIciB%=d)ceJE!G1=d}fg-*X{Ez7;0Wkd*Q&g-0CBDxDsB}%kTNVBP zl=MX(G{omV?XmoceK)c4_oz6}E&C@@>VL6U1;F!)3&$oqY9x3}qkIvbTwXMe&|R%b za^{2wB`ZTh`|*j>i~;rr%Xx~_mJH(qva~6)=c$lDcE`zZ7kq54?P~Z`&3JLD-`)yn z!Nd;~&P`R~C6yVuUPsjgT^9<#B$7+ncWyRNVKk2yM2IvsPmDl@$n+6qF>Nev%NgV> z86Z%v3fe5bU@7(}-b~MkyxmD?UHic=bw%mAao7I$odL_`ONLX|-XQxPXHwH2T&lRx z?=g0N;=jd|bHHGUT5DTIAFY$OF(1|ut&csW-Pv#7T=hgTe?H#$Yhb1)I+eymHKFR1 z&QB(?KN*UhrHAkSc`%V*7->Y`pEP0ne%qg5X1~;eh=Ts@Z5gj(jW+K`uZzZnW#hrD z4Kw|I;a#T3Nlm98dA)z&`Z1x_StPH2TCfK0z%OAJ2i7y6(8b%3QR}%G4&ZX4kWA71 zVcbAEN%kE3UsQc{Kvd25w;)JKH%NDjbR*K;C8d-!(hUnpcXxMpcQ;5%Bhu0-{oVzA ze&6>G*JXF_oinG;=giDSqpOz|rWiQQu8{$v8>K?X*V=o4AE&_Aa=*C)Xa7M-SE4^3O1mxE@b0%UCsTm@^;5Y2}74T6lPgguLU~} zmMiYKk)TTuQQa$fAPGI87vFcY?AW7z%g{Copm)Tsli9D*^L~Fg!#{2`cleFpTjEZ`8oHu zBm&1}-VT}&YAzXZM2zZR z-@TYprKkEzE-I;4wwFsn)JFQXQNzM~eu+qxyyBb`)LW%VH-f)}@hw}o!+sMf<#kO@ z)&&(O^X&o7DQ~9l zevBX=X)6B^|0-1=g9DQL+d6$r9KSznb0P*G+EjnE2X#e>tVC}Lo6I7S@vwhr%I?k1qbF;<5fxMufH}7yv^Q*d@|Q;UoIoUZ z$>W+KMIhl4;dZ<^alr(6G!w2JPB%5`zqtB)+6k8=kZHf^EnP?yf~DcrV;PZ-E^~j) zFDH?8M523O_33=orlpNLo^gQ$`INmhv{+f<4n|d1&%J>v6+y@YYHF808d;3FdYv zck`;i8LsKu*I$m^$&73S;4%=y@2ECgz45+=x8MJc@V89@>*dE9Ml%vRafj6(Xuu=B 
z%9_ay#4*L!aHgtZXB%}gT1aaW<$jhEI&!z-oDRw>%bg=^G$dxez0%7Zinwn-N8=^~Dv3|BO|Wr#9)hp(+cNjjvRa8~c;lsE1L>WS>Pqo{>ZioGK@y>N zq23>vLape$_9@~}sDhhEbAx0;;#|w#mBujVfALBo9@Wm{Ex)?CP#}Nzr-Wk+t2#Bo z*8{GZ%e)CaX=D&aZ~3DnjH28{f!tgiO-j6SR}& zeT^uiSAb$vz{~yaHR^X|OVd|$Ea-)Kd*K^b6vX5)?{e_*@)gq$Wdq*ZwU9{5U=Z{U z998DHVi*p!k`sF`giy8P*-Ek0;y(`6PVvWIi3mxXT8Q}ik*rKWs#3P$aC`~n)o9Y9 z&+O~H_)^uIWUkdhI9F{C1cjo3Z?DHl4u&ugKaz z;FO@%{mR|uRJRKi?#!xUdIQ&HmirFq7fbfu3eS`E>R1rPj~$2@tkB}32*ogL+Xi~^ z`UiILP^cAPCK?kv?1y7G>ZU8&C!MKd(Ed9rWH__awScA{K~cF^q7p->Sx$@( z`aMd%e18y_--n~z+OVOm_X;*;9(%1)kHl433+mtjYhZF2Q1-3`pl;JpL*)}nPu>@(*;fEUZQ!U-aEl`rE$OPMb1#(PQ$2^dZQc;6%C6feX3~}9UpkA zNA8;nlBj;6{z|rI@fN;QR4bcoESOhy|5MfmY7}tnL+%3qLum19pDK3%PeC1Bl9NOd z2#e~Ti}c?eS{l_*<9Wte>7Bn}+{!umXi2b1G-au2FTc^qrk2gwDD`>>Zzz+BJLoReYZ#Nz0tQfoW?(w<{Fp}Qc#})wusnu zls;XN;3W!nyK{N{L=Yb(Nf)N!*k9%;s?+59@N(YKc=r1<84eC+GdfT)1&PL#f0%X+ zcWj0yd8ZlK{=q@FXSEdNPmNa*iei{=FeSNGM8&#TSJ2mCVvJI1bUyw+jUD6q6*2MM zO%cc|sSD82am9KXg0nhH(S}a;MtG;Sa6*wbRV@TXI(S(tuBT(vl$Zz!N&$let{@1)+`lI;GJCBP#@{`S1|>YX5v^t=`vDx(cdBk;Tb zq+^V7s(~peG#~)m%$0zazSz0T>KBkEc${Bi*}L#^cBX4tRDh`T`=wBD4~0XqB$na-((pXOf0g#9 zSXVILI*mrt#pA2a9!_}Fxqgz3-6QH_nKyxC-xVRxVuWDXGQ_nkIf;5_zdb0n0XuRv9{LPn+h7&0a8as5WhaF&L=_CQUwfZc5@C5!Z>T+?M zP0YdY0(|u2%S+QHbFohahh0Bbq08kE`^22FDRsF?4o_GUB%Qh7Nyi+KA$e~l+y{z& zbe7CpVbm4#w#vlW3a|LBQ{41fLoGS@WiG#r`J#?nNzM(Gf=j>26}cBNp?}d{TTvAo zPPLA;@}}s+dO!)Z&w6*n@xt}v{xXakX75Vh3Ctv%FQLm(t9Wb~HED(-2Bt8of`a)E zludZrAI=JupG7bb*ijI?i8H*2saqX$;(gEic$#S1)JBf7?d($79zw|MVkC7~ zRDX($(8s(_;BI7>Se zoXassuI$c68srWp1Qd#3hd%Ht-VKEuiZ@E8HbL-En5ueT)EQNBsTsI5pyB-ry;}8M zf)q=%2VV+}Teq5!vgw$Zyr;#9X@iQzusRH6zQW+RGVpM4QKOM$vq)n_u!i=<=C`Om zn<^jteTC~zvmauS+ebJhQ5=_nxNz&y8ZtV`(uG#+X3IZ1jb>mvc+V+DRLE>p9=*YW zvw1$!3d1kbK>!IwawwlK2ZeGh3gL}nk1CV*J+g{`}6ayv`DPy&PPp^8MV3>DlzKlT6SLogB-sC6YeBA9qjhywDIgJ8;Znd@BG%(Mi+64%|BL@qOkP^7`6SA)j zSW?h{q%Ob9<2?V!5;dW6D`4egliVtz$xhtEF2<8QZNa~~nwQ6gg#Amn41*9_4+ib?DqgeQ3TiApxKFn)WQdgYfY4k z&n&!epE&KN1^lX?=xz6h7Z6A2;9D}&XuLdh)r3aCk{7qGSVu#{(5Onln=UK!t1T{+ z4V9!-Ys$Ie6Q>n@0g(^T!;{IoBwrzEIHU5q6oGBpI^Q$5=K(XR`U*%D3dOqJs~wso&MBQo zY#Yya!wnAt8iAITRUIyHkXD0U=)z1@A=_m}rCHNZh5jS$_GXLN3kV`?K(kI@Mvk9n z!s#!VDTcK-(BJ@%Kl zE{ao}R8%QdpWSAZDXHuLe?c8pP zIeVU60I@F-Bp|GgZm1GsgT=C!v`R$<{wiAuHc?87b~`&?3HWWaV8Y+9l=s(|^b_W$ z8KXp17kNXMw2H!0)AUM5BgaaLk(;d{cH8Lm-JYQ&wDV{j&_U5e2sH2@^I3lN>i~84 ztF$U=)LQ8DIK64M&sX(yRIDR*o1W}ozD#Bk?T;qwJG_ZP`$oaZ}sV1v%>J?45 z{j*a{RO9$aLcm9OpI!-_L{Y9dqC*nK4f%2s11-UHfxcjJjRV+`DvU>QI_&Khk)kC=zB5O9P$seW`g9KEp96YOwi3XPrD`?*Hmn-5iF zdZNFlXx7YF)+A%UkoY3!ZaFdj*aZX`NMi!;X&0#|LyJa;re}1LqC7hJ32(Y8b;|bEWQxThu>4J_lP2#;FlLmbihy7&gcm2QI2)Fa|8~}AwPpf zq|eCk@}|WlEP%j{RcNQoov9J}IWpDq?6IEQhjl09y?|zZ+g3VDxtZpT_hJ^<0la{C zL=JZ_#p%W>iSTeIb{2nE+Ai}aGRei1t2j%f>*5|}JTW)gCLOcN_6S0w zT9h^cg`D~=@khk_LpQ}+B!a<9jWG^r10xbBDmqFUb;RKVwfLluGj)5iD$Fc``(zmb z7+R`DkBQ0+&SQ(tM}na2uQWx+8$^TX;=bA8-4}LG!-bKF0a(3*m7s6Jk22;Y^m2`BQLw#w=Afw=7nt)t86aMm($@Glu5*y zwpzw@HqqU3t#HY#bp(>DxZcS1Qst*rB1eCtCS{!%A(WV!L6kC8TwS15pgsp;NII1wx6j}r`9dZm(L{b- zvikLGz6Q}r=x-_S?KDw0diMRcyX^eksN z57_@AvPt%&SI;k12)yUZs;p|J(4rujSn2B&bwh95h={k{qTX`wpQ$x z*)@BLh3Zs=hg6Na-T9W7>a`rA1iFu63Mcd35s;M4Me3}=_+qt+>hkaFXT`Vw&lb{{ ztjFg*D%T1Z5bkSzKMT$kdZMcwNP13^#6&W|O!@^y=(dH(pwW0JfEdT1Ei|XKqI2(R zuV5W9@4KWScCKg=j_K8t%jBt2GTi7YxkT~p82i(^X{-oH?f)YO zmh=5uoE%4ic5N2kLR|{FX1>G|J)U4Uah3j}FC574Xi(7c%ae)RRJ9j%4aQ&4VDEaj z;$ODWTDbP?ADzT0Ut+b}UB$m2U$oJhYpGWi5kfE>kG7~rbhT2>I%KQ<5Z0Hw!-gKf z%IbO24+4jzCuXdzLUKrJl+*3SPK+`P|KWhj2AdR$brO)(O?@%J0l!Zdl-6olM@Lg> z60T&yigY;5F}s8a)FifeS1lN%OF84V8FFFWRS%_l7|Q1AjUm50Ak>It-UcUCvRSpD 
z8d~uZ)eMf|e`3-j`p#{qruJe@O_>$euBVT2&l53>0QbM*vJsGg{U&xmsN4kh>iCo~ zi>FGks6Ff9&&8D0-9(oPcENhch@Ms>z)lfzf(!|8%W@}r-<*ND! zlR%%Hc$|~p%_PTEr8wbw*1o)1H^L!I3R#fU%38bqe4`dJd}JwaTs7y2$VP_j68U5S zM&jpRc6tdCLNj`r#UB1vf6uqqte3dLJKJOU^ko|zkcKpm$3>EOd8NaaEVg`LYCUUY zw_HrM=!qSQoz3pHRIYwiTjhQ@%`rAcR-{l zbuqlvRuXpIw z#~;QG%?hkD-!2Q}q%$5(C_+FmE;;@jkT$s&EWG|){aW?jobBWl=nnR-X?{V#pqg8m z$rOh4Jp}ZrEzjKl1!$)V!Nowj3%ByUJ@xDY7&Qr#=V^koEi<{86Tq5v;Zj)f(*#%_ zWEeCmWl1{UH|4HkHvKG;!o&K$C$kcjW)3NBOe(P^OTg_MeyK~G^ zeSI|F9{0_Edg=lSE5FaiE-T7oS0U9}<>Gsy%(F!6$k5H9!01vpg$li(2G zGX*rZ9p%UU(fibZP_=ta6<*^O;CalSAENN?H@Mgo0(D_W=Iut1gqvm=%c#l=vJ`*5 z_h7bs75LAZow7T$&c-brlLd_Ck8wOXH$%t6lfWs4E*juSa`Ok(0d3ERD!CVEOK(<3 zUrjFq5+ydnE^169WZOraM@y#I2E`)Yq5E2S&?>xJgJwHB zIGq1UBoI<5j$k5Y6Nc4TU0;qqJn4yvz@fRsJ>!I&-GKfbp#+2lPMukK6kSU_jL|l@ zZqK@`rzKadG~Xph7a9pfI4EAk=VooizJx$VNqFlSE;_nU78$QPnLtb^!|D-RKSD@PjINg`3xsT#ph8n1f1kd8~C5=W)!L6VyeKh+LeXZ7Z8{Q5s2m$5CSJuuEl=-sS zTm1vU9I^W->CX;p%>e)|>IER-!R)RA zS0A+G!j1t&`XQM4;ZvG>ar0)}Q}F%E-RtXP?)18nPUP z|767n>6#s-?7aW^0C!4tlk|tw1fLv8=c#HxB^5-8!loz#g9mpKYsZe0D{8rvw)ZB} zAv4HO9;<|gOo=Mg)Plc8qclcoF)n=+ScLz(FBhaOI{^&$f%d<%0H(-T>P&h+U~}ca z)O_-;f*l}02a$SmXn>1|nILHj3%RlEg@Ed5czCm{mC_-$=9j0UkCisy;7HblKue}ovQ z*p;9@fBtpRGkj#S3L;CoyXy|3)*4Wfj1C)*ci^LAdziO1%|e4pp+4SR^*{enaSyy60?e0*ki|#%1%w<5 zCm})hZC_g+1JC8cX3ZC6F{bb~DG&t2Arq5FaZOY5v}y3IStcZu7X9(VDcq(xKx z2k2#wugto9&-i0CL6~PxlA*`P*@zygAw5{$LN7 z)k{eKaM5{^fw5MY{qGpm@`A`nt+K7n_QxUL?`Nco&Opiyg~rn#Fku@s^iJ2^@vep$ zYs(2UsTj+%nj#5nd{dJxb_o9tdMCG*Z3x49QqQM3n78wAb&Sgl_7oogCE-7Riwh44 z>y*|J`RjYr?~})=w0r;kb@(I{gY#A&JLmqmG==>ekmBH>;>L^d0)JYRV`7lp19yS3 z(UJ0f)whw?k+h~dl@%~5Vl@A94X-n3QJ{Jg`xSNZGt@qH6_@X?XA}mki)ww66&O4s zDygVn*G(-ru2i+t8=CDB^wDmxca-m?(VP6o%x3FoAa*6M?P382CPoaDUD;GE z<%g*si2m z4;1iZ+R->^7)t4DS4>2;C(1>~^iZv9I!>3V_Hjv*k$UG@ge}bpcXt@SApcV_>%@*f zT+QeGmtWikYBGm!a{d-!T3{%a*5n54b$H+F*&`QrEh`4n@nzzf3U*LI%^rkT8wcD~ zk}UwVeoVIEXJ&l$kJ1M+>)Y*Ql_u+qzKen_0Ux8BCt?2&xa(C7rKmJ@K4s-uwN;wOh z9XV8lzj*nph;coC%c%;8wKA>74=8V_-(Do^o^MSbRf#^VP5hqT4hqva;#*I)zt5d? 
zADy!&R6r6TUPFdt4lD#Fb~H!$J-0S^_dym@1t=&5UZ*nuRUfFC!V*f*b2zxBw9IGpj;6qEf%_z1t;nw>NvKtx*m|`8=yX zp@2!O@|wO1dI1@RrQYaJP3UAA6{R#%rL^^BHY4uA>UuupOb0M*3 zpvR|(rBI;g98tIJ0fJo*#Uf~6P1TU8MM3k)pMhUaiQjsRF^;l6(!aBELjU#(Yowb% z%lpYhy^etC{TZt^AO{2|1o|Kr{(B21T}jz4WZ7&c$nave0hLy{}k7JKJri?+5*OZb%yH`VU<$9FD*G$*I0pU)pa!`1|q8WoqbX znqe=&68Gre#DNd+r7LG81wdS%QM@Kz|tNT6?vStIn3t(vCnEUoR)8tlWqq!4$P zT|9(@D$j}tV7xRYR0kTwv#%on5aPruh#KNqajt-VL&GDkJOnVznmQ&gw}EBy%4nhkSl6pB<;5s4Iv@3l34EuIY20KFsd zbmI}0PIq7RlINPYF zWmL-Z*(h*-rfr9uZ?-nd(p*OrTK#O%WQFx_H;n`KuIeP=#;rm%`et?#02!P<41yb0 z;2RDDH`kJt^#>SX0XeK{F+H2=I@4~9eTE>0{22P{vOnhq3v}D5xRrjM4iw6Of5*ao zWm4UGMBn&);m42hGZXBa(@m1X{F-Zu&o+s23=QIGU&^r%Lp#4%dqw*OZ4KeW=hwhk zz<``0fV-h7VFm26jWi-6AqV1$FZB!Gi={PJuDS~L;YW^Z2 zpgRQHQg6^s$uytOS&h**QMb8YKGESVyBjR}lwktigar!AJXBGa1z{tRu6Q>rc(khF zpMqJd0v*BXQ)@IJI%?}pimiTG2x(;hATOWM=B6Ja8Be{HNYC>?g$Wx~6mmIoQveSE zl|ozLqFX}Av|{XeVWzo|Mlo6WDmjN6Gqvw|JZqm0_f_-ntFHRzv5C-%v`cqq0)I1j z*c33rmm;hMO!k4c;_IU2^5p=6B#-kXr4vwgO}E;bjc@)LrC*$(X;mF(h#CTj$Anah zC#owIKuPo8pi=pT%r$a&#p7gkq+{z+;dl1>S1K=bmKr;U8 z)kc9@I`s{FQX5s=y};?eA<)l;(T?I|x5YlXYNMsQx*&)dP9$bIvm05+u@=}5gRERmyUg&?+81$jP%GP{CGrVh3qtv<56=X_3U1lR6>UfkGt zXfwbge`rNTCHrnZx!RKNx%g?uva;Hs-+fz=M!Muj zMEGRSfvF=I%JSAN8%S%W{5fV74ko{~6xx~oEs(Ooo#LAlc`%S;jRa!2lKNyMDh`eA zXAPDr63QR@v#+h~cIo`uU*&;Hb=oVKbS9f$%9zV5j`CR)oY@_>QdedJ#`qVOkKA~Z zdHcBKzy9SiDd|8(e%<&L8@T-%E@bd%JGu6GIEGf=Q&x*|Oan5Ze0{x9?xR`5FoY!h zN=sQ3TP^swkonznGahP*rsAhw3W3*jD1FXVS^2Lq#U1=-ZA=Fj3A_EnYk&siLx&~1 zzjxrbxP-Z$$@d=we|8V407v6Md^)HLb8e*^)oLDx88po_97S;G$*l}_H^ghDkjUdd z@{UG5v{hep5#c%gR?4w_@>5n|%sY-sBo&a7pe#tnQCx&EYG^s_?DY{?Rr2BXIg|95 zk`2F+c0xc`8wJG#AFtiVq~yKtepcKK-L=ITe;~Mo+n1L3Hz^N!zFhj3LjGS9-m4r= zI5lAAR`)Ig)XJo^i`Ls>>=Ya%Y2hdZL9g_S4>?p4^_T(Fe${6%icuMNUSSlt^yLx* z4uN*(<4Z%<=b<T2)v-M==H`SX%wg@{wI~(06Z!=lcW}qC-5^I-^5s5>^jfI82V+XZ3NSk_(aMIK6a(iOx8=7 zwBzuAOHP10Yg&-wWAnLz?FW@FBhnBh(|+5G{YA>Xe89ozwWKY;TWv`HFv2-|$50hT zn=0);EXaro@pYrTo+$XQBzl>mhn6<}4w%}(VKj(r7Hw~~dpU zI4{BNd=%@r4y%vh$r{ATcG^>uc&ph-)lHe&e*+4qKcHb;UYtPX+{SRczL>tdI3K+K zezs5nNKYRt;4!Gzu6=Y+$n68-;oqjNoDpGRlCHIbo3t8I#-+C>};TD>CJUZ$n0xbMQ)AjKWbFE2ZIQdkT0Qbk0`%yoz zws<&&&7S^YPq*kXPb&9g+Mk#!U&-3~W^+XLu;y^z&>C@5!x()F?FJ^x$*Y5mrGEDp zP$(B#$y7?`!0@6-{`Dk4%5|&{*p3GsSeZsvh;0u})PN-wN|d}?1l8}^m(1gOo2Kc5 z4deRP!I!kiuZf#im26hGX}b$oa}ctD#A$eNc9$DsgkH{nS$4@W%acgwmFdfqwDV|rAEhAj-8v#PK7>|d%Tk%c~u=wVxn+V%-qK0qtKm1cM0RJ6GwBQ6= z5j{9BF|+g(qWZkPufQ*Gid1VgKfoi}(sL)JQr)X4E^<^5PYXB+P&F)hei|!i;1dtd zwb9Hc`yvf`vpVra(oW;Xsd~A4Dp=*Xt#P z^}3xPS5NsK%+`x<_K#&EMK{y%d>nhS=<+F@oGh@;8?uPy7G)ALv5;d4H5)5d`~st| zh1xt_3-FT4q*tQ#`y`|IU3M5`hI>iUwZC$K-JLvW_wmi7&#Fc3V^S-+rCsmQ>-!?d|GG672F8Or`_ z;PJu0Z6P29tT>a{KIXU#PL#}()=D;ivbYdt$^LmVPwQeLbC7KI;Mw=}rE*CK<!UH{~vlo>lH5@$42CU2Ac)7Mt*@ zL~_aeNve~R>9a|>#iZwBQyfKiM?{9{sGp%purDU%I5tw?P)p5yFYeOEU<2}gsLkE3$*gd+mFXs7|@%k)LVUoer)?OZCUF`sL$gr zDt>M$+=-*8N6R{YUn+2!Ws z`Fu|}n@uR&OVOf_&9Qj`lYR8~ePCDLAg4Dt_J!r8K`|``;`d>-Ir+_X^Nx+S{P*L} zjypvgNCx-$Q-DA$fD#ZCrtlq>=~`u|tv(l>E_cD|x-O5?i?``P}ne;t*zMb@}IV(~0Ec z!z?cJZs8Xy?K1tw*;rU_uI6%vNBFT|)Cw;OuDRt)g7_P1bA!@;_B7{l#Y)m${?@nI?^P1o zn60L<4@arrG*ufxY0X)g+^md%vWxc}JAvVx6#9x=rz`@medcoXvck+d6}fM}WDk?J z{w8>9yg+~&Ll0X7r!MudCBK>pRU$UkOZ+^zbUSEH!ru|FfBe#HnHRZfpjD! z*kj>h?03_BIlShvrLM}@7ZSro(wR5yLi$9F+8k>0##WMq77KrNYuIhH@#yAoN%FlT zSS;RIj*2@wEsfvslVp4OrPls>=rdxM^j?4Y0W)r~BqF5ph}YfxJN}GI&v?@y5~C2? 
z?Wk=ygKc``16=U*X=rHDaeMPjzKx6jNitU93Ig2&x^Ifd=q!=U#JP%Hp9dv<=w?m2 zJ)Y#Bl`@Q{(*-sNWm>ZvE6_rE1@gd<2mQi6d)^1V^D7Pw0c zz%wQ79kr0CW^$43mnjt=A|FOcXXbrLKfOoN+Ima&dWUn}0kJ%t%awGHp6MdakBC)D zflo!-QJqCpsC&9dU4C}zN27oy%t(pfxNfUNabL2`*l3la{jg|4BTAxJ)x#1|`TGx6 z-<4#-f4mp(579tPRNJ6~y|yfxry}RS8XX^fAf8LSfB*2h^z+_f-j5@GrN_qadvlMH z_A948e#sF=hl}oy&EH*Se~g-k-Td4&G`yu3ynJ=Ijq}k6#pXmHy=Z*tRP)XfId=Gs z!MY{OrBw{q*zNa+WftO19epw6lpZ} zQFZ+2o@#zW(+opqTr86{Txp2zT|GNVuTSD9FIfbkzC_|4dZJwRP;ZO>!4CfVG3gUAu+O%I=WDL&vJv;*$k|iw0~)K@^*xel_46yFqkqEq^jnL$Os(Yl^59{Iw2s#g~sU9xG1Lvw6Rh zk$aN`$@wrEmwd@)jq0ws-+4z2JcYomt?ga>@JDk7Tze(!n<~|Kqtza_)kzT-EBye! z*I%+$`#zG0ATqG9kW^vG2eKslc{a()l<#F-id;Q#SicnksIJ*9O}|kYV)u2I@>E?M z#Gly!*50Bu5B5CAI(cxYN=e}Vm33clkeGe^4t#~oh z0b`gdjHLIT&2PT5TY#qw)}G&BD8))P9C+Vx4QGyT^5E9cpuGG2tT`R1VSyNc&Pka5 z88Y%*-k=UnEa)4w5rXL0;HNIH5yWmHq6N8oQ|K>!G36HC%CuPkDyNVt&hjlByp;MWo74dijo(BhFM;f;1)l!iwYBZR1k_Yxk$1^}S7_~| zu48o(@>^AxMx{ZF_WSa4i7D7I3{`?SCxS-DBF~hk7~k?-D#*ba{tT^_<03brQ2z ze(t_;6FIJ~#l{v>#-c=tw+03}RYo`ddyP3ovn|sa-(oYurBP0E=EKPrQSmYt8O_Q_ z`0$IDG$28L+<&vWEWME#U!q_3MIB#ut#yf8DfP(SF4ihvxmnxWXOXcfrHj`*W2>9) zPP#kgz%%~F(RY%mQ+s|tqOzA*DIhsZNK&4}C-lzaDB-M{O~4~JP4|H93MP?H!LN|5 zJ$>YotP>yTxqE`b!Pt>kP2p!~VilEQyC2saec+N7tdVP90X&27N}L)1VN)oMN6d(R z_g_mLY*{tzDI&Hv)>k*#$Tmxj|clApc9dh=-CvMTo?Y{p5FC*GElld6l5 z3K?oTo3Vt zGP~3>Ns&T{b=Dr6n+C7HrKTt6*iOMd12^|Ln>)mY!FscxMrS(yqELah{xMd-6(5{j zJ?YLkc%{~zvG6xw;x9l3)gj{JAH`@XXK(0~O-^l*3q4AUF0XV>hz7-S4DEA*kzGX^ zF~G|-U8~Tj57j&EU721@7@(6LNWq~RDCzi0W(zajG_p3 z0C2`n7yyF46Vf{gs&)CYlzgc^979`HzTzSY4Uci0Rw;H*dEl&J`nKEEG!KlYb_7Gm z9dKu+&y~#h>pm7n6qTwp+L727C@2#YW2?BH^+tMv5YIekN0m5?=$VZUkr}cj$VSk# zjmj~f;qbR&(Dg=lRVF;B2}TLsn#oxUWO@o-`#bH{pd zCPRzJl2M;W(DVmE?~ULp*IJglR4lRoSbQ8qG+oj`gKm(|P`apM+#hw;Zms?SFX*r{ z?!Wa!`Iwkq zCftX8CiF1%^4ih7CIS|rNF=r(nkwswYt~^tSf>;(Oz*oo`}F=9k@scj72XlDY(tG@ zA7HM4I^2VgTfe!?^b6Qv+NeoNd;(37o>izE+~p7I7mfsgG7?BB)siWnUn>>K$3upd zB|{vDu|YL)L(2{%7)!xV=0y&Qb=9vSCd1J9Vib;nfKU@ZxhLP6z^9+XZ8pv$k!F<2 z0Ce)qKLJ~S@O>hDm<>hyc`7pFE)aQshBSFhz+g}(H&mAGoE7Rn`q1;+)-8_ z0YST;Hom2PcEBhD0GucO(9(bhl=kXtIU1U@z{=iC5}iMDdc*iq7yOZ3_|Cwp0z0sB zdO}}{QW$-dG@GmJ3Be^38I58QnaL6Q^zC$*`|%#)WGxci*Xg+XtAfc2imzMiUIpJR zS?B()0W>iOd$?UB((=E^Sr4ENPQ%ezvH;v^1zMF7FRhmZ_1-UnzJry5pA;oFB8pj` zT=F9l)sDZtm@=?xgRw|ioq-nfy9j1*fygE)JQDR98JQ8z3N66$Qn|6O_O3QS9pldS z-A&`M*<|$Li*9SduUE2C|9s>BV7-5G1U0j2^75zuV5h$!^m)pV#j(|!1ynr`!Xj&H zhIL(Kzg(B07ZGSsli@H!1j31yc##KK`x4<|136&>%HoIS(;8qqib6cDm-^5C`>kS| z6BOi`Z2G@EJ;ne!+;Yxn(@zMs1o37;O-9n(fn1h%nWwnb`lkcaBb?kOqq= zV^IRX4|xVLi3DTE4Vn2yKWXG0&%5HrI`b}GOl$sH7r z80M|~i6pi>i~VXC^A{Illrr_09=4sBTxVmRT!EJt6VOrob2?>c4h!s?B^z%mU5zhS zby6?r3&Sa?8^w3Z7eD`O^4mwTTA-!cqv#3qJjMa(f2FBnH>~%LwBNEn@HlfJR{bhr zLW#p%Eo$RSju})1QQ6PS%Yi9`vQFuQym#s59kE(3W|o+k=-}B8zx`&=yHw$FKV0sS zyMV-EXyK4YIXxC{d~nV}>$zVjr~?hG^h(g{8NnMwjY<)hmOvJJXB75>MO<24u@DDBLdT-@ zo*UTq=IM>CW>f;A(+$oTDP7>e*9R#er zBXr+P;K)3?3!qxv@tr=mCfDBz;PdJ6rBJm7)OV&*Q~?tKLAe$!vq*K)EpcwiV>j`N zuyJwL_vw=MY*HG$bZQrMgF+|k~P_k(|y&tt^dA=|VjWaUAl&Q4#!rGf4##|sN<3MaA z-Z@lOyz)cj9z}N*H2Lm(i3Cz1zANmuu+x)V2LR1@U{wk$naf-LrqXVbTdnJ~Fpr*$ z{pCAVbj?OMgGRdQ!Pj&~e5Y30xEiC6y(XIpmp65PI78dYn<1;qY{og9;e^WWty8xw zx%2cX$@%Up;udc$#a~t*^?SHp7Xxc~fobmf<;u2)JheWaOy>V~=hncD6X+fpqc zp=@8b)iql-xB2r7Sxcu=#YrXmON_>IK#xNpNi6$eaLMo11}QUvdO74ywHoYckcl2nIc0kD|Ps#oQUaOSiij3u@0enthel1 zoGHXfq-a*|-tSVe@tug>=YFf3lU+4C!BqQxM|5R=pn44?$o+qeeRWt>SsSk)orjWc zkZx%>bcu9#NjFGy=g3BW~t%nLZu8gb=0nwrIICcrqHyb^sd+Sv%anZhdApVEE z6hPrL2BpaZN^BDPLg1=Yy?o}i+s)W&SF2n88*7+q>w+_GX(siS^!WQKR?{gvzs;VkUa@8j7CL#7H8sX#5rKoMQD;?cpH71QnEgcRB zwm3wM`z%f)H9Xd$;u;HHoO!#|sqcqZr+nofUlt31esTYmo?w*sfWvHf;#tlw%L$N< 
zxInR`6S?Q3eL}+EF!YV)vDL4o{jm%L$+XS=8q%u8Wdzs{lV-7J?^%B-4I=6f-1KZF zk}?5ky;RmG?|Alv;tTFZe=@9c3Q&;=V)tXjy@sPstBSupyMh&ZmI6aA!~eqkwhYO3 zRJp|fqC_}27?$xKT$WC~TDxDyeRkC{KHleeRMC7^`K}bxl8H3QCsXqw-we3KuB<-e zLe6e78ZuzjqaZbDJF!PkKE5xws`fKH+)A}X@fua#pvX7bbGIB)K&({(t70(Ts$~{Z z$y-ic-n<~7xE@Jub*6J+eLbC)6FdH-CQ5B^iQ~n=5Zl&S@%ven7K1D=1DlcUFzypZcW-rsq*=dGseT6AsN$Z6Px`;}T679N@JYOIoe!p3?-ykDo7S(Z-i~FXhvl3~f zxsP*cEfeFoH_zAQp+V=ZnVDbep75~8V`B0wwoYKf z&0^O(FX5JR64e|AzGA~q0d{5U0sQjwlod7%bu*Apa-)C#u$|0daF8C@>qJm8W{L?{ zp0r$7<^D1r90LXJ?}TwsIStIlFDRZH{`kVYoIpn9{61)_xhMAq8;`P%WL9S`d%rx* zcS7}|NV3c<4ZGsXBJ5|{MZ>EMGc#0JTGNYIPtPx_^y4OPul47&X_9@)W0z`06Nd?` zl2c3ZKIoI466JbUjc8-)| zZ99HO>}}({+a37GXRq?%L$kr6K2AjCXrm5^Mx+teWT}K{kL|raDfptjbTI>LvbJ;} zwYjHJ$luOAU;u0GHhj-)Vp%RUScJP&q67CvajaMC`gBr3?c!6Fi&#VH42#=w%OKh7 zkt6Z5%+}}DEV-?&gg}W9Jb6+?7Se}fPnPMwS}S6J;(5onRC|9I$(Yi-#>u?|R6s(r zoWuh6m+78^&0~45eP`p}A6}rk$CqAATl2~5?aUf)Z)<}X20tF+7bNM3bks$Tu7uO+ z@ui%5NbZO&+(80acM#V2E5mr!O+i$W?}PVRuVw_hf{08ecJ_L#l$cI%H`Q53r&2r||52%@x-j}liLyY!kdDm@VjcBXFkxQOd?-=>FP12L}IMe-`IcQ+lSc@md2IV7@o333ra@y^VX8iyZ)^Mg$J<2=+D{l zu7ufmBR-OAS276V%d)+?op8m~jr^*IWFt)1l$2#%& ztB=X_Uo|T?CoZPUzg`O5Tv$$RY&6>M#AB6ev-dX%;p2-)*NvOm5#3)Qww)iN(~lR_ zM3`5=;^d?HrWO{L3YN#)1DP50hUpyK48;Ps{POFB`iwTN{Z!k4I5*XMA`$5!Tb9iH z*qvIpZB~c?waHbPti9^E{DNwODP{0XV(7{%o>BcWmizAA9V0H;Sgoow2psL*cDj|L zm_6t2yu@T_Qr*tY1tACJ+K~4k0$WC*kR6w-LXJQ}lYcY*O(b{Yo2O^EKfAwZLos|w zH?j>t-7#kv4I-RU>JAE`{v7byHo$IgRfUa09A|Ja_6x<&n#0o(j*Gi$`(>d&9quPk z+<-IIh?s5nm_PJJWGtcJ)Umj4t#oR)TQ&aEig#!ezD7v|%;s5DK5}X`89JJz*O=i{ zH6@}`7x+2UIyt6U1Fg?jC$Dd7z4ok!Sl;cq4mMd)f3Cf9$b=ruS%k+Pykzev2cNZ_ zY4v*Dq^m6HbWDS%AzpdLYHg|?oiS>dG?Lnb)UMf;PbPmX;;JrYW5$=O!qPDg?w(=b zm6Xa2?AY@oTy?{cKM(rCqwK1{T_fNGM;Xtr!EzSseul>g#~;C;zaN2d+0glwOF3GjAZ+S>uXFRytWakmbfq={!-hV| zky%>F#vr@OT~Q_LJCUDpSas_ev2Pl6;cMQxyHr5hE8dT^jWb5OcukFI!VkVm;MR|k zH6V}$I%Ue>i)Vwbw&z&|4P|ERlINREqM9?v3c}5@>SCX{6jGB8DbM($r$x_=TZeHH zW_K9cyMvc(+(}Zwlm#l+BxFj~@2x+5bZ{Y@7c_YmT6+>XcXr2oYC_

$RPzvxSEl!(N#}H_N4%#?P0*Z=)b&z*r*5e9i@p*9eu2w)=B+7O z>zYF>vuC}1l)XhPB8BPn`Ak1=qR6@nV5@h`H-Scs*O}Zpm8dNWElb`KXRknG@723` zZ^9OC^Udb(8o6F4DXJ_OxNF&_tA7;YJ=~G?Ty0Vqy5QtIZHZ0dyWnhlC_MwOS~!)3 zfWfbDBD1t5w>N)TFxqv3Cln=Cu{RU8%V+ckW3zZ*`LZ1Qs;y83SX$-0tHD9PQ69nj z%@TZ?B5j97HyHBan!-N|#z78QfKR+rzJ&->Igo*fv5H84GAR{06{-~)YXN6d%)+)c z=SJ$S4i3Ks*4b*LF_+`f?fSKQ#$a_ZgEF7;(>>RoVx}fH!ax#8qRDJ#;8Z%I!NgH! z6qVYOo8n0B{hH2YG@RipsVW>aV?xycS5R|dbnYG=&gu0QhQ7ezT{-A^*^_bKmsYvZ z92bMq(JJ2F5nC+Xr{=JPO{8>3qc2~*Qu`FgF`aaiReAT8th;XU^jDSdV~0MV-D4Pl z=(Ar2%#{61$s9>tvUnVYEY38yz0ltKmqhb>??wtuoC{_XiRW4WtV0G&G4p4KL1b%# zlXn+`{($Wf4M?>Y>gu55rJYNv$1^rO8dhHZDI*Sb0eFl+xKx3UZjgWQK9gMF{>gbW zoE*Jlv66U^DaAnIE?cfObuScaMZ4 z^0P*a=~*G1flToB)z>h*-wwZfBY=WXAu`;zQolXxKZyUEE0+X4?t3ZsaIu);nsQgU zTL2%TyYqEFYi^p4BYEs@NJD7jTx{ph4do{s@6WdA<%g60%$;rUf= zZF&5gBd97;Bp|)?3;qX>5#N}NwLyAB(aaLrx!=Ns9vEFiW*q%dm>~F@x(tPEEv2E3 z@9^Yt&i?)_%rFpZbW$hj;E@5X;Krtj{8<5h0w53k5Qa|%l-j-Rd_Px6!BprZ2C>pa z`Q6uZ8xBj=+s^)OgoRxz@icHRz3~Qe_{MZ z&AcP7rU~%Z*S>xRYQ_Eh;E{h9!k#Tt&^;b#@W>Z$5r8t>?3LhXf3mgDw~dd5oQLL6 z3<%-Ta*C_XYPV^hj7$vb9E;v4f7C}5J05unO7P{_fBfk@!-sW!y7FYzA0!u3BEQRn zmvNyxY!^0gS6|09A-^eAV!puoql67evGM{m3`4yg8BY@l!(o)Wvx72{2Hzow9wYXy z1BA~&6E^IRb->5c0s(Bf?McO^FA3RthgYh&ZQ`@neiJrFSMx)HzrhYz!oR^f{#fBC z>+K0Lu6u=-AY>2_ckAKE2?Ws^JHLmMTAhDhcTLJaATD6wt2;i77^!J3_}a& zF3mB2`e*OLf5rozcoYDxRM+zcg34Sz$F&k-1+6VaFaSw*WSGa^iJ{DsP`#}#H{B|0 zX8QwJwH&GN-xcZL*o>y%a^_F0KO)zm64*E<5NBl3auNiw)LqAyJgnPek=dJjrl0y>xq`xJe;MieaZT@_u?0TIlm>%?YfH%Txb|SL6g~~KhK3b# zxm#@>@U~H-^yuRVTD<>Xy8ExWx*@rHfj&CUnh+eB$U^uEV>GUL+V)7xxnzL91_3ma)6%}G1d?g@`@w04cQ);3?ot%1Cr|?2Q<2~s) z)$GNKZHGFN_PWHEa0T2)IS)~brF8Z_HN&~DW1UY<%$SQi; zJup&i+f5QpbRD6NTbd7luw6=TRDFH7WBL4p^hDNbwwjdfM2fvnv3<gz=R1O2N#mMs*~Zcxk6&z`T&;+XJePzVj=LBTw%}?3b%*od`r5%eX>%b$nW1&Yc{$J-L?y6cwX?GuZuDIOzh{FW{LJ; zZ^x=Tw5FhdnQ@er6W`3%ru?8q9e=5|!2`j%1WPlanbH#ImQL!YmjLW5k;J2^0 z@3t-b(k)F+H)A=^KE3rflb7y|Z=9UCZZ6lZVmL0k27?DqQy{vC8RqF9)>xvwtfSI@ zvd<5`A3n1hB11$+rbg)Zqd?%DRzTT!)WHh4JnXW3V2x6?PvZ=Q;NN!5U)$mk3n~0YYmMueoP zQWY0yPs)AE?^>$4AS{{5jb|>^kcwt_Zavn0@M<{$Oi49Owr1h7Fk2P>IdObWRWTJw zOHOi1Wlp;ZmRxNRCE=46hx8k5kdw&~PV@EX!7yHs0FZtODMFv#W+#Ogk#=Pah}$J; zeXJ*LRavewGs}rXFZe9i_B*TfTZ;ZcQVynrE+_#UNl||TYV#5cM4{vmTZQ5f%XbnQ zYA$>2spzB)!mQi3Ubvj6SU9A6ZdvG)5cfJ`nilYB%K$-(0HbSl?n1v3-gKUHG4Dwu|bZv1~j%<#=PP8E{N?m4H6 zCAM~VG>LCdo}y;g&MrC^XcV40r^&j`)W};A)tZNHmbRR|>`=M!FHbWQ%_PN2#YnK+ z&D971!HbLmy+CBk>cCMW(q0BB1ZgwmzZ=W`4$BN!qGv&=j<4ADuD7P+B{tk2eb$m7;L9ZKt^1%H+&aLY7f>p2Q7%*w0 zmC)U7Gs$)OZ?qypNx_kiAu^@1YmJPsWxRP!q-T$p=he)uXvpjpq?uAcv{2k&;He{& z*({-Vljc?Vai2GLodq(TfG4Nqulx8=?Y< zwZ_Yb%1fDt3?7{u_5=FY0m<;8+X>nd>_Ywb=~mYsTXN&EIKJY=z8#UZhS#m->kZ|O zcYGcn_4VCLzeo~aZ&km;%@BOWyn9$n_Ifatt$95eI0DOR5}EM3$D$m_Dcj-EX3J)9 zIK4!?tMQ3E-xn(XlF;+VfdD$?lnne}gIQj6EB28;D?R_V@Q+qroIO+@9QoElQFoT2 zY5+EYzW9$BjXXXRNcAYqYdfXJCYlKwI;ZP+!(lBYY3U$dx9jlo1C^w>)V$745Uklk zMse{b&kl326gJ;#dGuQ`Yfgw`d9%UfwrVj(o2X!jg4$1@uG3B#bEwhygu8OkYvgp> zQbfANb+6WDh8>4RUw2yHtK#}}D_@rR%J+O9hOxBt;pwHu492{o^N`?+6lGb7Zb6Qh zA`(4J7r~6W?2fo~ijlgs&Xhw;oP*TJL%NO)8h6%q7L6<=v}8Bi5A*yStX+JaC@5VQ z;1LE6H{OfP5#l|_&jH>Jc`X-%jT`)+&CIu`%6`znOb#o)4~Aa@)SS4r+b2P?X0Z&r z%sBIREN$=;htOYL=o-C_<*{?0z|smsAu0Mn|Mf_gQ`%n1xga7D z+3d^?sciEG0VHb)3$9{L9!r1QudLln9PkrX?5VCkZ9^ zo=WA`-(5fzV*^~WMlDOAjC(ehGQcdHgyE-$ezzD}q4GBd{eJXkgux3c2Ci4SV~3J? 
z|C83FV%<}}RQBX(JT4G4`9U(4B)USUfr6eMsY<^s!7U>-wX?SuuDZH<^Lw?)woUxY zB>EEOfpG;mau4HG3G7_IcgUxByiZ5EDAw%kGzExVtkpSBnRhCEM^pT*@BH2FNA)sn z!vC{Szvlskv?l5&BF+5a)WJ##LA1bVso4cN5FWjEvP=Vmgd|%;6iFf>F>&3rW`2Ht zd1WPFszS${hoy)ZMbrs%#Suv{R~kv0Q?x*ve2i<-egIce@a0Kr7&2#W9i<{T)KcV{44=^!4wcN$7#+ z_2!_wKm|qho-tcnTb0Pdi=Aoo;o)J!>NmuGHHeRED9LcRq)oO0WB6clAL4%Ca9_DG z3Gi+hy2@?viYQy;pZhL14B_!>A0gxd=r2B3F|>4j3~MZFw*0~&n#^K=rl6pZnwHi- zpey4dR8S!rjI_DX!mX>P_hW6XfFMHN=c8npJl+h#qlkI>tZ|Ic-8T2KN~WsSMHxwC zpZM!sU26n1_BVZDBvJi?g1ak$#D8+t4+hiq7@U7iXXkRvK z%B^#)a=jWXc#`e`Ww}gUR!es;Rdx*f)dS=HOPAsePS^!&9R{k&^SZ`{r};htEay78 zwU;?_tNQro_}6o!-#Abib`1-9GlEoJ{f^q&Z2)7ao#jp2*q+yBGw^2%}u{$}C6M`4?cva@z&4rYvCLSXrBZ!zd+I4UC)53yY_L)+JHfNir zbTm7{C?ON!dqB7v3PbmGA29JqKA}!}ARm^S1~4iaxuHyU0F5GgY}d!|Pbdds(S9ML zy8cOWn#EFKrbIBTXSGtOh3sDM6V&c}9(nkV*9^@$U~=49vv!+Df41&(2=$#IE@@PJ zzWTd`iKfd-Ii28~SupP09D?Q%mTUWmKJ!50Zy#icGt)T;2}hp^v24Mr)On$ij3b{p z;1X&O_jXL>-6iEg#3RxvoIf1-!(K<@!St2(e{a~Qfr+@_=(xi~!zp|r>6-xvdfU1B zo|zhRrAl4i)4e%dK%j~j$-`a@_@RiNUTB#NJn9e>DY9KM2Vbafr^5R;-DFf3W~~9h zfntWhNmHEq{}k(Z#ll7~m63fpL4op%A0u(iai!^w5dHp`-TO zDVL4=cQ%0_IwHg?ImT$5Vjy1x*AZd7Wo^R0l0v-zWeQ4gRbbmOJTP{*^9Zj}_ssv4 z2-#!`Nc~jvth23s8&^fFdd-A1bB`Tb2W(d`O=E5$64ubb9fC$2phP-JRb!z`5?-+jmR__3WRDtGST=wbj8_9OnR-7W>BavX_!Q9c~5ylk}{O* zoo&dopiTRN*}6e8aekU?2`D6NM}LhfeR96%Xk3NL4+ARQQg8eAb8sGx>_5VP@D#g& z!wa&o@8O_}BZU$}14w|^*M+w^ZP2&&**u2WPEh8yUb@9|Y9Gt>!~NYH$i)Px62}mJ zkvsqiBYVB_ipYW&?tUvE?uY_GB-T-LN>BzrC~St@zSJrxNSJTYxYW3*JTnk<<^>%= z-Hd$$Y6R(b2uh@oFk5JCJ6w7Mjeovt9RuAJCIDN=JzzVejFW0ES9F zKNPs7uHWMg$}eKc6TM>B1p5ZWN3*7Mh2P;`ZamFhm)m0jbaxxhNP(~q42mFvOc;zl z7A#b>X8U%!3%a3zn=*y5LHFN||wpenbN0dC3P;XMc!Ywq2^b<_%-APWaDj2HO-J;9w;EnkDjtf26;D?yPar5&I#Ijq z?avUWUf(%}l@di%cw!A^U9v5fSWtBM`dE^J&lYk#TL<7P983@JYZJ|)!pLA)hD~mR zLDe#veT_a#+K!^5E+($>aT7%(4@2nEIw}>3H?VrUySY8fVS*V zM8^gTHBdxf2wuiOW3!6-+jfwKEHxXf$G6?6KV{6iE5V3iPZ9G>f+51PR<4&F{Qo5< zPDGk(;Yq?O1_s7cP3`=sI$asTg{_rKS;zU>TAoS-=@$yplDgiQujH5v(`Sr^5;-8a zckGysE7B#(^&bXJ2-CPShvWg!@ceCZH1CWyIn9y<*s&55E+KoOZ|jwA&Yn;b)u@Yk z-nU#1g|rDnl&Bb6q5*%OPe2lC0TDn^mZ_bNF(^TYEnDGl3f!Lv7UelYe|1r>2Xr84 zXyYrH_;}y}ur(-g@V3rSEAPBCNEQZ1;8p|CDx%&LG!ad89Z0mr87i@?wUh(Lj69x7rL%v@YWcPJgvon1@;Y7Cz<$k-fTl3nYb52w#?sV55)x7a~6n& zMdQH#KL3SAX;_lm3lv%3`53f{rI17f(XVgo*~a33I%+nwKN$ps!32%D4PXvSV(q{4 z0|OTNrkGj_7Xavu?wY@(0wCn$9_QZFIo4GI%v%Zgch5s&Ky3K0FgTdJFuY+_G=2LX z3fR=>?Ft?a^mD(_M3OI@~5H_r%hGAhIsWI7g|# zfg=w&7ynNBH|$e_3_;3`=<$G@6Z&Jwd|(2Kt+18Ay_e@ow+hrsL4k2zDV0Br)0#K+ zRa#g+n~0IF#f{@#s(!bCK;&?@zGQ^?Ob(9mR0$-Nqh1V*2W z_t#p~;{y))1l{sl3G!VlgcP-z)^AYUvw6B+DYZD)*wfwQ$G_8TSmz0`Z_RYo&+66O znIY+VbF4d_K6zE<4Mz@-PSPDi%nt&A{06x?sWl#( zlL7c_`4^v|kaRr_7~{`)6)Vd==M}?|SI3Ul?!@9zwHM?hjHjF)OJgl_uUSX=`oid= zN|WlYt((OAB~c*EBuju2&e&`Zl7L`EU3|+CbuhTQ7wg@J4_p}0)MOSaWa@|9`EpJ9G^o3Bq(Ai*$NK0V^olZP|(O-TuyBi6KsMgakN#b$4UTfdH zbM;7-XzQF+>oDAcHQXONN?rJ@o{@~Sk*Nzuj!p^|H#8)xx0#`UC*k7a3X6>m&C1GZ z9qI3vaa;Xj3c_?XrSatT4xrpjW=8M`!!KO&M+f&V<|?svv_fSGX+51T4E^BjF1B#S z-N3{K+?9nl@@Q=Q=96#?v@*zPXVKTaLUe{`WF4^?*Bz4}hL4lJ>s?dZJ|Rm06FUN3 z0D83Eqgk2j=6_P*cfAx|__3RvM}iYU#wQ}1SUrFBy5z9jq!d;wA@x%=h>$O~*C^$s zc4p2v-3%nSoaG8GL?*T@Ju2?-5aKGoNxhEV+eqJt@O%xE zl2%eY4%-*%d>Wz&n%7oGBYi(`a&LuMM)z4;8~_w%t2JK6Q8Q*E-i zKUO>7%er^rEqA_6V$_82=c#bMW^eEPAQSWH+*azFd5=dAtb7at!2wLsG;sE~ zNxA+_j6f8yf)qATX$xe_JZ9MHS5(&J|bFXdJ$Qf~@#-36ok3B3zm8vA)Rq z27_)O{}QzzuNzale$UKk@8Q1Z1NTevwM}(*g)E@1sNlzo?jVU8*13F2Rpco>p$||b zZLM!}61g<%O)vynCS-}dzDIW*-kE{D-^$uYYt@pRNV!P#oRUUNFmxlsk#7@t%l%!` z07lLxpegVcDA)Y}6)=y`-9ytkCc4Mn=j`n0$aA4;ntP1iDYnkN1NY+R_s|rhy>?`i z-iw|_w}I7}Xy0!hi->Ph8O`2!s}d9`7<{%k3cl4Lsl?mRgs))eGjvEv`mS0;?UQlL 
zwXM@*^biBV>$sugvd;=o|KWBxG~KcMIg5`#ozF_&wzR)rT(6FRa^tnPi;#}4w+_cp*!nC#E(C~Au$mJ;$@@@6GRMZms zZ9Rm(Inr?D^!y1ABXV+Vtfe9*VTzq!tUF`ns+3_-Dh8(OI`-FCL4@fam8p|50uA3D zK6KlsAcW6`Yk_Pryw;cAtkwiPyfJVm!Wz(Hx&`T#QLW-WpaMPEr1QL(S@dsJy=Jvw z$*WU-5>D;A-?_W}|Agn`wsw9(4cQ;jVdEkopoqzCcZiNFoJ7_~Lr zncZ!oF)(?HjFWXZjvEIxo7KAly@RBoNyJ~tx08#t*u_h=l(C!zODP9incE$`1UV0- zJ>5~w_B)J6Ushvl=M!xw6-N)|>eC0tjTb4HT4O>u&J5yRtpW>e)>|Gu|ri0{)9z&@bk8e+oCG6G%6V z>aej)aC$iF$401NR4B{Ty|@&s9v!kwN~1t@+l*d*aEBP`V*s54zdV^)uayYs$Jr36 zzl8oT`H$!SBoIXD9!3NWm?ixFRMp;tqvN>Mf`kr9AF_jHeJi6%$R=I{3>djh&$JE<5^u{d*qNg(pL@DR|`Qfelswn5g9)8;P z(_(=iWoF^YzlyH>AR%2nd@{KrY|Kkjw9d21hLYl$@)kK-4BzvMo(x*gcQghJ#&emT z?+01@$T`mShs0T2vJ_MfxZ6x&-6eN{qq8raEz7^R;2H^9psUkDuqh~V9kfFP#{}WS zEU8K|o{31e(KZRuz+dzgj=Cgi^ct?;8XVP*OQqh)X9~cOGAImwRftpQ5NKtW;M!gYHdN@V2gZlfca7*#s#m}>({Wi#!*RnNgS*1 zCS1g4gr~EJNKX=%?hSQ%=~@{IiUrgj?jEoq`J*HFmD!?qF7=Tn@1 zl%H=AED{!xDD=-*5E5P?fQx+Zaw2R17g?P(xIX~TfJ<3fd2M`(B|F5EbF$-fXn)(- z{nfRcgP1M>!!cVg-vcdfZ*p=E0mr57>7Fr5J0nxRyYyCyal*y(<=Zq65xPf68M#}v zRfjPhqepK5r2oyQV9(8W^x2h72Qx`E#V^!f&Bbw5GfvG9pKs2!wIyU zt^67fEwepZ7G2bF`-|3YLJC|>U(+4r+x*L#gJ}urF^5m4FD%Vso4J&83YgGZVh|Jg zRGE^BIJa06qtcWziFfquaTG3kLN?Dt4!1Eh3+l}}F?W#ves}xR<^clDS zp9~OSW5=3f37X0vWgtW!fUn$5>>fMd3;KB6<$2gi(F)=aRh88fVL~`6cfmC=@HM!) zXeRIV20wu;fQ5@BSC%$uF;Z0_q%8z>{?>bAC97yxketiR+D|1?gonO&;x}r(!h`AA zO=YHCWK@%hEdSp3`M~;`c$YrLE8_Ay%k6!;-z7$@X9E%Vz!H+DPOb01XB!ragxv~b z9*#7Db_LxCGq|!fCbkqO!=y=r#KGbU8!pGkDwWJJo0n#swdL7Sma9HE28)q}8MT15 z4UJNQ9*@C8b?tEY9p{oZFsZ3M$HV(f=zpuAg%1fBV?bilwQq#MB34^g6 z{FNLw#yE{>9ZyWR+1LsQ#5C@HOpRuj7@WemP$B7ZFh?n`pzX zHHt&Dn=z*_Yx#pMErmiMQ#6j32}Ai(S{RPVgYoWMp*ZsB<;JGbuAmc2M*BkUrPp1* zCzYp$Bq^IS%s8%*Vb}3-7A9u*Mz^G{iCOd%F~O<^E=JIVL11bjX$izGj-=S-RLz$q3rIS;e9IBf6$jNfi*&DL)J9g9vp;PXXcNdz;M0+B zM*$X_Fd&g?irH3_Woq$g79)YfNn`Lj8r~X|_8Mj*u%Zn>NlGZS-r~iico5b|pdx!< z!FHOz(W=zd-sW5%(~9m%xu_&CSS&MRc_WP*)XL?>sQ&($SuAKk6%r~q=A`s=5F0Qt zu4(w!+hyD$u_0RTcV&4m!`ovzzM(0^*2(HQNmtjtEi?n=5gDXvYL~ZaYBL(%ylQNF zzX^^Dg3KwbNG4rhc2~c&x~LbLzxE3 z09^~EN{6ZEqlVrnnG6O@aGAi{1&=rT6t{sxY|7X?ELkcb3ggVZc7w>muU5tf!~UD^ zl~$=FFBzYKhaC6(I0#;|=ZjM-77Inc(`+U%G!yyK)>flbvxEa9c#R|fwWE<7DsR6p ztcd604KaNtZk_!-rflobR@(W6HX~%x&J{Z^4R#nsCW{`d;JRUPh?bW2upvDjR2ktD?Jcm zkUZnz#Y0jQ)_IK38f~rxUtSE0T_re@wix@#;@YyX=x^X#fb!_kz!u)V-e*3XsqNko z>*GBs#ip4UThl~b)#yFSayyEy?rww5b_gx-G>W#L5eM~n2olp%dLsMWSWUjL94pI3 zK+h?IC}2rrE^Vcs)r^Sl z2F(?qKx28pP)rOfU;8PVQBP<^OsVZXdMaq90Re5wpJ%JK ze3^Z>SuHw##3mt5!gYAe^t>}10 zTv?4A3<}YV9N)fDS}z;bNeK*21J8(bmX>a1OJ{R8$r0|alx!#5RvARsViko<5l z*6|cE{;C{SBK9Xv-_x>KDv0R8yF z?NYtfVtW|3$j~5hazkO)$C9d2C8X~$)g$D)CpGS&?3E<07cpuPU6i^}N8pTIR{Tze z)cjRlTL5Rh-Wv3J7>+E{)xO{KeJz(>=ta-0k$pi3>;?T!nr-F$aHhHM`% z;0T*4*-wrsLeeRzgqylkG*_&8#z^qyMD-h9>f#0FXgv4>o(E2JSUOcDF0+`Lu;BJS zsym7@4F55ri#CO!rq*d%9VvtOLR590pZoPu8|D7a$yscw^Q+N<(~VQhF4k!+7+K4f zZU$43aUlY=vc`Mm^OpS9+SJpsb4QBI@1f}GVLo&XTQw412jS=JnaG(-ZL*9bF8BlL z5rzE(BD!AEx?cJ&bbJW*4{Sq98iUJW34tIbN^$+4vV7D`nel-v>uS~S(2o`rLYM1w zlpZ7&nP8Q2+`3sYhVf3{OMvng9{|6Zf|V7dQ*Yys9rt~9mV%iX&DGWQ7)(P$b2ff* z!G|vL#f5D=|3{cbrq`)pdf9v1aDDNG@3Ef_P~uZc68R=yUt;c_xM)0MkL{|i16yz# z$!Ys$2{>Ba;acd4T&qeNE-7OO4==`fwsz4(E+5>nZPS~3&GR=yk%l|0k*?#*g|K`c zX6bRP0~hF*dwwDlr~Z`lS32TXJSIvR2=gm})zKc)w~}P~*yIkxoKIQujn_VR-AA?y z2R2t(J6j&{^%(|&)=i}4g*Bs!#f+>XoYd0|mSoMTdT9FlUddr^^hx%HSM5N_>I@88 zBT&(hbCpn)GmeQ?(>ltP7dtDBs^ja@roX)7;8L%pvokTXL(E`I)=L-;k5KS03HIpq ziHH+U*BZ4SSBW|Z^keM%$(L4G;P3tvK)Am^S_tggjsOr%*BaVGR&coMVg9c#!IA;5J&E%`e*C+H zHW1B^_wnBK#=r!Iaz~F8k^V7rGW;*Xkc5HPpvX?1J^nqS7MK#n-R1Wy3MyczLy~8} z3~M&B{^ej{`vI>(3;$#Srj|#2NFjhJWV;mFPU<3j_7x{medBBtNa! 
zt=f_OvtolC-sD663i4;Nf=jShOsgR*3NJ!D)jl!3W^6z^p;%nNvyHodIYQW}kL!^C zGx~8Is6L#Gm>lZ0$?bm(mys#In?*je!r!tN-8eM_GEh|AtJvS7{ocWoH{(<5y!%@K z{nsOQn=^P-Jn(+c@B?I&9pk;4cfrQyWjC-aLGueWP~)0kX(mD)rL-g8;u@ zzyA#R7n*>Hcq0SB8g6nz08UYA%lG+3sXV^C0-d~&B`q)SU;9G6dMT=nOSsy%6NVK5YO0tutglo4@de;J zf3d~y=usxLn}5alXPLo2UIO0F#WnNrq0cZVOT-c*%ca?O)#n zd{Li2vj1x$%lln4#jiD3^>gFkUkzj|;h`hFmo<`SQ2HY%~vmkOV0nePD{%3jWJ+UU=a3 zorzCgXgkq9q%N-@Be?%2{TIc4*a7p+H!xLdv|?z)2m&4?F*NTmQeWhSqG>bu$N@XW zXC#vnNSGYc9S&_*d_UW8Y)S$O{8n0;vfXcnJg;n$QvPMM|5}DsjJSZf%}t&cwuOp~y&F(x>z4o1U1;^9eng}0VY`HdMp{o4(D z9_*7?xX{1nWB+8o-=p@l6GAE_6Guws+CwwOOOVwEa*;5rTaQFi`2#6nyz?Fk@z$sW zk@WA=pWIG*y9OtMg#Q-{a=!yG9o>@>7DR~GHp5HcHvmRDS($l!R~h!c9Xm?qq+RMO z0~?EyO*@Cc^0!u{ZO8UsME*3%`dRvx)nLivb{u zYkYP7AGzM&Q;B#Ci7bH6$^$@K%qU_`^=823=NKdZbH2aW;jfDTEC+1zvm!IxAn6ni zlzx(U5$SWVz{~#!+M6EXCi{U%euNGop4l8Pfs-OIuH4o==y9Iyu=?SJH80=N#vizT z{|oDXXFl!AKd7v&MFgm1N9mu>5|RGNJTD7;k!%TiT5R8_+V{yb_viq2-7vURX%P*M+=_OC0Y+*Qctf zBRa)R2LVEP1A-xY5y2m{Pb24t88&3X&a=Z(XSr4&*fwS{!B;z^Wa|&$V2>wpjaver z7ps_ZGM0pZbW6bvD@uRy5TM2oFCHiKrJ?g`fSc9y6Mc7YPrltysV3MS+Qzp0WP$ln z=KZ%wkr3%L1Uib)@{F;#-#TOl6*t|*9Ut3&KflJN&ct24x;B!IX-8<$CN`u{OlYW>L-s0k4VLg=a9>zycF>rZRVTaRiwSc~ z%+el;^?W+_D|M|b0cJV`4@?68Y5KRLrERj)2gYMzB-&XzMDoTUWuk2MK50N!Gp~vJJ{T2t|ypZc;<7pXY3#%JJ(D=y#HH$0ILwC)X5qUS3IFbP?agvU8cH;@D@cN zrkj`>IO0-__f%TIfe@o~h7hRifaLUCi_mg3VN#H=NVe7%BsFgcJ}n$<)r4M7VLvcx`^`EuL&Yy3E3jjpM2pwq=3p-JVYCGfe31$2nq6}IUUrU zl^shHBbBc&>mvUhnf?RNexrzZsvsvXyp?Vz4LU^3np9k4yFiUXKMl1-j59%!ygZ;; zBaRtCrennEjO=ap5#MLh8!{0=gsXOb8{aEwjQ_*iTgFwHZvVs54Vx0_jes;rgY>4m zyCkGrLRv~n8fhh^L%KtxkuK@(?uP%(%$aj$&Tp9W>UmylKM2>p?<>}|dabX2&G`F( zV@zJcTeWlnAPqVx0$@PDXM*7j*0$8WZ#;Ms zsNy&Yw+(9y0G}#Uh)OH`Igp69?F|(fBUiiwd{jH$Nm*9QkZjiUyx<>@8i6cC&ISMErQ8Jw@}Y@p|70x=$b^pAhH?p#<@ zO2nuuBWE|(9_M=;N|n!+zwnp~o|5GoXo6$hz=?a<6LA;&-qbv2e3%)z1Qwwe)?Psy zD%v{=qvWM_E5jPJ#9^s(b1j4Ef<;;c^(b3tev!V>Yo33%%-^>f2OCNl8G^H7{}L$z zi-{phNo^FdyYnqIJ_#cPcf_MQCW3M0UEcRGM0-cryE{T^b@Yu9gpDzjjZvCT`;9GF zc(S@^W1c^=(*NQ#V3cQiAf*P9kKf_QudjODee&>@h?cdg_ zkPEGZc%*&s=2vn&!>AVkv-cedbHnETMU&Z5?soPh$)CkkND-j=w|bs9fE?BwhmasP zbFj|4O0Z!gedOEe8G((mO%won9}$lv5Aw_tP#q_Klo|CN)N zCYX*Q!3xPrB#Rni*4SXLJBaVVBJ1*7Xto#Vs2a5X1*DV~2s#x=jG7&wAJjp?rcQ!c z5xe071OIsU@7D+o$`g(Z7f6s&KopXg@*F`R<5Vc#UYEGCM|r*-I*ANlh8ql~C@|ct zTH>KrEj{tNePU|*zI_;NC8pixfu|3qw-l(CM8?LVva&L-mVD#H^iQd?BhGM|o+VJ#4-V|rmkq)b+Ll4$-E^Hxlanqb_ zZpil|4q;LxKiChiD*!6{csAtl`U5#&a@eK90VjRBKKmzo{-d&>K2XxZrgqem`hNaH zE4@OQCBX3JpJ&8etg@_{09;av=?3Qo6%LQyX`=rI(~IZXRSBh-R3>&c1HxsU;Do{r z4A#pO3{k>kQNl`GJbyrfFg3m|y{mc0G^Ia@vSvg@W15i#D7I%RtPl&{o?2^>?{2qP zu}KZjC`Gnuu$#nnPU*fiOd>NiISPejAY9&!fc+vf#eME>eZIYHX@w37ho7p1nVH1} zu=6(eNE61anGU`D!{mk z1@<4V?Z4Q}>lElKWXO&P$nberbn_Jwb5YNC!!fo51Rc?t?#_3b^H?v|Z(dJWqboIx zgL}Tx9L!V3CNU$wnD|{rhCFo=vv3lWtPNubGB-ez7Qo|H39darRs3`>T>GG`VPKZq zzKVX{-%>SC!Ces)AHO0+>aQcRRi=5mXEN{Xq^way0y~ZG4eqw(%$Vo!JB65vrY+Yz z8*o7rE(_;JhsX1;utrJYLy4tBIN3!x*+FuotyxV691cK>ag*P!VC;JpVdMAf9=u3s zfYQb-R_7hmMC$Ax9DI^$$7cmp6}<0xL6b%U?8HcB+B`ENjhX#i#D8RA8W-XQ9b%M; zf9wHrTX{D+a>s#kuEsofrA^ResVX(G3cdmBu!(wtExupbmk5xnDTbje4%`Hg;fpfB zk6TIbmEp=eS7V#6HcY}wIDk<(3fzVbS`6GCyN3?V_O)Bah}&M%1O<1DtrZg>Kz}HjXnBFtbh4ZWT19UKuUsx4M4NNKa6?7QmjE! 
z*?E4LA}#RFv4n>6n4SBxeP#i}sHq>t0&jIV}>+0zLI_!(NY+RI|~5Uh1rr|&PC{fNOGJaeo+lS+F<=jt~l4Q zjZ@k=M@(0HGbRP8IA=*Ex&4jucQKxOC;|E)mLj&qep2!ZHA>Hh@zVphMNqB&HHt7z z*O-0#EqeAA9;j`Ik&}!Uk-I23VqRxMA)~O%mk+J^%I83C#Jtk2;m17E9P51MiT5u% zyfUvdBfu^8fb+e=uvmEUHR(MiHRYsQ@Nto%^kqvG578s~$bxjt^X%Qo z5x=j<+#*(OH31S^BDMx{8_f@Hn@of{tJgmCjqj58XcxA6La*3=MN zR9UaBeN853>3+(XT~H0`x7|s2V}2JUYWcoO(?_(oF-oTB(KTEJQCb@zc%aRdZ$(sj zN3yvPHpcp7c?1;2)|%D{K;SNW7x`TmH^8pRJ-A172Hoq=?;d`R`+>o0r2-Kw7Vk7J z4~v#VF)RQ;rYHK?cmcpwx{Dvw2W*%C;IhE5z0A)AD^EWipjDWiC;q2BhqdbopzIko zScebI2nA;4&!_mI{rYtofQN$Lle+6udewp1M`J9?*5WdUA@o2!f9Vj9Z}PJ_3MQ6)~Gf%ofF9MDYLh)6{zmc{V3FOR@Gs-BPTfM0;T ztlV{JStV&X1sgdzRe3p8MFmM&SyfrtPkfu;EdEymU$__SZs`BrpiRrTfySEQO*7px I?OV_O1Bz9-`2YX_ literal 0 HcmV?d00001 diff --git a/docs/tendermint-core/block-sync/implementation.md b/docs/tendermint-core/block-sync/implementation.md new file mode 100644 index 000000000..59274782c --- /dev/null +++ b/docs/tendermint-core/block-sync/implementation.md @@ -0,0 +1,47 @@ +--- +order: 3 +--- + +# Implementation + +## Blocksync Reactor + +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates the playing of blocks towards the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation + +### poolRoutine + +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends + a &bcBlockRequestMessage for a specific height + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores + them on disk +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer + +## Block Pool + +- responsible for downloading blocks from peers +- makeRequestersRoutine() + - removes timeout peers + - starts new requesters by calling makeNextRequester() +- requestRoutine(): + - picks a peer and sends the request, then blocks until: + - pool is stopped by listening to pool.Quit + - requester is stopped by listening to Quit + - request is redone + - we receive a block + - gotBlockCh is strange + +## Go Routines in Blocksync Reactor + +![Go Routines Diagram](img/bc-reactor-routines.png) diff --git a/docs/tendermint-core/block-sync/reactor.md b/docs/tendermint-core/block-sync/reactor.md new file mode 100644 index 000000000..3e2875340 --- /dev/null +++ b/docs/tendermint-core/block-sync/reactor.md @@ -0,0 +1,278 @@ +--- +order: 2 +--- +# Reactor + +The Blocksync Reactor's high level responsibility is to enable peers who are +far behind the current state of the consensus to quickly catch up by downloading +many blocks in parallel, verifying their commits, and executing them against the +ABCI application. + +Tendermint full nodes run the Blocksync Reactor as a service to provide blocks +to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode, +where they actively make requests for more blocks until they sync up. +Once caught up, "fast_sync" mode is disabled and the node switches to +using (and turns on) the Consensus Reactor. 
diff --git a/docs/tendermint-core/block-sync/reactor.md b/docs/tendermint-core/block-sync/reactor.md
new file mode 100644
index 000000000..3e2875340
--- /dev/null
+++ b/docs/tendermint-core/block-sync/reactor.md
@@ -0,0 +1,278 @@
+---
+order: 2
+---
+# Reactor
+
+The Blocksync Reactor's high-level responsibility is to enable peers who are
+far behind the current state of the consensus to quickly catch up by downloading
+many blocks in parallel, verifying their commits, and executing them against the
+ABCI application.
+
+Tendermint full nodes run the Blocksync Reactor as a service to provide blocks
+to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode,
+where they actively make requests for more blocks until they sync up.
+Once caught up, "fast_sync" mode is disabled and the node switches to
+using (and turns on) the Consensus Reactor.
+
+## Architecture and algorithm
+
+The Blocksync reactor is organised as a set of concurrent tasks:
+
+- Receive routine of Blocksync Reactor
+- Task for creating Requesters
+- Set of Requester tasks
+- Controller task
+
+![Blocksync Reactor Architecture Diagram](img/bc-reactor.png)
+
+### Data structures
+
+These are the core data structures necessary to provide the Blocksync Reactor logic.
+
+The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer with ID `peerID`.
+
+```go
+type Requester struct {
+    mtx         Mutex
+    block       Block
+    height      int64
+    peerID      p2p.ID
+    redoChannel chan p2p.ID // redo may be sent multiple times; peerID identifies the repeated request
+}
+```
+
+Pool is a core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and the number of pending requests for each peer (`peers`), the maximum peer height, etc.
+
+```go
+type Pool struct {
+    mtx             Mutex
+    requesters      map[int64]*Requester
+    height          int64
+    peers           map[p2p.ID]*Peer
+    maxPeerHeight   int64
+    numPending      int32
+    store           BlockStore
+    requestsChannel chan<- BlockRequest
+    errorsChannel   chan<- peerError
+}
+```
+
+The Peer data structure stores, for each peer, its current `height`, the number of pending requests sent to it (`numPending`), etc.
+
+```go
+type Peer struct {
+    id         p2p.ID
+    height     int64
+    numPending int32
+    timeout    *time.Timer
+    didTimeout bool
+}
+```
+
+BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`).
+
+```go
+type BlockRequest struct {
+    Height int64
+    PeerID p2p.ID
+}
+```
+
+### Receive routine of Blocksync Reactor
+
+It is executed upon message reception on the BlocksyncChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a separate receive routine of the Blocksync Reactor) executed for each peer. Note that `try to send` does not block: it returns immediately if the outgoing buffer is full.
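+
+A minimal Go illustration of such a non-blocking send, assuming a buffered
+channel as the peer's outgoing buffer (the real transport is more involved):
+
+```go
+package main
+
+import "fmt"
+
+// trySend attempts to queue msg on ch and, instead of blocking, returns
+// false immediately when the buffer is full.
+func trySend(ch chan string, msg string) bool {
+    select {
+    case ch <- msg:
+        return true
+    default: // outgoing buffer full
+        return false
+    }
+}
+
+func main() {
+    out := make(chan string, 1) // room for a single outgoing message
+    fmt.Println(trySend(out, "BlockResponseMessage")) // true
+    fmt.Println(trySend(out, "BlockResponseMessage")) // false: buffer already full
+}
+```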
+
+The receive routine logic is as follows:
+
+```go
+handleMsg(pool, m):
+    upon receiving bcBlockRequestMessage m from peer p:
+      block = load block for height m.Height from pool.store
+      if block != nil then
+        try to send BlockResponseMessage(block) to p
+      else
+        try to send bcNoBlockResponseMessage(m.Height) to p
+
+    upon receiving bcBlockResponseMessage m from peer p:
+      pool.mtx.Lock()
+      requester = pool.requesters[m.Height]
+      if requester == nil then
+        error("peer sent us a block we didn't expect")
+        continue
+
+      if requester.block == nil and requester.peerID == p then
+        requester.block = m
+        pool.numPending -= 1 // atomic decrement
+        peer = pool.peers[p]
+        if peer != nil then
+          peer.numPending--
+          if peer.numPending == 0 then
+            peer.timeout.Stop()
+            // NOTE: we don't send Quit signal to the corresponding requester task!
+          else
+            trigger peer timeout to expire after peerTimeout
+      pool.mtx.Unlock()
+
+    upon receiving bcStatusRequestMessage m from peer p:
+      try to send bcStatusResponseMessage(pool.store.Height)
+
+    upon receiving bcStatusResponseMessage m from peer p:
+      pool.mtx.Lock()
+      peer = pool.peers[p]
+      if peer != nil then
+        peer.height = m.Height
+      else
+        peer = create new Peer data structure with id = p and height = m.Height
+        pool.peers[p] = peer
+
+      if m.Height > pool.maxPeerHeight then
+        pool.maxPeerHeight = m.Height
+      pool.mtx.Unlock()
+
+onTimeout(p):
+  send error message to pool error channel
+  peer = pool.peers[p]
+  peer.didTimeout = true
+```
+
+### Requester tasks
+
+A Requester task is responsible for fetching a single block at position `height`.
+
+```go
+fetchBlock(height, pool):
+  while true do {
+    peerID = nil
+    block = nil
+    peer = pickAvailablePeer(height)
+    peerID = peer.id
+
+    enqueue BlockRequest(height, peerID) to pool.requestsChannel
+    redo = false
+    while !redo do
+      select {
+        upon receiving Quit message do
+          return
+        upon receiving redo message with id on redoChannel do
+          if peerID == id {
+            mtx.Lock()
+            pool.numPending++
+            redo = true
+            mtx.Unlock()
+          }
+      }
+  }
+
+pickAvailablePeer(height):
+  selectedPeer = nil
+  while selectedPeer == nil do
+    pool.mtx.Lock()
+    for each peer in pool.peers do
+      if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
+        peer.numPending++
+        selectedPeer = peer
+        break
+    pool.mtx.Unlock()
+
+    if selectedPeer == nil then
+      sleep requestIntervalMS
+
+  return selectedPeer
+```
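+
+The requester's inner loop can be expressed directly in Go. The sketch below is
+illustrative and self-contained: peer selection and the network send are stubbed
+out, and only the Quit/redo channel mechanics from `fetchBlock` are kept.
+
+```go
+package main
+
+import (
+    "fmt"
+    "sync"
+)
+
+// fetchBlock requests one height, then waits for either a shutdown signal or
+// a redo notification that matches the peer the request was assigned to.
+func fetchBlock(height int64, quit <-chan struct{}, redoCh <-chan string, wg *sync.WaitGroup) {
+    defer wg.Done()
+    for {
+        peerID := "peer-1" // stand-in for pickAvailablePeer(height)
+        fmt.Printf("requesting block %d from %s\n", height, peerID)
+        for {
+            select {
+            case <-quit: // pool shut down
+                return
+            case id := <-redoCh:
+                if id != peerID {
+                    continue // redo aimed at a different assignment; keep waiting
+                }
+            }
+            break // redo matched: pick a new peer and re-request
+        }
+    }
+}
+
+func main() {
+    quit := make(chan struct{})
+    redoCh := make(chan string)
+    var wg sync.WaitGroup
+    wg.Add(1)
+    go fetchBlock(10, quit, redoCh, &wg)
+    redoCh <- "peer-1" // simulate a bad peer: the request is redone
+    close(quit)        // then shut the requester down
+    wg.Wait()
+}
+```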
+
+### Task for creating Requesters
+
+This task is responsible for continuously creating and starting Requester tasks.
+
+```go
+createRequesters(pool):
+  while true do
+    if !pool.isRunning then break
+    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
+      pool.mtx.Lock()
+      nextHeight = pool.height + size(pool.requesters)
+      requester = create new requester for height nextHeight
+      pool.requesters[nextHeight] = requester
+      pool.numPending += 1 // atomic increment
+      start requester task
+      pool.mtx.Unlock()
+    else
+      sleep requestIntervalMS
+      pool.mtx.Lock()
+      for each peer in pool.peers do
+        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
+          send error on pool error channel
+          peer.didTimeout = true
+        if peer.didTimeout then
+          for each requester in pool.requesters do
+            if requester.getPeerID() == peer.id then
+              enqueue msg on requester's redoChannel
+          delete(pool.peers, peer.id)
+      pool.mtx.Unlock()
+```
+
+### Main blocksync reactor controller task
+
+```go
+main(pool):
+  create trySyncTicker with interval trySyncIntervalMS
+  create statusUpdateTicker with interval statusUpdateIntervalSeconds
+  create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
+
+  while true do
+    select {
+      upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
+        try to send bcBlockRequestMessage(Height) to Peer
+
+      upon receiving error(peer) on errorsChannel:
+        stop peer for error
+
+      upon receiving message on statusUpdateTickerChannel:
+        broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
+
+      upon receiving message on switchToConsensusTickerChannel:
+        pool.mtx.Lock()
+        receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
+        ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+        haveSomePeers = size of pool.peers > 0
+        pool.mtx.Unlock()
+        if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
+          switch to consensus mode
+
+      upon receiving message on trySyncTickerChannel:
+        for i = 0; i < 10; i++ do
+          pool.mtx.Lock()
+          firstBlock = pool.requesters[pool.height].block
+          secondBlock = pool.requesters[pool.height+1].block
+          if firstBlock == nil or secondBlock == nil then continue
+          pool.mtx.Unlock()
+          verify firstBlock using LastCommit from secondBlock
+          if verification failed then
+            pool.mtx.Lock()
+            peerID = pool.requesters[pool.height].peerID
+            redoRequestsForPeer(peerID)
+            delete(pool.peers, peerID)
+            stop peer peerID for error
+            pool.mtx.Unlock()
+          else
+            delete(pool.requesters, pool.height)
+            save firstBlock to store
+            pool.height++
+            execute firstBlock
+    }
+
+redoRequestsForPeer(pool, peerID):
+  for each requester in pool.requesters do
+    if requester.getPeerID() == peerID then
+      enqueue msg on redoChannel for requester
+```
+
+## Channels
+
+Defines `maxMsgSize` for the maximum size of incoming messages, and
+`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sizes of the send
+and receive buffers, respectively. These limits are meant to prevent
+amplification attacks by putting an upper bound on how much data we can receive
+from and send to a peer.
+
+Sending incorrectly encoded data will result in stopping the peer.
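+
+As a rough sketch of how such limits are declared, the channel descriptor below
+uses illustrative values and a hypothetical channel ID; the field names follow
+the terms used above, not necessarily the exact production configuration.
+
+```go
+package main
+
+import "fmt"
+
+// ChannelDescriptor bundles the per-channel limits discussed above.
+type ChannelDescriptor struct {
+    ID                  byte
+    Priority            int
+    SendQueueCapacity   int // max messages queued for sending to a peer
+    RecvBufferCapacity  int // max bytes buffered when receiving from a peer
+    RecvMessageCapacity int // maxMsgSize: largest single incoming message
+}
+
+func main() {
+    blocksync := ChannelDescriptor{
+        ID:                  0x40, // illustrative channel ID
+        Priority:            5,
+        SendQueueCapacity:   1000,
+        RecvBufferCapacity:  1024,
+        RecvMessageCapacity: 10 * 1024 * 1024,
+    }
+    fmt.Printf("%+v\n", blocksync)
+}
+```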
diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md
new file mode 100644
index 000000000..78e63ee22
--- /dev/null
+++ b/docs/tendermint-core/consensus/README.md
@@ -0,0 +1,41 @@
+---
+order: 1
+parent:
+  title: Consensus
+  order: 6
+---
+
+Tendermint Consensus is a distributed protocol executed by validator processes to agree on
+the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
+each round is an attempt to reach agreement on the next block. A round starts with a dedicated
+process (called the proposer) suggesting to the other processes, with a `ProposalMessage`, what
+the next block should be.
+The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
+messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what
+the next block should be; a validator might vote with a `VoteMessage` for a different block. If, in
+some round, enough processes vote for the same block, then this block is committed and later
+added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
+validator. The internals of the protocol and how it ensures safety and liveness properties are
+explained in a forthcoming document.
+
+For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
+block, as blocks can be large, i.e., they don't embed the block inside `Proposal` and
+`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
+[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
+that uniquely identifies each block. The block itself is
+disseminated to validator processes using a peer-to-peer gossip protocol. It starts by having the
+proposer first split the block into a number of block parts, which are then gossiped between
+processes using `BlockPartMessage`.
+
+Validators in Tendermint communicate via a peer-to-peer gossip protocol. Each validator is connected
+only to a subset of processes called peers. Through the gossip protocol, a validator sends its peers
+all the information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) they need to
+reach agreement on some block, and also to obtain the content of the chosen block (block parts). As
+part of the gossip protocol, processes also send auxiliary messages that inform peers about the
+executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
+also messages that inform peers what votes the process has seen (`HasVoteMessage`,
+`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossip
+protocol to determine what messages a process should send to its peers.
+
+We now describe the content of each message exchanged during the Tendermint consensus protocol.
diff --git a/docs/tendermint-core/consensus/reactor.md b/docs/tendermint-core/consensus/reactor.md
new file mode 100644
index 000000000..ee43846ec
--- /dev/null
+++ b/docs/tendermint-core/consensus/reactor.md
@@ -0,0 +1,370 @@
+---
+order: 2
+---
+
+# Reactor
+
+The Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When the Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in the gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, the Consensus Reactor is responsible
+for decoding messages received from a peer and for adequate processing of each message depending on its type and content.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.
+
+## ConsensusState service
+
+Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+and upon reaching agreement, commits blocks to the chain and executes them against the application.
+The internal state machine receives input from peers, the internal validator and from a timer.
+
+Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
+Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step; these timeouts are processed
+by the Receive Routine.
+
+### Receive Routine of the ConsensusState service
+
+The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
+It is the only routine that updates RoundState, which contains the internal consensus state.
+Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+It receives messages from peers, internal validators and from the Timeout Ticker
+and invokes the corresponding handlers, potentially updating the RoundState.
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
+discussed in a separate document. For the purposes of this document
+it is sufficient to understand that the Receive Routine manages and updates the RoundState data structure, which is
+then used extensively by the gossip routines to determine what information should be sent to peer processes.
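+
+The single-writer design above maps naturally onto one goroutine that owns the
+state and multiplexes its inputs. The following is an illustrative, self-contained
+sketch; the channel names are stand-ins rather than the real identifiers.
+
+```go
+package main
+
+import (
+    "fmt"
+    "time"
+)
+
+func main() {
+    peerMsgQueue := make(chan string, 1)     // messages from peers
+    internalMsgQueue := make(chan string, 1) // messages from our own validator
+    timeoutTicker := time.NewTicker(50 * time.Millisecond)
+    defer timeoutTicker.Stop()
+
+    peerMsgQueue <- "VoteMessage"
+    internalMsgQueue <- "ProposalMessage"
+
+    // Only this loop touches RoundState, so no further locking is needed here.
+    for i := 0; i < 3; i++ {
+        select {
+        case m := <-peerMsgQueue:
+            fmt.Println("handle peer message, possibly updating RoundState:", m)
+        case m := <-internalMsgQueue:
+            fmt.Println("handle our own message:", m)
+        case <-timeoutTicker.C:
+            fmt.Println("handle timeout for the current height/round/step")
+        }
+    }
+}
+```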
+
+## Round State
+
+RoundState defines the internal consensus state. It contains the height, round, round step, the current validator set,
+a proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
+received votes, the last commit and the last validator set.
+
+```go
+type RoundState struct {
+    Height             int64
+    Round              int
+    Step               RoundStepType
+    Validators         ValidatorSet
+    Proposal           Proposal
+    ProposalBlock      Block
+    ProposalBlockParts PartSet
+    LockedRound        int
+    LockedBlock        Block
+    LockedBlockParts   PartSet
+    Votes              HeightVoteSet
+    LastCommit         VoteSet
+    LastValidators     ValidatorSet
+}
+```
+
+Internally, consensus will run as a state machine with the following states:
+
+- RoundStepNewHeight
+- RoundStepNewRound
+- RoundStepPropose
+- RoundStepProposeWait
+- RoundStepPrevote
+- RoundStepPrevoteWait
+- RoundStepPrecommit
+- RoundStepPrecommitWait
+- RoundStepCommit
+
+## Peer Round State
+
+The peer round state contains the known state of a peer. It is updated by the Receive routine of the
+Consensus Reactor and by the gossip routines upon sending a message to the peer.
+
+```golang
+type PeerRoundState struct {
+    Height                   int64         // Height peer is at
+    Round                    int           // Round peer is at, -1 if unknown.
+    Step                     RoundStepType // Step peer is at
+    Proposal                 bool          // True if peer has proposal for this round
+    ProposalBlockPartsHeader PartSetHeader
+    ProposalBlockParts       BitArray
+    ProposalPOLRound         int      // Proposal's POL round. -1 if none.
+    ProposalPOL              BitArray // nil until ProposalPOLMessage received.
+    Prevotes                 BitArray // All votes peer has for this round
+    Precommits               BitArray // All precommits peer has for this round
+    LastCommitRound          int      // Round of commit for last height. -1 if none.
+    LastCommit               BitArray // All commit precommits of commit for last height.
+    CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
+    CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
+}
+```
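+
+Because the peer round state is written by the receive method and read by several
+gossip routines concurrently, access to it has to be synchronised. A minimal
+sketch of that pattern, with the structure trimmed to two fields:
+
+```go
+package main
+
+import (
+    "fmt"
+    "sync"
+)
+
+type PeerRoundState struct {
+    Height int64
+    Round  int
+}
+
+// PeerState guards the known peer round state with a mutex.
+type PeerState struct {
+    mtx sync.Mutex
+    prs PeerRoundState
+}
+
+// ApplyNewRoundStep updates the peer's known height/round under the lock,
+// as the receive method does when a NewRoundStepMessage arrives.
+func (ps *PeerState) ApplyNewRoundStep(height int64, round int) {
+    ps.mtx.Lock()
+    defer ps.mtx.Unlock()
+    ps.prs.Height, ps.prs.Round = height, round
+}
+
+// GetRoundState returns a copy, so gossip routines can read without racing.
+func (ps *PeerState) GetRoundState() PeerRoundState {
+    ps.mtx.Lock()
+    defer ps.mtx.Unlock()
+    return ps.prs
+}
+
+func main() {
+    ps := &PeerState{}
+    ps.ApplyNewRoundStep(10, 0)
+    fmt.Printf("%+v\n", ps.GetRoundState())
+}
+```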
+
+## Receive method of Consensus reactor
+
+The entry point of the Consensus reactor is a receive method. When a message is
+received from a peer p, normally the peer round state is updated
+correspondingly, and some messages are passed for further processing, for
+example to the ConsensusState service. We now specify the processing of messages in
+the receive method of the Consensus reactor for each message type. In the following
+message handlers, `rs` and `prs` denote `RoundState` and `PeerRoundState`,
+respectively.
+
+### NewRoundStepMessage handler
+
+```go
+handleMessage(msg):
+    if msg is from smaller height/round/step then return
+    // Just remember these values.
+    prsHeight = prs.Height
+    prsRound = prs.Round
+    prsCatchupCommitRound = prs.CatchupCommitRound
+    prsCatchupCommit = prs.CatchupCommit
+
+    Update prs with values from msg
+    if prs.Height or prs.Round has been updated then
+      reset Proposal related fields of the peer state
+    if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
+      prs.Precommits = prsCatchupCommit
+    if prs.Height has been updated then
+      if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
+        prs.LastCommitRound = msg.LastCommitRound
+        prs.LastCommit = prs.Precommits
+      else
+        prs.LastCommitRound = msg.LastCommitRound
+        prs.LastCommit = nil
+      Reset prs.CatchupCommitRound and prs.CatchupCommit
+```
+
+### NewValidBlockMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height then return
+
+    if prs.Round != msg.Round && !msg.IsCommit then return
+
+    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+    prs.ProposalBlockParts = msg.BlockParts
+```
+
+The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to
+protect the node against DOS attacks.
+
+### HasVoteMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height == msg.Height then
+      prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
+```
+
+### VoteSetMaj23Message handler
+
+```go
+handleMessage(msg):
+    if prs.Height == msg.Height then
+      Record in rs that the peer claims to have a ⅔ majority for msg.BlockID
+      Send VoteSetBitsMessage showing the votes this node has for that BlockId
+```
+
+### ProposalMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+    prs.Proposal = true
+    if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
+      prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+    prs.ProposalPOLRound = msg.POLRound
+    prs.ProposalPOL = nil
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### ProposalPOLMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
+    prs.ProposalPOL = msg.ProposalPOL
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DOS attacks.
+
+### BlockPartMessage handler
+
+```go
+handleMessage(msg):
+    if prs.Height != msg.Height || prs.Round != msg.Round then return
+    Record in prs that the peer has block part msg.Part.Index
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteMessage handler
+
+```go
+handleMessage(msg):
+    Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the particular height and round
+    Send msg through internal peerMsgQueue to ConsensusState service
+```
+
+### VoteSetBitsMessage handler
+
+```go
+handleMessage(msg):
+    Update prs with the bit-array of votes the peer claims to have for msg.BlockID
+```
+
+The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
+node against DOS attacks.
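+
+A sketch of how such a size limit might be enforced before a message touches any
+peer state; the message type here is a simplified stand-in, with the constant
+mirroring the `types.MaxVotesCount` limit mentioned above:
+
+```go
+package main
+
+import (
+    "errors"
+    "fmt"
+)
+
+const maxVotesCount = 10000 // mirrors types.MaxVotesCount
+
+type VoteSetBitsMessage struct {
+    Height int64
+    Votes  []bool // bit-array of votes the peer claims to have
+}
+
+// ValidateBasic rejects oversized messages up front.
+func (m VoteSetBitsMessage) ValidateBasic() error {
+    if len(m.Votes) > maxVotesCount {
+        return errors.New("votes bit-array is too big")
+    }
+    return nil
+}
+
+func main() {
+    m := VoteSetBitsMessage{Height: 1, Votes: make([]bool, maxVotesCount+1)}
+    fmt.Println(m.ValidateBasic()) // votes bit-array is too big
+}
+```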
The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
+ Part = pick a random proposal block part the peer does not have
+ Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+ if send returns true, record that the peer knows the corresponding block Part
+ Continue
+
+1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
+ help the peer catch up using the gossipDataForCatchup function
+ Continue
+
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+ Sleep PeerGossipSleepDuration
+ Continue
+
+// at this point rs.Height == prs.Height and rs.Round == prs.Round
+1d) if (rs.Proposal != nil and !prs.Proposal) then
+ Send ProposalMessage(rs.Proposal) to the peer
+ if send returns true, record that the peer knows Proposal
+ if 0 <= rs.Proposal.POLRound then
+ polRound = rs.Proposal.POLRound
+ prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+ Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
+ Continue
+
+2) Sleep PeerGossipSleepDuration
+```
+
+### Gossip Data For Catchup
+
+This function is responsible for helping a peer catch up if it is at a smaller height
+(prs.Height < rs.Height). The function executes the following logic:
+
+```go
+ if the peer does not have all block parts for prs.ProposalBlockParts then
+ blockMeta = Load Block Metadata for height prs.Height from blockStore
+ if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
+ Sleep PeerGossipSleepDuration
+ return
+ Part = pick a random proposal block part the peer does not have
+ Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+ if send returns true, record that the peer knows the corresponding block Part
+ return
+ else Sleep PeerGossipSleepDuration
+```
+
+## Gossip Votes Routine
+
+It is used to send the following message: `VoteMessage` on the VoteChannel.
+The gossip votes routine is based on the local RoundState (`rs`)
+and the known PeerRoundState (`prs`).
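+
+A core primitive in this routine is picking a random vote the peer is missing. A minimal
+sketch of that selection (using plain bool slices for clarity; the real implementation works
+on the `BitArray` type and its `Sub`/`PickRandom` helpers):
+
+```go
+import "math/rand"
+
+// pickMissing returns the index of a random vote that we have but the
+// peer does not, or -1 if the peer already has every vote we have.
+func pickMissing(ours, theirs []bool) int {
+    var candidates []int
+    for i, have := range ours {
+        if have && (i >= len(theirs) || !theirs[i]) {
+            candidates = append(candidates, i)
+        }
+    }
+    if len(candidates) == 0 {
+        return -1
+    }
+    return candidates[rand.Intn(len(candidates))]
+}
+```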
The routine repeats forever the logic shown below:
+
+```go
+1a) if rs.Height == prs.Height then
+ if prs.Step == RoundStepNewHeight then
+ vote = random vote from rs.LastCommit the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+ if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
+ Prevotes = rs.Votes.Prevotes(prs.Round)
+ vote = random vote from Prevotes the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+ if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
+ Precommits = rs.Votes.Precommits(prs.Round)
+ vote = random vote from Precommits the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+ if prs.ProposalPOLRound != -1 then
+ PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+ vote = random vote from PolPrevotes the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
+ vote = random vote from rs.LastCommit the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
+ Commit = get commit from BlockStore for prs.Height
+ vote = random vote from Commit the peer does not have
+ Send VoteMessage(vote) to the peer
+ if send returns true, continue
+
+2) Sleep PeerGossipSleepDuration
+```
+
+## QueryMaj23Routine
+
+It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
+BlockID has seen a +2/3 majority of votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
+(`prs`). The routine repeats forever the logic shown below.
+
+```go
+1a) if rs.Height == prs.Height then
+ Prevotes = rs.Votes.Prevotes(prs.Round)
+ if there is a ⅔ majority for some blockId in Prevotes then
+ m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId)
+ Send m to peer
+ Sleep PeerQueryMaj23SleepDuration
+
+1b) if rs.Height == prs.Height then
+ Precommits = rs.Votes.Precommits(prs.Round)
+ if there is a ⅔ majority for some blockId in Precommits then
+ m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockId)
+ Send m to peer
+ Sleep PeerQueryMaj23SleepDuration
+
+1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
+ Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
+ if there is a ⅔ majority for some blockId in Prevotes then
+ m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockId)
+ Send m to peer
+ Sleep PeerQueryMaj23SleepDuration
+
+1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
+ prs.Height <= blockStore.Height() then
+ Commit = LoadCommit(prs.Height)
+ m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.BlockID)
+ Send m to peer
+ Sleep PeerQueryMaj23SleepDuration
+
+2) Sleep PeerQueryMaj23SleepDuration
+```
+
+## Broadcast routine
+
+The Broadcast routine subscribes to an internal event bus to receive new round steps and vote messages, and broadcasts messages to peers upon receiving those
+events.
+It broadcasts a `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
+broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
+Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
+
+## Channels
+
+The consensus reactor defines four channels: state, data, vote, and vote_set_bits.
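+
+In the implementation these channels are identified by byte IDs. A sketch of how such
+identifiers are conventionally declared (the values follow the consensus reactor at the
+time of writing and should be treated as illustrative):
+
+```go
+const (
+    StateChannel       = byte(0x20)
+    DataChannel        = byte(0x21)
+    VoteChannel        = byte(0x22)
+    VoteSetBitsChannel = byte(0x23)
+)
+```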
Each channel has `SendQueueCapacity`, `RecvBufferCapacity`, and
+`RecvMessageCapacity` set to `maxMsgSize`.
+
+Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/tendermint-core/evidence/README.md b/docs/tendermint-core/evidence/README.md
new file mode 100644
index 000000000..2070c48c0
--- /dev/null
+++ b/docs/tendermint-core/evidence/README.md
@@ -0,0 +1,13 @@
+---
+order: 1
+parent:
+ title: Evidence
+ order: 3
+---
+
+Evidence is used to identify validators who have acted or are acting maliciously. There are multiple types of evidence; to read more on the evidence types, please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence).
+
+The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is sent to all the peers in a repetitive manner. This ensures evidence is sent to as many peers as possible to avoid censoring. After evidence is received by peers and committed in a block, it is pruned from the evidence module.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
diff --git a/docs/tendermint-core/mempool.md b/docs/tendermint-core/mempool.md
deleted file mode 100644
index 8dd968781..000000000
--- a/docs/tendermint-core/mempool.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-order: 12
----
-
-# Mempool
-
-## Transaction ordering
-
-Currently, there's no ordering of transactions other than the order they've
-arrived (via RPC or from other nodes).
-
-So the only way to specify the order is to send them to a single node.
-
-valA:
-
-- `tx1`
-- `tx2`
-- `tx3`
-
-If the transactions are split up across different nodes, there's no way to
-ensure they are processed in the expected order.
-
-valA:
-
-- `tx1`
-- `tx2`
-
-valB:
-
-- `tx3`
-
-If valB is the proposer, the order might be:
-
-- `tx3`
-- `tx1`
-- `tx2`
-
-If valA is the proposer, the order might be:
-
-- `tx1`
-- `tx2`
-- `tx3`
-
-That said, if the transactions contain some internal value, like an
-order/nonce/sequence number, the application can reject transactions that are
-out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then
-accept `tx1`. The sender can then retry sending `tx3`, which should probably be
-rejected until the node has seen `tx2`.
diff --git a/docs/tendermint-core/mempool/README.md b/docs/tendermint-core/mempool/README.md
new file mode 100644
index 000000000..1821cf849
--- /dev/null
+++ b/docs/tendermint-core/mempool/README.md
@@ -0,0 +1,71 @@
+---
+order: 1
+parent:
+ title: Mempool
+ order: 2
+---
+
+The mempool is an in-memory pool of potentially valid transactions,
+both to broadcast to other nodes, as well as to provide to the
+consensus reactor when it is selected as the block proposer.
+
+There are two sides to the mempool state:
+
+- External: get, check, and broadcast new transactions
+- Internal: return valid transactions, update the list after a block commit
+
+## External functionality
+
+External functionality is exposed via network interfaces
+to potentially untrusted actors.
+
+- CheckTx - triggered via RPC or P2P
+- Broadcast - gossip messages after a successful check
+
+## Internal functionality
+
+Internal functionality is exposed via method calls to other
+code compiled into the tendermint binary.
+
+- ReapMaxBytesMaxGas - get txs to propose in the next block.
Guarantees that the
+ size of the txs is less than MaxBytes, and gas is less than MaxGas
+- Update - remove txs that were included in the last block
+- ABCI.CheckTx - call the ABCI app to validate the tx
+
+What does it provide the consensus reactor?
+What guarantees does it need from the ABCI app?
+(talk about interleaving processes in concurrency)
+
+## Optimizations
+
+The implementation within this library also implements a tx cache.
+This is so that signatures don't have to be reverified if the tx has
+already been seen before.
+However, we only store valid txs in the cache, not invalid ones.
+This is because invalid txs could become good later.
+Txs that are included in a block aren't removed from the cache,
+as they still may be getting received over the p2p network.
+These txs are stored in the cache by their hash, to mitigate memory concerns.
+
+Applications should implement replay protection; read [Replay
+Protection](https://github.com/tendermint/tendermint/blob/8cdaa7f515a9d366bbc9f0aff2a263a1a6392ead/docs/app-dev/app-development.md#replay-protection) for more information.
+
+## Configuration
+
+The mempool has various configurable parameters; see [config.md](./config.md) for details.
+
+Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
+in stopping the peer.
+
+`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead).
+`MaxBatchBytes` is a locally defined mempool config parameter. The reactor
+sends transactions to the connected peers in batches. The maximum size of one
+batch is `MaxBatchBytes`.
+
+The mempool will not send a tx back to any peer from which it received it.
+
+The reactor assigns a `uint16` number to each peer and maintains a map from
+p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders
+(`[]uint16`). The list is updated every time the mempool receives a transaction it
+has already seen. `uint16` assumes that a node will never have over 65535 active
+peers (0 is reserved for an unknown source, e.g. RPC).
diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md
new file mode 100644
index 000000000..4e8a9ec73
--- /dev/null
+++ b/docs/tendermint-core/mempool/config.md
@@ -0,0 +1,105 @@
+---
+order: 2
+---
+
+# Configuration
+
+Here we describe the configuration options around the mempool.
+For the purposes of this document, they are described
+in a toml file, but some of them can also be passed in as
+environment variables.
+
+Config:
+
+```toml
+[mempool]
+
+recheck = true
+broadcast = true
+wal-dir = ""
+
+# Maximum number of transactions in the mempool
+size = 5000
+
+# Limit the total size of all txs in the mempool.
+# This only accounts for raw transactions (e.g. given 1MB transactions and
+# max-txs-bytes=5MB, mempool will only accept 5 transactions).
+max-txs-bytes = 1073741824
+
+# Size of the cache (used to filter transactions we saw earlier) in transactions
+cache-size = 10000
+
+# Do not remove invalid transactions from the cache (default: false)
+# Set to true if it's not possible for any invalid transaction to become valid
+# again in the future.
+keep-invalid-txs-in-cache = false
+
+# Maximum size of a single transaction.
+# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
+max-tx-bytes = 1048576
+
+# Maximum size of a batch of transactions to send to a peer
+# Including space needed by encoding (one varint per transaction).
+
+# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
+max-batch-bytes = 0
+```
+
+## Recheck
+
+Recheck determines if the mempool rechecks all pending
+transactions after a block has been committed. Once a block
+is committed, the mempool removes all valid transactions
+that were successfully included in the block.
+
+If `recheck` is true, then it will rerun CheckTx on
+all remaining transactions with the new block state.
+
+## Broadcast
+
+Determines whether this node gossips any valid transactions
+that arrive in the mempool. The default is to gossip anything that
+passes checktx. If this is disabled, transactions are not
+gossiped, but instead stored locally and added to the next
+block for which this node is the proposer.
+
+## WalDir
+
+This defines the directory where the mempool writes its write-ahead
+logs. These files can be used to reload unbroadcasted
+transactions if the node crashes.
+
+If the directory passed in is an absolute path, the wal file is
+created there. If the directory is a relative path, the path is
+appended to the home directory of the tendermint process to
+generate an absolute path to the wal directory
+(default `$HOME/.tendermint` or set via `TM_HOME` or `--home`).
+
+## Size
+
+Size defines the total number of transactions stored in the mempool. The default is `5_000`, but it can be adjusted to any number you would like. The higher the size, the more strain on the node.
+
+## Max Transactions Bytes
+
+Max transactions bytes defines the total size of all the transactions in the mempool. The default is 1 GB.
+
+## Cache size
+
+Cache size determines the size of the cache holding transactions we have already seen. The cache exists to avoid running `checktx` each time we receive a transaction.
+
+## Keep Invalid Transactions In Cache
+
+Keep invalid transactions in cache determines whether an invalid transaction in the cache should be evicted. An invalid transaction here may mean that it relies on a different tx that has not yet been included in a block.
+
+## Max Transaction Bytes
+
+Max transaction bytes defines the maximum size a transaction can be for your node. If you would like your node to only keep track of smaller transactions, this field would need to be changed. The default is 1MB.
+
+## Max Batch Bytes
+
+Max batch bytes defines the number of bytes the node will send to a peer in one batch. The default is 0.
+
+> Note: Unused due to https://github.com/tendermint/tendermint/issues/5796
diff --git a/docs/tendermint-core/pex/README.md b/docs/tendermint-core/pex/README.md
new file mode 100644
index 000000000..5f5c3ed42
--- /dev/null
+++ b/docs/tendermint-core/pex/README.md
@@ -0,0 +1,177 @@
+---
+order: 1
+parent:
+ title: Peer Exchange
+ order: 5
+---
+
+# Peer Strategy and Exchange
+
+Here we outline the design of the PeerStore
+and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
+to good peers and to gossip peers to others.
+
+## Peer Types
+
+Certain peers are special in that they are specified by the user as `persistent`,
+which means we auto-redial them if the connection fails, or if we fail to dial
+them.
+Some peers can be marked as `private`, which means
+we will not put them in the peer store or gossip them to others.
+
+All peers except private peers and peers coming from them are tracked using the
+peer store.
+
+The rest of our peers are only distinguished by being either
+inbound (they dialed our public address) or outbound (we dialed them).
+
+## Discovery
+
+Peer discovery begins with a list of seeds.
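+
+At a high level, each discovery pass might be sketched as follows in Go (the `peerDialer`
+interface is a hypothetical stand-in for the real switch/peer-manager API, used here only
+to illustrate the steps spelled out below):
+
+```go
+// peerDialer abstracts the operations the discovery loop needs.
+type peerDialer interface {
+    NumOutbound() int  // current number of outbound peers
+    MaxOutbound() int  // MaxNumOutboundPeers from the config
+    NumDialing() int   // number of dials currently in flight
+    AskPeersForAddrs() // request more addresses from existing peers
+    DialSeeds()        // fall back to the configured seed nodes
+}
+
+// ensurePeers runs periodically (every ensurePeersPeriod).
+func ensurePeers(d peerDialer) {
+    if d.NumOutbound() >= d.MaxOutbound() {
+        return // we already have enough peers
+    }
+    d.AskPeersForAddrs() // 1. ask existing peers
+    if d.NumDialing() == 0 {
+        d.DialSeeds() // 2. dial seeds if we're not dialing anyone currently
+    }
+}
+```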
+
+When we don't have enough peers, we
+
+1. ask existing peers
+2. dial seeds if we're not dialing anyone currently
+
+On startup, we will also immediately dial the given list of `persistent_peers`,
+and will attempt to maintain persistent connections with them. If the
+connections die, or we fail to dial, we will redial every 5s for a few minutes,
+then switch to an exponential backoff schedule, and after about a day of
+trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period` is configured to zero.
+
+But if `persistent_peers_max_dial_period` is set greater than zero, the interval between dials to each persistent peer
+will not exceed `persistent_peers_max_dial_period` during exponential backoff.
+Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`),
+and we keep trying regardless of `maxAttemptsToDial`.
+
+As long as we have less than `MaxNumOutboundPeers`, we periodically request
+additional peers from each of our own peers and try the seeds.
+
+## Listening
+
+Peers listen on a configurable ListenAddr that they self-report in their
+NodeInfo during handshakes with other peers. Peers accept up to
+`MaxNumInboundPeers` incoming peers.
+
+## Address Book
+
+Peers are tracked via their ID (their PubKey.Address()).
+Peers are added to the peer store from the PEX when they first connect to us or
+when we hear about them from other peers.
+
+The peer store is arranged in sets of buckets, and distinguishes between
+vetted (old) and unvetted (new) peers. It keeps different sets of buckets for
+vetted and unvetted peers. Buckets provide randomization over peer selection.
+Peers are put in buckets according to their IP groups.
+
+An IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`) or `local` for
+local addresses or `unroutable` for unroutable addresses. The mask corresponding
+to the `/16` subnet is used for IPv4, and the `/32` subnet for IPv6.
+Each group has a limited number of buckets to prevent DoS attacks coming from
+that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS
+attack).
+
+[highwayhash](https://arxiv.org/abs/1612.06257) is used as a hashing function
+when calculating a bucket.
+
+When placing a peer into a new bucket:
+
+```md
+hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
+```
+
+When placing a peer into an old bucket:
+
+```md
+hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
+```
+
+where `key` is a random 24-character hex string, `group` is the IP group of the peer (e.g. `1.2.0.0`),
+`sourcegroup` is the IP group of the sender (the peer who sent us this address) (e.g. `174.11.0.0`),
+and `addr` is the string representation of the peer's address (e.g. `174.11.10.2:26656`).
+
+A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
+each instance of the peer can have a different IP:PORT.
+
+If we're trying to add a new peer but there's no space in its bucket, we'll
+remove the worst peer from that bucket to make room.
+
+## Vetting
+
+When a peer is first added, it is unvetted.
+Marking a peer as vetted is outside the scope of the `p2p` package.
+For Tendermint, a Peer becomes vetted once it has contributed sufficiently
+at the consensus layer; i.e. once it has sent us valid and not-yet-known
+votes and/or block parts for `NumBlocksForVetted` blocks.
+Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
+
+If a peer becomes vetted but there are already too many vetted peers,
+a randomly selected one of the vetted peers becomes unvetted.
+
+If a peer becomes unvetted (either a new peer, or one that was previously vetted),
+a randomly selected one of the unvetted peers is removed from the peer store.
+
+More fine-grained tracking of peer behaviour can be done using
+a trust metric (see below), but it's best to start with something simple.
+
+## Select Peers to Dial
+
+When we need more peers, we pick addresses randomly from the addrbook with some
+configurable bias for unvetted peers. The bias should be lower when we have
+fewer peers and can increase as we obtain more, ensuring that our first peers
+are more trustworthy, but always giving us the chance to discover new good
+peers.
+
+We track the last time we dialed a peer and the number of unsuccessful attempts
+we've made. If too many attempts are made, we mark the peer as bad.
+
+Connection attempts are made with exponential backoff (plus jitter). Because
+the selection process happens every `ensurePeersPeriod`, we might not end up
+dialing a peer for much longer than the backoff duration.
+
+If we fail to connect to the peer after 16 tries (with exponential backoff), we
+remove it from the peer store completely. But for persistent peers, we indefinitely try to
+dial all persistent peers unless `persistent_peers_max_dial_period` is configured to zero.
+
+## Select Peers to Exchange
+
+When we’re asked for peers, we select them as follows:
+
+- select at most `maxGetSelection` peers
+- try to select at least `minGetSelection` peers - if we have fewer than that, select them all.
+- select a random, unbiased `getSelectionPercent` of the peers
+
+Send the selected peers. Note that we select peers for sending without bias for vetted/unvetted.
+
+## Preventing Spam
+
+There are various cases where we decide a peer has misbehaved and we disconnect from them.
+When this happens, the peer is removed from the peer store and blacklisted for
+some amount of time. We call this "Disconnect and Mark".
+Note that the bad behaviour may be detected outside the PEX reactor itself
+(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
+so it can remove and mark the peer.
+
+In the PEX, if a peer sends us an unsolicited list of peers,
+or if the peer sends a request too soon after another one,
+we Disconnect and MarkBad.
+
+## Trust Metric
+
+The quality of peers can be tracked in more fine-grained detail using a
+Proportional-Integral-Derivative (PID) controller that incorporates
+current, past, and rate-of-change data to inform peer quality.
+
+While a PID trust metric has been implemented, it remains for future work
+to use it in the PEX.
+
+See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
+and [trust metric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
+architecture docs for more details.
diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md
deleted file mode 100644
index 52286e6c7..000000000
--- a/docs/tendermint-core/state-sync.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-order: 11
----
-
-# State Sync
-
-With block sync a node is downloading all of the data of an application from genesis and verifying it.
-With state sync your node will download data related to the head or near the head of the chain and verify the data.
-This leads to drastically shorter times for joining a network.
-
-Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
-
-## Events
-
-When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
-
-The user can query the events by subscribing `EventQueryStateSyncStatus`
-Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
\ No newline at end of file
diff --git a/docs/tendermint-core/state-sync/README.md b/docs/tendermint-core/state-sync/README.md
new file mode 100644
index 000000000..39e76ce39
--- /dev/null
+++ b/docs/tendermint-core/state-sync/README.md
@@ -0,0 +1,85 @@
+---
+order: 1
+parent:
+ title: State Sync
+ order: 4
+---
+
+State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching,
+and restoring state machine snapshots. For more information, see the [state sync ABCI section](https://docs.tendermint.com/master/spec/abci/abci.html#state-sync).
+
+The state sync reactor has two main responsibilities:
+
+* Serving state machine snapshots taken by the local ABCI application to new nodes joining the
+ network.
+
+* Discovering existing snapshots and fetching snapshot chunks for an empty local application
+ being bootstrapped.
+
+The state sync process for bootstrapping a new node is described in detail in the section linked
+above. While technically part of the reactor (see `statesync/syncer.go` and related components),
+this document will only cover the P2P reactor component.
+
+For details on the ABCI methods and data types, see the [ABCI documentation](https://docs.tendermint.com/master/spec/abci/).
+
+Information on how to configure state sync is located in the [nodes section](../../nodes/state-sync.md).
+
+## State Sync P2P Protocol
+
+When a new node begins state syncing, it will ask all peers it encounters if they have any
+available snapshots:
+
+```go
+type snapshotsRequestMessage struct{}
+```
+
+The receiver will query the local ABCI application via `ListSnapshots`, and send a message
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots:
+
+```go
+type snapshotsResponseMessage struct {
+ Height uint64
+ Format uint32
+ Chunks uint32
+ Hash []byte
+ Metadata []byte
+}
+```
+
+The node running state sync will offer these snapshots to the local ABCI application via
+`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
+is accepted, the state syncer will request snapshot chunks from appropriate peers:
+
+```go
+type chunkRequestMessage struct {
+ Height uint64
+ Format uint32
+ Index uint32
+}
+```
+
+The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
+and respond with it (limited to 16 MB):
+
+```go
+type chunkResponseMessage struct {
+ Height uint64
+ Format uint32
+ Index uint32
+ Chunk []byte
+ Missing bool
+}
+```
+
+Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
+chunk is a valid (although unlikely) response.
+
+The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
+is restored. If a chunk response is not returned within some time, it will be re-requested,
+possibly from a different peer.
+
+The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
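+
+As a rough sketch of the serving side of this exchange (the `abciApp` interface below is a
+hypothetical stand-in for the real ABCI snapshot connection, whose exact API varies between
+versions):
+
+```go
+// abciApp is an illustrative stand-in for the local ABCI application.
+type abciApp interface {
+    LoadSnapshotChunk(height uint64, format, index uint32) ([]byte, error)
+}
+
+// serveChunkRequest answers a chunkRequestMessage from the local
+// application's snapshot store.
+func serveChunkRequest(app abciApp, req chunkRequestMessage) chunkResponseMessage {
+    resp := chunkResponseMessage{Height: req.Height, Format: req.Format, Index: req.Index}
+    chunk, err := app.LoadSnapshotChunk(req.Height, req.Format, req.Index)
+    if err != nil || chunk == nil {
+        // An empty chunk is a valid payload, so absence is signaled
+        // explicitly via Missing rather than by a zero-length slice.
+        resp.Missing = true
+        return resp
+    }
+    resp.Chunk = chunk
+    return resp
+}
+```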
+ +If no state sync is in progress (i.e. during normal operation), any unsolicited response messages +are discarded. From 23be048294bad33667cb7f52fe7df22109b36f0f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 25 Oct 2021 13:19:20 +0200 Subject: [PATCH 116/174] p2p: use correct transport configuration (#7152) --- config/config.go | 53 +++++++++++++++++++++++++++++++++++++----- config/config_test.go | 18 ++++++++++++++ config/toml.go | 17 ++++++++++++++ internal/p2p/router.go | 11 ++++++--- node/setup.go | 8 ++++++- test/e2e/runner/rpc.go | 4 +--- 6 files changed, 98 insertions(+), 13 deletions(-) diff --git a/config/config.go b/config/config.go index a9b2576fd..da1f56c34 100644 --- a/config/config.go +++ b/config/config.go @@ -639,6 +639,18 @@ type P2PConfig struct { //nolint: maligned // Toggle to disable guard against peers connecting from the same ip. AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` + + // Maximum size of a message packet payload, in bytes + MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` + + // Rate at which packets can be sent, in bytes/second + SendRate int64 `mapstructure:"send-rate"` + + // Rate at which packets can be received, in bytes/second + RecvRate int64 `mapstructure:"recv-rate"` + // Peer connection configuration. HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` @@ -661,20 +673,49 @@ func DefaultP2PConfig() *P2PConfig { UPNP: false, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PexReactor: true, - AllowDuplicateIP: false, - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + FlushThrottleTimeout: 100 * time.Millisecond, + // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. + // The IP header and the TCP header take up 20 bytes each at least (unless + // optional header fields are used) and thus the max for (non-Jumbo frame) + // Ethernet is 1500 - 20 -20 = 1460 + // Source: https://stackoverflow.com/a/3074427/820520 + MaxPacketMsgPayloadSize: 1400, + SendRate: 5120000, // 5 mB/s + RecvRate: 5120000, // 5 mB/s + PexReactor: true, + AllowDuplicateIP: false, + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + QueueType: "priority", } } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
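+// Only negative values are rejected here; zero values are accepted as-is.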
+func (cfg *P2PConfig) ValidateBasic() error { + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush-throttle-timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max-packet-msg-payload-size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send-rate can't be negative") + } + if cfg.RecvRate < 0 { + return errors.New("recv-rate can't be negative") + } + return nil +} + // TestP2PConfig returns a configuration for testing the peer-to-peer layer func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" cfg.AllowDuplicateIP = true + cfg.FlushThrottleTimeout = 10 * time.Millisecond + return cfg } diff --git a/config/config_test.go b/config/config_test.go index 181314492..08a77d032 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -159,3 +159,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} diff --git a/config/toml.go b/config/toml.go index 3be385060..9c7d012f0 100644 --- a/config/toml.go +++ b/config/toml.go @@ -300,6 +300,23 @@ allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" +# Time to wait before flushing messages out on the connection +# TODO: Remove once MConnConnection is removed. +flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once MConnConnection is removed. +max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once MConnConnection is removed. +send-rate = {{ .P2P.SendRate }} + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once MConnConnection is removed. +recv-rate = {{ .P2P.RecvRate }} + + ####################################################### ### Mempool Configuration Option ### ####################################################### diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 6c4694624..402e2f0ed 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,8 +21,6 @@ import ( const queueBufferDefault = 32 -const dialRandomizerIntervalMillisecond = 3000 - // Envelope contains a message with sender/receiver routing info. 
type Envelope struct {
 From types.NodeID // sender (empty if outbound)
@@ -536,8 +534,15 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
 
 func (r *Router) dialSleep(ctx context.Context) {
 if r.options.DialSleep == nil {
+ const (
+ maxDialerInterval = 3000
+ minDialerInterval = 250
+ )
+
 // nolint:gosec // G404: Use of weak random number generator
- timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond)
+ dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval)
+
+ timer := time.NewTimer(dur * time.Millisecond)
 defer timer.Stop()
 
 select {
diff --git a/node/setup.go b/node/setup.go
index 40ced410c..2fddceac1 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -402,8 +402,14 @@ func createConsensusReactor(
 }
 
 func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport {
+ conf := conn.DefaultMConnConfig()
+ conf.FlushThrottle = cfg.P2P.FlushThrottleTimeout
+ conf.SendRate = cfg.P2P.SendRate
+ conf.RecvRate = cfg.P2P.RecvRate
+ conf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize
+
 return p2p.NewMConnTransport(
- logger, conn.DefaultMConnConfig(), []*p2p.ChannelDescriptor{},
+ logger, conf, []*p2p.ChannelDescriptor{},
 p2p.MConnTransportOptions{
 MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections),
 },
diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go
index ad5fa7a64..f6a32b114 100644
--- a/test/e2e/runner/rpc.go
+++ b/test/e2e/runner/rpc.go
@@ -70,9 +70,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty
 clients[node.Name] = client
 }
 
- wctx, cancel := context.WithTimeout(ctx, 10*time.Second)
- defer cancel()
- result, err := client.Status(wctx)
+ result, err := client.Status(ctx)
 if err != nil {
 continue
 }

From b4bc6bb4e884aca1e3a19ef9aba5aacd6325159a Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Tue, 26 Oct 2021 14:45:33 +0200
Subject: [PATCH 117/174] p2p: add message type into the send/recv bytes metrics (#7155)

This pull request adds a new "message_type" label to the send/recv bytes metrics calculated in the p2p code.
Below is a snippet of the updated metrics that includes the updated label: ``` tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="2551a13ed720101b271a5df4816d1e4b3d3bd133"} 652 tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="4b1068420ef739db63377250553562b9a978708a"} 631 tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_HasVote",peer_id="927c50a5e508c747830ce3ba64a3f70fdda58ef2"} 631 tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="2551a13ed720101b271a5df4816d1e4b3d3bd133"} 393 tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="4b1068420ef739db63377250553562b9a978708a"} 357 tendermint_p2p_peer_receive_bytes_total{chID="32",chain_id="ci",message_type="consensus_NewRoundStep",peer_id="927c50a5e508c747830ce3ba64a3f70fdda58ef2"} 386 ``` --- internal/p2p/metrics.go | 47 ++++++++++++++++++++++++++++++++++-- internal/p2p/metrics_test.go | 19 +++++++++++++++ internal/p2p/pqueue.go | 3 ++- internal/p2p/router.go | 3 ++- 4 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 internal/p2p/metrics_test.go diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index e3481058b..3677180de 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -1,6 +1,11 @@ package p2p import ( + "fmt" + "reflect" + "regexp" + "sync" + "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" "github.com/go-kit/kit/metrics/prometheus" @@ -13,6 +18,13 @@ const ( MetricsSubsystem = "p2p" ) +var ( + // valueToLabelRegexp is used to find the golang package name and type name + // so that the name can be turned into a prometheus label where the characters + // in the label do not include prometheus special characters such as '*' and '.'. + valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) +) + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. @@ -43,6 +55,9 @@ type Metrics struct { // PeerQueueMsgSize defines the average size of messages sent over a peer's // queue for a specific flow (i.e. Channel). PeerQueueMsgSize metrics.Gauge + + mtx *sync.RWMutex + messageLabelNames map[reflect.Type]string } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -68,14 +83,14 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "peer_receive_bytes_total", Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_send_bytes_total", Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -118,6 +133,9 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "router_channel_queue_msg_size", Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", }, append(labels, "ch_id")).With(labelsAndValues...), + + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } @@ -133,5 +151,30 @@ func NopMetrics() *Metrics { RouterChannelQueueSend: discard.NewHistogram(), PeerQueueDroppedMsgs: discard.NewCounter(), PeerQueueMsgSize: discard.NewGauge(), + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } + +// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang +// type that is passed in. +// This method uses a map on the Metrics struct so that each label name only needs +// to be produced once to prevent expensive string operations. +func (m *Metrics) ValueToMetricLabel(i interface{}) string { + t := reflect.TypeOf(i) + m.mtx.RLock() + + if s, ok := m.messageLabelNames[t]; ok { + m.mtx.RUnlock() + return s + } + m.mtx.RUnlock() + + s := t.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + l := fmt.Sprintf("%s_%s", ss[1], ss[2]) + m.mtx.Lock() + defer m.mtx.Unlock() + m.messageLabelNames[t] = l + return l +} diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go new file mode 100644 index 000000000..53b3c47bd --- /dev/null +++ b/internal/p2p/metrics_test.go @@ -0,0 +1,19 @@ +package p2p + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +func TestValueToMetricsLabel(t *testing.T) { + m := NopMetrics() + r := &p2p.PexResponse{} + str := m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) + + // subsequent calls to the function should produce the same result + str = m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) +} diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index e0e812cf5..11cdbd130 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -257,7 +257,8 @@ func (s *pqScheduler) process() { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, - "peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) + "peer_id", string(pqEnv.envelope.To), + "message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) s.metrics.PeerPendingSendBytes.With( "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 402e2f0ed..4724e8d9b 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -936,7 +936,8 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { case 
queue.enqueue() <- Envelope{From: peerID, Message: msg}:
 r.metrics.PeerReceiveBytesTotal.With(
 "chID", fmt.Sprint(chID),
- "peer_id", string(peerID)).Add(float64(proto.Size(msg)))
+ "peer_id", string(peerID),
+ "message_type", r.metrics.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg)))
 r.metrics.RouterChannelQueueSend.Observe(time.Since(start).Seconds())
 r.logger.Debug("received message", "peer", peerID, "message", msg)

From cb39e2f91786efda5f60fdd67a72ba5ae8d07bb5 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 26 Oct 2021 16:35:14 +0200
Subject: [PATCH 118/174] node,blocksync,config: remove support for running
 nodes with blocksync disabled (#7159)

We stopped testing these configurations a while ago, and it doesn't
really make sense to allow nodes to run in this configuration.

This drops support for non-blocksync nodes and cleans up the
configuration/tests accordingly.

Closes: #6908
---
 CHANGELOG_PENDING.md | 11 +++---
 cmd/tendermint/commands/run_node.go | 17 ---------
 config/config.go | 55 -----------------------------------
 config/config_test.go | 5 ---
 config/toml.go | 10 ------
 config/toml_test.go | 1 -
 node/node.go | 50 +++++++++++++++------------
 test/e2e/generator/generate.go | 1 -
 test/e2e/generator/generate_test.go | 3 --
 test/e2e/pkg/manifest.go | 4 ---
 test/e2e/pkg/testnet.go | 7 ----
 test/e2e/runner/setup.go | 5 ---
 12 files changed, 30 insertions(+), 139 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 33d2c89c2..98636a852 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -12,21 +12,20 @@ Special thanks to external contributors on this release:
 
 - CLI/RPC/Config
 
- - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair).
+ - [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair)
+ - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish)
 
 - Apps
+
 - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository.
 
 - P2P Protocol
 
- - [p2p] \#7035 Remove legacy P2P routing implementation and
- associated configuration options (@tychoish)
+ - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
 
 - Go API
 
- - [blocksync] \#7046 Remove v2 implementation of the blocksync
- service and recactor, which was disabled in the previous release
- (@tychoish)
+ - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish)
 - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
 
 - Blockchain Protocol

diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go
index a5fa72ed5..d5a3de04e 100644
--- a/cmd/tendermint/commands/run_node.go
+++ b/cmd/tendermint/commands/run_node.go
@@ -3,8 +3,6 @@ package commands
 import (
 "bytes"
 "crypto/sha256"
- "errors"
- "flag"
 "fmt"
 "io"
 "os"
@@ -35,22 +33,7 @@ func AddNodeFlags(cmd *cobra.Command) {
 "socket address to listen on for connections from external priv-validator process")
 
 // node flags
- cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
- // TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
- // This check was added to give users an upgrade prompt to use the new flag for syncing.
- // - // The pflag package does not have a native way to print a depcrecation warning - // and return an error. This logic was added to print a deprecation message to the user - // and then crash if the user attempts to use the old --fast-sync flag. - fs := flag.NewFlagSet("", flag.ExitOnError) - fs.Func("fast-sync", "deprecated", - func(string) error { - return errors.New("--fast-sync has been deprecated, please use --blocksync.enable") - }) - cmd.Flags().AddGoFlagSet(fs) - - cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck cmd.Flags().BytesHexVar( &genesisHash, "genesis-hash", diff --git a/config/config.go b/config/config.go index da1f56c34..c0443e021 100644 --- a/config/config.go +++ b/config/config.go @@ -29,8 +29,6 @@ const ( ModeValidator = "validator" ModeSeed = "seed" - BlockSyncV0 = "v0" - MempoolV0 = "v0" MempoolV1 = "v1" ) @@ -73,7 +71,6 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - BlockSync *BlockSyncConfig `mapstructure:"blocksync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -88,7 +85,6 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), StateSync: DefaultStateSyncConfig(), - BlockSync: DefaultBlockSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), @@ -111,7 +107,6 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), StateSync: TestStateSyncConfig(), - BlockSync: TestBlockSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), @@ -145,9 +140,6 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.StateSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [statesync] section: %w", err) } - if err := cfg.BlockSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [blocksync] section: %w", err) - } if err := cfg.Consensus.ValidateBasic(); err != nil { return fmt.Errorf("error in [consensus] section: %w", err) } @@ -333,28 +325,6 @@ func (cfg BaseConfig) ValidateBasic() error { return fmt.Errorf("unknown mode: %v", cfg.Mode) } - // TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle. - // This check was added to give users an upgrade prompt to use the new - // configuration option in v0.35. In future release cycles they should no longer - // be using this configuration parameter so the check can be removed. - // The cfg.Other field can likely be removed at the same time if it is not referenced - // elsewhere as it was added to service this check. - if fs, ok := cfg.Other["fastsync"]; ok { - if _, ok := fs.(map[string]interface{}); ok { - return fmt.Errorf("a configuration section named 'fastsync' was found in the " + - "configuration file. The 'fastsync' section has been renamed to " + - "'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'") - } - } - if fs, ok := cfg.Other["fast-sync"]; ok { - if fs != "" { - return fmt.Errorf("a parameter named 'fast-sync' was found in the " + - "configuration file. The parameter to enable or disable quickly syncing with a blockchain" + - "has moved to the [blocksync] section of the configuration file as blocksync.enable. 
" + - "Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'") - } - } - return nil } @@ -943,31 +913,6 @@ func (cfg *StateSyncConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- - -// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service -// If this node is many blocks behind the tip of the chain, BlockSync -// allows them to catchup quickly by downloading blocks in parallel -// and verifying their commits. -type BlockSyncConfig struct { - Enable bool `mapstructure:"enable"` -} - -// DefaultBlockSyncConfig returns a default configuration for the block sync service -func DefaultBlockSyncConfig() *BlockSyncConfig { - return &BlockSyncConfig{ - Enable: true, - } -} - -// TestBlockSyncConfig returns a default configuration for the block sync. -func TestBlockSyncConfig() *BlockSyncConfig { - return DefaultBlockSyncConfig() -} - -// ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { return nil } - //----------------------------------------------------------------------------- // ConsensusConfig diff --git a/config/config_test.go b/config/config_test.go index 08a77d032..304eeb0ce 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -104,11 +104,6 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) } -func TestBlockSyncConfigValidateBasic(t *testing.T) { - cfg := TestBlockSyncConfig() - assert.NoError(t, cfg.ValidateBasic()) -} - func TestConsensusConfig_ValidateBasic(t *testing.T) { // nolint: lll testcases := map[string]struct { diff --git a/config/toml.go b/config/toml.go index 9c7d012f0..dc462243a 100644 --- a/config/toml.go +++ b/config/toml.go @@ -415,16 +415,6 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}" # The number of concurrent chunk and block fetchers to run (default: 4). fetchers = "{{ .StateSync.Fetchers }}" -####################################################### -### Block Sync Configuration Connections ### -####################################################### -[blocksync] - -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -enable = {{ .BlockSync.Enable }} - ####################################################### ### Consensus Configuration Options ### ####################################################### diff --git a/config/toml_test.go b/config/toml_test.go index ccf818d65..d46cf9ce7 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -70,7 +70,6 @@ func checkConfig(t *testing.T, configFile string) { "moniker", "seeds", "proxy-app", - "blocksync", "create-empty-blocks", "peer", "timeout", diff --git a/node/node.go b/node/node.go index 727722279..d329e2494 100644 --- a/node/node.go +++ b/node/node.go @@ -245,7 +245,7 @@ func makeNode(cfg *config.Config, // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. 
- blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey) + blockSync := !onlyValidatorIsUs(state, pubKey) logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode) @@ -571,10 +571,8 @@ func (n *nodeImpl) OnStart() error { } if n.config.Mode != config.ModeSeed { - if n.config.BlockSync.Enable { - if err := n.bcReactor.Start(); err != nil { - return err - } + if err := n.bcReactor.Start(); err != nil { + return err } // Start the real consensus reactor separately since the switch uses the shim. @@ -642,29 +640,32 @@ func (n *nodeImpl) OnStart() error { n.consensusReactor.SetStateSyncingMetrics(0) - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { + if err := n.eventBus.PublishEventStateSyncStatus( + types.EventDataStateSyncStatus{ + Complete: true, + Height: state.LastBlockHeight, + }); err != nil { + n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) } // TODO: Some form of orchestrator is needed here between the state // advancing reactors to be able to control which one of the three // is running - if n.config.BlockSync.Enable { - // FIXME Very ugly to have these metrics bleed through here. - n.consensusReactor.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(state); err != nil { - n.Logger.Error("failed to switch to block sync", "err", err) - return - } + // FIXME Very ugly to have these metrics bleed through here. + n.consensusReactor.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(state); err != nil { + n.Logger.Error("failed to switch to block sync", "err", err) + return + } - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) - } + if err := n.eventBus.PublishEventBlockSyncStatus( + types.EventDataBlockSyncStatus{ + Complete: false, + Height: state.LastBlockHeight, + }); err != nil { - } else { - n.consensusReactor.SwitchToConsensus(state, true) + n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) } }() } @@ -697,11 +698,10 @@ func (n *nodeImpl) OnStop() { if n.config.Mode != config.ModeSeed { // now stop the reactors - if n.config.BlockSync.Enable { - // Stop the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the blockchain reactor", "err", err) - } + + // Stop the real blockchain reactor separately since the switch uses the shim. + if err := n.bcReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the blockchain reactor", "err", err) } // Stop the real consensus reactor separately since the switch uses the shim. 
diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 10a180f6a..4ee9849b2 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -278,7 +278,6 @@ func generateNode( Database: nodeDatabases.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), Mempool: nodeMempools.Choose(r), - BlockSync: "v0", StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index 389483027..3bda6d035 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -43,9 +43,6 @@ func TestGenerator(t *testing.T) { t.Run("PrivvalProtocol", func(t *testing.T) { require.NotZero(t, node.PrivvalProtocol) }) - t.Run("BlockSync", func(t *testing.T) { - require.NotZero(t, node.BlockSync) - }) } }) } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 16b99cfda..b57b9ac4a 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -104,10 +104,6 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // BlockSync specifies the block sync mode: "" (disable), "v0" or "v2". - // Defaults to disabled. - BlockSync string `toml:"block_sync"` - // Mempool specifies which version of mempool to use. Either "v0" or "v1" Mempool string `toml:"mempool_version"` diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index d0770ce8d..3fd9e77a2 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -84,7 +84,6 @@ type Node struct { IP net.IP ProxyPort uint32 StartAt int64 - BlockSync string Mempool string StateSync string Database string @@ -177,7 +176,6 @@ func LoadTestnet(file string) (*Testnet, error) { ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - BlockSync: "v0", Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -335,11 +333,6 @@ func (n Node) Validate(testnet Testnet) error { } } } - switch n.BlockSync { - case "", "v0", "v2": - default: - return fmt.Errorf("invalid block sync setting %q", n.BlockSync) - } switch n.StateSync { case StateSyncDisabled, StateSyncP2P, StateSyncRPC: default: diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 9bf76c874..6199d804f 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -294,11 +294,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mempool.Version = node.Mempool } - cfg.BlockSync.Enable = true - if node.BlockSync == "" { - cfg.BlockSync.Enable = false - } - switch node.StateSync { case e2e.StateSyncP2P: cfg.StateSync.Enable = true From b15b2c1b78ef7483837798623ba7e3a039a97b4c Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Tue, 26 Oct 2021 18:11:34 +0200 Subject: [PATCH 119/174] flowrate: cleanup unused files (#7158) I saw one of these tests fail and it looks like it was using code that wasn't being called anywhere, so I deleted it, and avoided the package name aliasing. 
--- internal/blocksync/pool.go | 6 +- internal/libs/flowrate/README.md | 10 -- internal/libs/flowrate/io.go | 133 -------------------- internal/libs/flowrate/io_test.go | 197 ------------------------------ internal/p2p/conn/connection.go | 10 +- 5 files changed, 8 insertions(+), 348 deletions(-) delete mode 100644 internal/libs/flowrate/README.md delete mode 100644 internal/libs/flowrate/io.go delete mode 100644 internal/libs/flowrate/io_test.go diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index d10a32b0d..8117fe2d4 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -7,7 +7,7 @@ import ( "sync/atomic" "time" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -471,7 +471,7 @@ type bpPeer struct { base int64 pool *BlockPool id types.NodeID - recvMonitor *flow.Monitor + recvMonitor *flowrate.Monitor timeout *time.Timer @@ -495,7 +495,7 @@ func (peer *bpPeer) setLogger(l log.Logger) { } func (peer *bpPeer) resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) + peer.recvMonitor = flowrate.New(time.Second, time.Second*40) initialValue := float64(minRecvRate) * math.E peer.recvMonitor.SetREMA(initialValue) } diff --git a/internal/libs/flowrate/README.md b/internal/libs/flowrate/README.md deleted file mode 100644 index caed79aa3..000000000 --- a/internal/libs/flowrate/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Data Flow Rate Control -====================== - -To download and install this package run: - -go get github.com/mxk/go-flowrate/flowrate - -The documentation is available at: - - diff --git a/internal/libs/flowrate/io.go b/internal/libs/flowrate/io.go deleted file mode 100644 index fbe090972..000000000 --- a/internal/libs/flowrate/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowrate: flow rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. -type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. -func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. 
-
-// SetLimit changes the transfer rate limit to new bytes per second and returns
-// the previous setting.
-func (r *Reader) SetLimit(new int64) (old int64) {
-	old, r.limit = r.limit, new
-	return
-}
-
-// SetBlocking changes the blocking behavior and returns the previous setting. A
-// Read call on a non-blocking reader returns immediately if no additional bytes
-// may be read at this time due to the rate limit.
-func (r *Reader) SetBlocking(new bool) (old bool) {
-	old, r.block = r.block, new
-	return
-}
-
-// Close closes the underlying reader if it implements the io.Closer interface.
-func (r *Reader) Close() error {
-	defer r.Done()
-	if c, ok := r.Reader.(io.Closer); ok {
-		return c.Close()
-	}
-	return nil
-}
-
-// Writer implements io.WriteCloser with a restriction on the rate of data
-// transfer.
-type Writer struct {
-	io.Writer // Data destination
-	*Monitor  // Flow control monitor
-
-	limit int64 // Rate limit in bytes per second (unlimited when <= 0)
-	block bool  // What to do when no new bytes can be written due to the limit
-}
-
-// NewWriter restricts all Write operations on w to limit bytes per second. The
-// transfer rate and the default blocking behavior (true) can be changed
-// directly on the returned *Writer.
-func NewWriter(w io.Writer, limit int64) *Writer {
-	return &Writer{w, New(0, 0), limit, true}
-}
-
-// Write writes len(p) bytes from p to the underlying data stream without
-// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is
-// non-blocking and no additional bytes can be written at this time.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	var c int
-	for len(p) > 0 && err == nil {
-		s := p[:w.Limit(len(p), w.limit, w.block)]
-		if len(s) > 0 {
-			c, err = w.IO(w.Writer.Write(s))
-		} else {
-			return n, ErrLimit
-		}
-		p = p[c:]
-		n += c
-	}
-	return
-}
-
-// SetLimit changes the transfer rate limit to new bytes per second and returns
-// the previous setting.
-func (w *Writer) SetLimit(new int64) (old int64) {
-	old, w.limit = w.limit, new
-	return
-}
-
-// SetBlocking changes the blocking behavior and returns the previous setting. A
-// Write call on a non-blocking writer returns as soon as no additional bytes
-// may be written at this time due to the rate limit.
-func (w *Writer) SetBlocking(new bool) (old bool) {
-	old, w.block = w.block, new
-	return
-}
-
-// Close closes the underlying writer if it implements the io.Closer interface.
-func (w *Writer) Close() error {
-	defer w.Done()
-	if c, ok := w.Writer.(io.Closer); ok {
-		return c.Close()
-	}
-	return nil
-}
diff --git a/internal/libs/flowrate/io_test.go b/internal/libs/flowrate/io_test.go
deleted file mode 100644
index 4d7de417e..000000000
--- a/internal/libs/flowrate/io_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-//
-// Written by Maxim Khitrov (November 2012)
-//
-
-package flowrate
-
-import (
-	"bytes"
-	"testing"
-	"time"
-)
-
-const (
-	_50ms  = 50 * time.Millisecond
-	_100ms = 100 * time.Millisecond
-	_200ms = 200 * time.Millisecond
-	_300ms = 300 * time.Millisecond
-	_400ms = 400 * time.Millisecond
-	_500ms = 500 * time.Millisecond
-)
-
-func nextStatus(m *Monitor) Status {
-	samples := m.samples
-	for i := 0; i < 30; i++ {
-		if s := m.Status(); s.Samples != samples {
-			return s
-		}
-		time.Sleep(5 * time.Millisecond)
-	}
-	return m.Status()
-}
-
-func TestReader(t *testing.T) {
-	in := make([]byte, 100)
-	for i := range in {
-		in[i] = byte(i)
-	}
-	b := make([]byte, 100)
-	r := NewReader(bytes.NewReader(in), 100)
-	start := time.Now()
-
-	// Make sure r implements Limiter
-	_ = Limiter(r)
-
-	// 1st read of 10 bytes is performed immediately
-	if n, err := r.Read(b); n != 10 || err != nil {
-		t.Fatalf("r.Read(b) expected 10 (<nil>); got %v (%v)", n, err)
-	} else if rt := time.Since(start); rt > _50ms {
-		t.Fatalf("r.Read(b) took too long (%v)", rt)
-	}
-
-	// No new Reads allowed in the current sample
-	r.SetBlocking(false)
-	if n, err := r.Read(b); n != 0 || err != nil {
-		t.Fatalf("r.Read(b) expected 0 (<nil>); got %v (%v)", n, err)
-	} else if rt := time.Since(start); rt > _50ms {
-		t.Fatalf("r.Read(b) took too long (%v)", rt)
-	}
-
-	status := [6]Status{0: r.Status()} // No samples in the first status
-
-	// 2nd read of 10 bytes blocks until the next sample
-	r.SetBlocking(true)
-	if n, err := r.Read(b[10:]); n != 10 || err != nil {
-		t.Fatalf("r.Read(b[10:]) expected 10 (<nil>); got %v (%v)", n, err)
-	} else if rt := time.Since(start); rt < _100ms {
-		t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt)
-	}
-
-	status[1] = r.Status()            // 1st sample
-	status[2] = nextStatus(r.Monitor) // 2nd sample
-	status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample
-
-	if n := r.Done(); n != 20 {
-		t.Fatalf("r.Done() expected 20; got %v", n)
-	}
-
-	status[4] = r.Status()
-	status[5] = nextStatus(r.Monitor) // Timeout
-	start = status[0].Start
-
-	// Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress
-	want := []Status{
-		{start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true},
-		{start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true},
-		{start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true},
-		{start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true},
-		{start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false},
-		{start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false},
-	}
-	for i, s := range status {
-		s := s
-		if !statusesAreEqual(&s, &want[i]) {
-			t.Errorf("r.Status(%v)\nexpected: %v\ngot     : %v", i, want[i], s)
-		}
-	}
-	if !bytes.Equal(b[:20], in[:20]) {
-		t.Errorf("r.Read() input doesn't match output")
-	}
-}
-
-func TestWriter(t *testing.T) {
-	b := make([]byte, 100)
-	for i := range b {
-		b[i] = byte(i)
-	}
-	w := NewWriter(&bytes.Buffer{}, 200)
-	start := time.Now()
-
-	// Make sure w implements Limiter
-	_ = Limiter(w)
-
-	// Non-blocking 20-byte write for the first sample returns ErrLimit
-	w.SetBlocking(false)
-	if n, err := w.Write(b); n != 20 || err != ErrLimit {
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _300ms { - // Explanation for `rt < _300ms` (as opposed to `< _400ms`) - // - // |<-- start | | - // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms - // sends: 20|20 |20 |20 |20# - // - // NOTE: The '#' symbol can thus happen before 400ms is up. - // Thus, we can only panic if rt < _300ms. - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, - {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, - } - - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} - -const maxDeviationForDuration = 50 * time.Millisecond -const maxDeviationForRate int64 = 50 - -// statusesAreEqual returns true if s1 is equal to s2. Equality here means -// general equality of fields except for the duration and rates, which can -// drift due to unpredictable delays (e.g. thread wakes up 25ms after -// `time.Sleep` has ended). -func statusesAreEqual(s1 *Status, s2 *Status) bool { - if s1.Active == s2.Active && - s1.Start == s2.Start && - durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && - s1.Idle == s2.Idle && - s1.Bytes == s2.Bytes && - s1.Samples == s2.Samples && - ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && - ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && - ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && - ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && - s1.BytesRem == s2.BytesRem && - durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && - s1.Progress == s2.Progress { - return true - } - return false -} - -func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - return d2-d1 <= maxDeviation -} - -func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { - sub := r1 - r2 - if sub < 0 { - sub = -sub - } - if sub <= maxDeviation { - return true - } - return false -} diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index a99e83dc5..4782d7717 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -14,7 +14,7 @@ import ( "github.com/gogo/protobuf/proto" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" @@ -77,8 +77,8 @@ type MConnection struct { conn net.Conn bufConnReader *bufio.Reader bufConnWriter *bufio.Writer - sendMonitor *flow.Monitor - recvMonitor *flow.Monitor + sendMonitor *flowrate.Monitor + recvMonitor 
 	send          chan struct{}
 	pong          chan struct{}
 	channels      []*channel
@@ -175,8 +175,8 @@ func NewMConnectionWithConfig(
 		conn:          conn,
 		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
 		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
-		sendMonitor:   flow.New(0, 0),
-		recvMonitor:   flow.New(0, 0),
+		sendMonitor:   flowrate.New(0, 0),
+		recvMonitor:   flowrate.New(0, 0),
 		send:          make(chan struct{}, 1),
 		pong:          make(chan struct{}, 1),
 		onReceive:     onReceive,

From 4bd8c5ab6f24e0b4aa611755b9dedbe23543cbb4 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 26 Oct 2021 18:34:44 +0200
Subject: [PATCH 120/174] p2p: transport should be captive responsibility of router (#7160)

The main (and minor) win of this PR is that the transport is fully the
responsibility of the router, and the node no longer needs to manage its
lifecycle.
---
 internal/p2p/mocks/transport.go  | 14 ++++++++++++
 internal/p2p/p2ptest/network.go  |  1 +
 internal/p2p/router.go           | 12 +++++++++++
 internal/p2p/router_init_test.go |  8 +++----
 internal/p2p/router_test.go      | 10 +++++++++
 internal/p2p/transport.go        |  3 +++
 internal/p2p/transport_memory.go |  2 ++
 node/node.go                     | 37 ++++++--------------------
 node/setup.go                    | 21 ++++++++++------
 9 files changed, 67 insertions(+), 41 deletions(-)

diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go
index 2fc7baa29..eea1de4c5 100644
--- a/internal/p2p/mocks/transport.go
+++ b/internal/p2p/mocks/transport.go
@@ -98,6 +98,20 @@ func (_m *Transport) Endpoints() []p2p.Endpoint {
 	return r0
 }
 
+// Listen provides a mock function with given fields: _a0
+func (_m *Transport) Listen(_a0 p2p.Endpoint) error {
+	ret := _m.Called(_a0)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(p2p.Endpoint) error); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
 // Protocols provides a mock function with given fields:
 func (_m *Transport) Protocols() []p2p.Protocol {
 	ret := _m.Called()
diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go
index c808ad3e0..0d92b2619 100644
--- a/internal/p2p/p2ptest/network.go
+++ b/internal/p2p/p2ptest/network.go
@@ -249,6 +249,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
 		privKey,
 		peerManager,
 		[]p2p.Transport{transport},
+		transport.Endpoints(),
 		p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
 	)
 	require.NoError(t, err)
diff --git a/internal/p2p/router.go b/internal/p2p/router.go
index 4724e8d9b..efd29c0d4 100644
--- a/internal/p2p/router.go
+++ b/internal/p2p/router.go
@@ -249,6 +249,7 @@ type Router struct {
 	peerManager        *PeerManager
 	chDescs            []*ChannelDescriptor
 	transports         []Transport
+	endpoints          []Endpoint
 	connTracker        connectionTracker
 	protocolTransports map[Protocol]Transport
 	stopCh             chan struct{} // signals Router shutdown
@@ -277,6 +278,7 @@ func NewRouter(
 	privKey crypto.PrivKey,
 	peerManager *PeerManager,
 	transports []Transport,
+	endpoints []Endpoint,
 	options RouterOptions,
 ) (*Router, error) {
 
@@ -295,6 +297,7 @@ func NewRouter(
 		),
 		chDescs:            make([]*ChannelDescriptor, 0),
 		transports:         transports,
+		endpoints:          endpoints,
 		protocolTransports: map[Protocol]Transport{},
 		peerManager:        peerManager,
 		options:            options,
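For orientation, here is a minimal sketch of a caller under the new contract; the helper name and parameter list are illustrative, modeled on the post-patch createRouter in node/setup.go further below. The caller now hands NewRouter the endpoints alongside the transports, and the router listens on them itself when it starts:

package node

import (
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// newRouterSketch is hypothetical; it shows the post-patch shape in
// which the router, not the node, owns the transport lifecycle.
func newRouterSketch(
	logger log.Logger,
	metrics *p2p.Metrics,
	nodeInfo types.NodeInfo,
	nodeKey types.NodeKey,
	peerManager *p2p.PeerManager,
	transport p2p.Transport,
	conf *config.Config,
) (*p2p.Router, error) {
	// Derive the listen endpoint from config, as node/setup.go does
	// after this patch.
	ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(conf.P2P.ListenAddress))
	if err != nil {
		return nil, err
	}
	return p2p.NewRouter(
		logger,
		metrics,
		nodeInfo,
		nodeKey.PrivKey,
		peerManager,
		[]p2p.Transport{transport},
		[]p2p.Endpoint{ep}, // the router listens here on start
		p2p.RouterOptions{}, // production code uses getRouterConfig(conf, proxyApp)
	)
}

The OnStart hunk that follows is where the router consumes those endpoints: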
@@ -1021,11 +1024,20 @@ func (r *Router) NodeInfo() types.NodeInfo {
 
 // OnStart implements service.Service.
 func (r *Router) OnStart() error {
+	for _, transport := range r.transports {
+		for _, endpoint := range r.endpoints {
+			if err := transport.Listen(endpoint); err != nil {
+				return err
+			}
+		}
+	}
+
 	r.Logger.Info(
 		"starting router",
 		"node_id", r.nodeInfo.NodeID,
 		"channels", r.nodeInfo.Channels,
 		"listen_addr", r.nodeInfo.ListenAddr,
+		"transports", len(r.transports),
 	)
 
 	go r.dialPeers()
diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go
index b90d2a3dd..c8bef696a 100644
--- a/internal/p2p/router_init_test.go
+++ b/internal/p2p/router_init_test.go
@@ -18,21 +18,21 @@ func TestRouter_ConstructQueueFactory(t *testing.T) {
 	t.Run("Default", func(t *testing.T) {
 		require.Zero(t, os.Getenv("TM_P2P_QUEUE"))
 		opts := RouterOptions{}
-		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
+		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts)
 		require.NoError(t, err)
 		_, ok := r.queueFactory(1).(*fifoQueue)
 		require.True(t, ok)
 	})
 	t.Run("Fifo", func(t *testing.T) {
 		opts := RouterOptions{QueueType: queueTypeFifo}
-		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
+		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts)
 		require.NoError(t, err)
 		_, ok := r.queueFactory(1).(*fifoQueue)
 		require.True(t, ok)
 	})
 	t.Run("Priority", func(t *testing.T) {
 		opts := RouterOptions{QueueType: queueTypePriority}
-		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
+		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts)
 		require.NoError(t, err)
 		q, ok := r.queueFactory(1).(*pqScheduler)
 		require.True(t, ok)
@@ -40,7 +40,7 @@ func TestRouter_ConstructQueueFactory(t *testing.T) {
 	})
 	t.Run("NonExistant", func(t *testing.T) {
 		opts := RouterOptions{QueueType: "fast"}
-		_, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
+		_, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts)
 		require.Error(t, err)
 		require.Contains(t, err.Error(), "fast")
 	})
diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go
index 997f02a06..77c6f768e 100644
--- a/internal/p2p/router_test.go
+++ b/internal/p2p/router_test.go
@@ -109,6 +109,7 @@ func TestRouter_Channel_Basic(t *testing.T) {
 		selfKey,
 		peerManager,
 		nil,
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -393,6 +394,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -445,6 +447,7 @@ func TestRouter_AcceptPeers_Error(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -479,6 +482,7 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -527,6 +531,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -626,6 +631,7 @@ func TestRouter_DialPeers(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -709,6 +715,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{
 			DialSleep: func(_ context.Context) {},
 			NumConcurrentDials: func() int {
@@ -781,6 +788,7 @@ func TestRouter_EvictPeers(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -842,6 +850,7 @@ func TestRouter_ChannelCompatability(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
@@ -896,6 +905,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
 		selfKey,
 		peerManager,
 		[]p2p.Transport{mockTransport},
+		nil,
 		p2p.RouterOptions{},
 	)
 	require.NoError(t, err)
diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go
index e78906362..08de0d3b0 100644
--- a/internal/p2p/transport.go
+++ b/internal/p2p/transport.go
@@ -23,6 +23,9 @@ type Protocol string
 
 // Transport is a connection-oriented mechanism for exchanging data with a peer.
 type Transport interface {
+	// Listen starts the transport on the specified endpoint.
+	Listen(Endpoint) error
+
 	// Protocols returns the protocols supported by the transport. The Router
 	// uses this to pick a transport for an Endpoint.
 	Protocols() []Protocol
diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go
index b4161ecd6..5d9291675 100644
--- a/internal/p2p/transport_memory.go
+++ b/internal/p2p/transport_memory.go
@@ -117,6 +117,8 @@ func (t *MemoryTransport) String() string {
 	return string(MemoryProtocol)
 }
 
+func (*MemoryTransport) Listen(Endpoint) error { return nil }
+
 func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {}
 
 // Protocols implements Transport.
diff --git a/node/node.go b/node/node.go
index d329e2494..bfccff6ef 100644
--- a/node/node.go
+++ b/node/node.go
@@ -52,7 +52,6 @@ type nodeImpl struct {
 	privValidator types.PrivValidator // local node's validator key
 
 	// network
-	transport   *p2p.MConnTransport
 	peerManager *p2p.PeerManager
 	router      *p2p.Router
 	nodeInfo    types.NodeInfo
@@ -257,9 +256,6 @@ func makeNode(cfg *config.Config,
 
 	}
 
-	p2pLogger := logger.With("module", "p2p")
-	transport := createTransport(p2pLogger, cfg)
-
 	peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
 	closers = append(closers, peerCloser)
 	if err != nil {
 		return nil, combineCloseError(
 			makeCloser(closers))
 	}
 
-	router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey,
-		peerManager, transport, getRouterConfig(cfg, proxyApp))
+	router, err := createRouter(logger, nodeMetrics.p2p, nodeInfo, nodeKey,
+		peerManager, cfg, proxyApp)
 	if err != nil {
 		return nil, combineCloseError(
 			fmt.Errorf("failed to create router: %w", err),
 			makeCloser(closers))
 	}
@@ -381,12 +377,9 @@
 	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
 	// Note we currently use the addrBook regardless at least for AddOurAddress
-	var pexReactor service.Service
-
-	pexReactor, err = createPEXReactor(logger, peerManager, router)
+	pexReactor, err := createPEXReactor(logger, peerManager, router)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
-
 	}
 
 	if cfg.RPC.PprofListenAddress != "" {
@@ -401,7 +394,6 @@
 		genesisDoc:    genDoc,
 		privValidator: privValidator,
 
-		transport:   transport,
 		peerManager: peerManager,
 		router:      router,
 		nodeInfo:    nodeInfo,
@@ -479,8 +471,6 @@ func makeSeedNode(cfg *config.Config,
 
 	// Setup Transport and Switch.
 	p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID)
 
-	p2pLogger := logger.With("module", "p2p")
-	transport := createTransport(p2pLogger, cfg)
 
 	peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID)
 	if err != nil {
 		return nil, combineCloseError(
 			closer)
 	}
 
-	router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey,
-		peerManager, transport, getRouterConfig(cfg, nil))
+	router, err := createRouter(logger, p2pMetrics, nodeInfo, nodeKey,
+		peerManager, cfg, nil)
 	if err != nil {
 		return nil, combineCloseError(
 			fmt.Errorf("failed to create router: %w", err),
@@ -516,7 +506,6 @@
 		config:     cfg,
 		genesisDoc: genDoc,
 
-		transport:   transport,
 		nodeInfo:    nodeInfo,
 		nodeKey:     nodeKey,
 		peerManager: peerManager,
@@ -556,20 +545,11 @@ func (n *nodeImpl) OnStart() error {
 	}
 
 	// Start the transport.
-	ep, err := p2p.NewEndpoint(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress))
-	if err != nil {
+	if err := n.router.Start(); err != nil {
 		return err
 	}
-	if err := n.transport.Listen(ep); err != nil {
-		return err
-	}
-
 	n.isListening = true
 
-	if err = n.router.Start(); err != nil {
-		return err
-	}
-
 	if n.config.Mode != config.ModeSeed {
 		if err := n.bcReactor.Start(); err != nil {
 			return err
 		}
@@ -732,11 +712,6 @@ func (n *nodeImpl) OnStop() {
 	if err := n.router.Stop(); err != nil {
 		n.Logger.Error("failed to stop router", "err", err)
 	}
-
-	if err := n.transport.Close(); err != nil {
-		n.Logger.Error("Error closing transport", "err", err)
-	}
-
 	n.isListening = false
 
 	// finally stop the listeners / external services
diff --git a/node/setup.go b/node/setup.go
index 2fddceac1..f7f6230d1 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -486,23 +486,32 @@
 }
 
 func createRouter(
-	p2pLogger log.Logger,
+	logger log.Logger,
 	p2pMetrics *p2p.Metrics,
 	nodeInfo types.NodeInfo,
-	privKey crypto.PrivKey,
+	nodeKey types.NodeKey,
 	peerManager *p2p.PeerManager,
-	transport p2p.Transport,
-	options p2p.RouterOptions,
+	conf *config.Config,
+	proxyApp proxy.AppConns,
 ) (*p2p.Router, error) {
 
+	p2pLogger := logger.With("module", "p2p")
+	transport := createTransport(p2pLogger, conf)
+
+	ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(conf.P2P.ListenAddress))
+	if err != nil {
+		return nil, err
+	}
+
 	return p2p.NewRouter(
 		p2pLogger,
 		p2pMetrics,
 		nodeInfo,
-		privKey,
+		nodeKey.PrivKey,
 		peerManager,
 		[]p2p.Transport{transport},
-		options,
+		[]p2p.Endpoint{ep},
+		getRouterConfig(conf, proxyApp),
 	)
 }

From ce89292712288be9b7baa57e7d84ca4bd79025a1 Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Wed, 27 Oct 2021 10:36:32 +0200
Subject: [PATCH 121/174] docs: fix broken links and layout (#7154)

This PR does a few minor touch-ups to the docs
---
 .gitignore                               |     1 +
 docs/package-lock.json                   | 12551 +--------------------
 docs/roadmap/README.md                   |     6 +
 docs/roadmap/roadmap.md                  |     5 +-
 docs/tendermint-core/README.md           |    10 +-
 docs/tendermint-core/consensus/README.md |     1 +
 docs/tools/remote-signer-validation.md   |     2 +-
 7 files changed, 25 insertions(+), 12551 deletions(-)
 create mode 100644 docs/roadmap/README.md

diff --git a/.gitignore b/.gitignore
index 38a280c64..b753f0375 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@ docs/_build
 docs/dist
 docs/node_modules/
 docs/spec
+docs/.vuepress/public/rpc
 index.html.md
 libs/pubsub/query/fuzz_test/output
 profile\.out
diff --git a/docs/package-lock.json b/docs/package-lock.json
index 1c362b60a..37938ad99 100644
---
a/docs/package-lock.json +++ b/docs/package-lock.json @@ -1,12541 +1,8 @@ { "name": "docs", "version": "1.0.0", - "lockfileVersion": 2, + "lockfileVersion": 1, "requires": true, - "packages": { - "": { - "version": "1.0.0", - "license": "ISC", - "dependencies": { - "vuepress-theme-cosmos": "^1.0.182" - }, - "devDependencies": { - "watchpack": "^2.2.0" - } - }, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.8.6.tgz", - "integrity": "sha512-Bam7otzjIEgrRXWmk0Amm1+B3ROI5dQnUfJEBjIy0YPM0kMahEoJXCw6160tGKxJLl1g6icoC953nGshQKO7cA==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.8.6.tgz", - "integrity": "sha512-eGQlsXU5G7n4RvV/K6qe6lRAeL6EKAYPT3yZDBjCW4pAh7JWta+77a7BwUQkTqXN1MEQWZXjex3E4z/vFpzNrg==" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.8.6.tgz", - "integrity": "sha512-kbJrvCFANxL/l5Pq1NFyHLRphKDwmqcD/OJga0IbNKEulRGDPkt1+pC7/q8d2ikP12adBjLLg2CVias9RJpIaw==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.8.6.tgz", - "integrity": "sha512-FQVJE/BgCb78jtG7V0r30sMl9P5JKsrsOacGtGF2YebqI0YF25y8Z1nO39lbdjahxUS3QkDw2d0P2EVMj65g2Q==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-analytics": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.8.6.tgz", - "integrity": "sha512-ZBYFUlzNaWDFtt0rYHI7xbfVX0lPWU9lcEEXI/BlnkRgEkm247H503tNatPQFA1YGkob52EU18sV1eJ+OFRBLA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.8.6.tgz", - "integrity": "sha512-8dI+K3Nvbes2YRZm2LY7bdCUD05e60BhacrMLxFuKxnBGuNehME1wbxq/QxcG1iNFJlxLIze5TxIcNN3+pn76g==", - "dependencies": { - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-recommendation": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.8.6.tgz", - "integrity": "sha512-Kg8DpjwvaWWujNx6sAUrSL+NTHxFe/UNaliCcSKaMhd3+FiPXN+CrSkO0KWR7I+oK2qGBTG/2Y0BhFOJ5/B/RA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-search": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.8.6.tgz", - "integrity": "sha512-vXLS6umL/9G3bwqc6pkrS9K5/s8coq55mpfRARL+bs0NsToOf77WSTdwzlxv/KdbVF7dHjXgUpBvJ6RyR4ZdAw==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/logger-common": { - "version": "4.8.6", - "resolved": 
"https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.8.6.tgz", - "integrity": "sha512-FMRxZGdDxSzd0/Mv0R1021FvUt0CcbsQLYeyckvSWX8w+Uk4o0lcV6UtZdERVR5XZsGOqoXLMIYDbR2vkbGbVw==" - }, - "node_modules/@algolia/logger-console": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.8.6.tgz", - "integrity": "sha512-TYw9lwUCjvApC6Z0zn36T6gkCl7hbfJmnU+Z/D8pFJ3Yp7lz06S3oWGjbdrULrYP1w1VOhjd0X7/yGNsMhzutQ==", - "dependencies": { - "@algolia/logger-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.8.6.tgz", - "integrity": "sha512-omh6uJ3CJXOmcrU9M3/KfGg8XkUuGJGIMkqEbkFvIebpBJxfs6TVs0ziNeMFAcAfhi8/CGgpLbDSgJtWdGQa6w==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.8.6.tgz", - "integrity": "sha512-r5xJqq/D9KACkI5DgRbrysVL5DUUagikpciH0k0zjBbm+cXiYfpmdflo/h6JnY6kmvWgjr/4DoeTjKYb/0deAQ==" - }, - "node_modules/@algolia/requester-node-http": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.8.6.tgz", - "integrity": "sha512-TB36OqTVOKyHCOtdxhn/IJyI/NXi/BWy8IEbsiWwwZWlL79NWHbetj49jXWFolEYEuu8PgDjjZGpRhypSuO9XQ==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.8.6.tgz", - "integrity": "sha512-NRb31J0TP7EPoVMpXZ4yAtr61d26R8KGaf6qdULknvq5sOVHuuH4PwmF08386ERfIsgnM/OBhl+uzwACdCIjSg==", - "dependencies": { - "@algolia/cache-common": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", - "dependencies": { - "@babel/highlight": "^7.12.13" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz", - "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA==" - }, - "node_modules/@babel/core": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.15.tgz", - "integrity": "sha512-6GXmNYeNjS2Uz+uls5jalOemgIhnTMeaXo+yBUA72kC2uX/8VW6XyhVIo2L8/q0goKQA3EVKx0KOQpVKSeWadQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-module-transforms": "^7.13.14", - "@babel/helpers": "^7.13.10", - "@babel/parser": "^7.13.15", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.15", - "@babel/types": "^7.13.14", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.1.2", - "semver": "^6.3.0", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/core/node_modules/json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/core/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.13.9", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.9.tgz", - "integrity": "sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw==", - "dependencies": { - "@babel/types": "^7.13.0", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - } - }, - "node_modules/@babel/generator/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz", - "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz", - "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==", - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.13.13", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.13.tgz", - "integrity": "sha512-q1kcdHNZehBwD9jYPh3WyXcsFERi39X4I59I3NadciWtNDyZ6x+GboOxncFK0kXlKIv6BJm5acncehXWUjWQMQ==", - "dependencies": { - "@babel/compat-data": "^7.13.12", - "@babel/helper-validator-option": "^7.12.17", - "browserslist": "^4.14.5", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.13.11", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz", - "integrity": "sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-member-expression-to-functions": "^7.13.0", - "@babel/helper-optimise-call-expression": "^7.12.13", - 
"@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz", - "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "regexpu-core": "^4.7.1" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz", - "integrity": "sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz", - "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==", - "dependencies": { - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", - "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", - "dependencies": { - "@babel/helper-get-function-arity": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-get-function-arity": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", - "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.0.tgz", - "integrity": "sha512-0kBzvXiIKfsCA0y6cFEIJf4OdzfpRuNk4+YTeHZpGGc666SATFKTz6sRncwFnQk7/ugJ4dSrCj6iJuvW4Qwr2g==", - "dependencies": { - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.13.12", - "resolved": 
"https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz", - "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz", - "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz", - "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-replace-supers": "^7.13.12", - "@babel/helper-simple-access": "^7.13.12", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/helper-validator-identifier": "^7.12.11", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.13", - "@babel/types": "^7.13.14" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", - "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz", - "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ==" - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz", - "integrity": "sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-wrap-function": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz", - "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==", - "dependencies": { - "@babel/helper-member-expression-to-functions": "^7.13.12", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz", - "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.12.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", - "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", - "dependencies": { - "@babel/types": "^7.12.1" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", - "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==" - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz", - "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw==" - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz", - "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.10.tgz", - "integrity": "sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ==", - "dependencies": { - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.15.tgz", - "integrity": "sha512-b9COtcAlVEQljy/9fbcMHpG+UIW9ReF+gpaxDHTlZd0c6/UU9ng8zdySAW9sRTzpvcdCHn6bUcbuYUgGzLAWVQ==", - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz", - "integrity": "sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-proposal-optional-chaining": "^7.13.12" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - 
"version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz", - "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz", - "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-decorators": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz", - "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.11", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-decorators": "^7.12.13" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz", - "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz", - "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz", - "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz", - "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz", - "integrity": 
"sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz", - "integrity": "sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz", - "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==", - "dependencies": { - "@babel/compat-data": "^7.13.8", - "@babel/helper-compilation-targets": "^7.13.8", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz", - "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz", - "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz", - "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz", - "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": 
"sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-decorators": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz", - "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz", - "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": 
"sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz", - "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz", - "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz", - "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==", - "dependencies": { - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz", - "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz", - "integrity": "sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.13.0", 
- "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz", - "integrity": "sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz", - "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz", - "integrity": "sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz", - "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz", - "integrity": "sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz", - "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz", - "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz", - "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - 
"node_modules/@babel/plugin-transform-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz", - "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz", - "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz", - "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz", - "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-simple-access": "^7.12.13", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz", - "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==", - "dependencies": { - "@babel/helper-hoist-variables": "^7.13.0", - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-identifier": "^7.12.11", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz", - "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz", - "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz", - "integrity": 
"sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz", - "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/helper-replace-supers": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz", - "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz", - "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz", - "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==", - "dependencies": { - "regenerator-transform": "^0.14.2" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz", - "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz", - "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz", - "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz", - "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - 
"@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz", - "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz", - "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz", - "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz", - "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz", - "integrity": "sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz", - "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==", - "dependencies": { - "@babel/compat-data": "^7.13.15", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-option": "^7.12.17", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-async-generator-functions": "^7.13.15", - "@babel/plugin-proposal-class-properties": "^7.13.0", - "@babel/plugin-proposal-dynamic-import": "^7.13.8", - "@babel/plugin-proposal-export-namespace-from": "^7.12.13", - "@babel/plugin-proposal-json-strings": "^7.13.8", - "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8", - "@babel/plugin-proposal-numeric-separator": "^7.12.13", - "@babel/plugin-proposal-object-rest-spread": "^7.13.8", - "@babel/plugin-proposal-optional-catch-binding": "^7.13.8", - "@babel/plugin-proposal-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-private-methods": "^7.13.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.12.13", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - 
"@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.12.13", - "@babel/plugin-transform-arrow-functions": "^7.13.0", - "@babel/plugin-transform-async-to-generator": "^7.13.0", - "@babel/plugin-transform-block-scoped-functions": "^7.12.13", - "@babel/plugin-transform-block-scoping": "^7.12.13", - "@babel/plugin-transform-classes": "^7.13.0", - "@babel/plugin-transform-computed-properties": "^7.13.0", - "@babel/plugin-transform-destructuring": "^7.13.0", - "@babel/plugin-transform-dotall-regex": "^7.12.13", - "@babel/plugin-transform-duplicate-keys": "^7.12.13", - "@babel/plugin-transform-exponentiation-operator": "^7.12.13", - "@babel/plugin-transform-for-of": "^7.13.0", - "@babel/plugin-transform-function-name": "^7.12.13", - "@babel/plugin-transform-literals": "^7.12.13", - "@babel/plugin-transform-member-expression-literals": "^7.12.13", - "@babel/plugin-transform-modules-amd": "^7.13.0", - "@babel/plugin-transform-modules-commonjs": "^7.13.8", - "@babel/plugin-transform-modules-systemjs": "^7.13.8", - "@babel/plugin-transform-modules-umd": "^7.13.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13", - "@babel/plugin-transform-new-target": "^7.12.13", - "@babel/plugin-transform-object-super": "^7.12.13", - "@babel/plugin-transform-parameters": "^7.13.0", - "@babel/plugin-transform-property-literals": "^7.12.13", - "@babel/plugin-transform-regenerator": "^7.13.15", - "@babel/plugin-transform-reserved-words": "^7.12.13", - "@babel/plugin-transform-shorthand-properties": "^7.12.13", - "@babel/plugin-transform-spread": "^7.13.0", - "@babel/plugin-transform-sticky-regex": "^7.12.13", - "@babel/plugin-transform-template-literals": "^7.13.0", - "@babel/plugin-transform-typeof-symbol": "^7.12.13", - "@babel/plugin-transform-unicode-escapes": "^7.12.13", - "@babel/plugin-transform-unicode-regex": "^7.12.13", - "@babel/preset-modules": "^0.1.4", - "@babel/types": "^7.13.14", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "core-js-compat": "^3.9.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz", - "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "node_modules/@babel/runtime": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.10.tgz", - "integrity": "sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw==", - "dependencies": { - "regenerator-runtime": "^0.13.4" - } - }, - "node_modules/@babel/template": { - "version": "7.12.13", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", - "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/parser": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/traverse": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.15.tgz", - "integrity": "sha512-/mpZMNvj6bce59Qzl09fHEs8Bt8NnpEDQYleHUPZQ3wXUMvXi+HJPLars68oAbmp839fGoOkv2pSL2z9ajCIaQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/parser": "^7.13.15", - "@babel/types": "^7.13.14", - "debug": "^4.1.0", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/traverse/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/traverse/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/types": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.14.tgz", - "integrity": "sha512-A2aa3QTkWoyqsZZFl56MLUsfmh7O0gN41IPvXAE/++8ojpbz12SszD7JEGYVdn4f9Kt4amIei07swF1h4AqmmQ==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "lodash": "^4.17.19", - "to-fast-properties": "^2.0.0" - } - }, - "node_modules/@cosmos-ui/vue": { - "version": "0.35.0", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz", - "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==", - "dependencies": { - "algoliasearch": "^4.1.0", - "axios": "^0.19.2", - "clipboard-copy": "^3.1.0", - "fuse.js": "^3.4.6", - "hotkeys-js": "^3.7.3", - "js-base64": "^2.5.2", - "lodash": "^4.17.15", - "markdown-it": "^10.0.0", - "prismjs": "^1.19.0", - "querystring": "^0.2.0", - "tiny-cookie": "^2.3.1", - "vue": "^2.6.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/axios": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", - "dependencies": { - "follow-redirects": "1.5.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/entities": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", - "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" - }, - "node_modules/@cosmos-ui/vue/node_modules/markdown-it": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz", - "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~2.0.0", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/@mrmlnc/readdir-enhanced": { - 
"version": "2.2.1", - "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", - "dependencies": { - "call-me-maybe": "^1.0.1", - "glob-to-regexp": "^0.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz", - "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@types/glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", - "dependencies": { - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.7", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz", - "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA==" - }, - "node_modules/@types/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==" - }, - "node_modules/@types/node": { - "version": "14.14.37", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.37.tgz", - "integrity": "sha512-XYmBiy+ohOR4Lh5jE379fV2IU+6Jn4g5qASinhitfyO71b/sCo6MKsMLF5tc7Zf2CE8hViVQyYSobJNke8OvUw==" - }, - "node_modules/@types/q": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", - "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" - }, - "node_modules/@vue/babel-helper-vue-jsx-merge-props": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz", - "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA==" - }, - "node_modules/@vue/babel-helper-vue-transform-on": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.2.tgz", - "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA==" - }, - "node_modules/@vue/babel-plugin-jsx": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.4.tgz", - "integrity": "sha512-Vu5gsabUdsiWc4vQarg46xWJGs8pMEJyyMQAKA1vO+F4+aR4/jaxWxPCOvZ7XvVyy+ecSbwQp/qIyDVje360UQ==", - 
"dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.0.0", - "@babel/template": "^7.0.0", - "@babel/traverse": "^7.0.0", - "@babel/types": "^7.0.0", - "@vue/babel-helper-vue-transform-on": "^1.0.2", - "camelcase": "^6.0.0", - "html-tags": "^3.1.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz", - "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==", - "dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "html-tags": "^2.0.0", - "lodash.kebabcase": "^4.1.1", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-preset-app": { - "version": "4.5.12", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz", - "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==", - "dependencies": { - "@babel/core": "^7.11.0", - "@babel/helper-compilation-targets": "^7.9.6", - "@babel/helper-module-imports": "^7.8.3", - "@babel/plugin-proposal-class-properties": "^7.8.3", - "@babel/plugin-proposal-decorators": "^7.8.3", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-jsx": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.11.0", - "@babel/preset-env": "^7.11.0", - "@babel/runtime": "^7.11.0", - "@vue/babel-plugin-jsx": "^1.0.3", - "@vue/babel-preset-jsx": "^1.2.4", - "babel-plugin-dynamic-import-node": "^2.3.3", - "core-js": "^3.6.5", - "core-js-compat": "^3.6.5", - "semver": "^6.1.0" - } - }, - "node_modules/@vue/babel-preset-jsx": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz", - "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==", - "dependencies": { - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "@vue/babel-sugar-composition-api-inject-h": "^1.2.1", - "@vue/babel-sugar-composition-api-render-instance": "^1.2.4", - "@vue/babel-sugar-functional-vue": "^1.2.2", - "@vue/babel-sugar-inject-h": "^1.2.2", - "@vue/babel-sugar-v-model": "^1.2.3", - "@vue/babel-sugar-v-on": "^1.2.3" - } - }, - "node_modules/@vue/babel-sugar-composition-api-inject-h": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz", - "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-composition-api-render-instance": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz", - "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==", - "dependencies": { - 
"@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-functional-vue": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz", - "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-inject-h": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz", - "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-v-model": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz", - "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0", - "html-tags": "^2.0.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-sugar-v-on": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz", - "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-on/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/component-compiler-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz", - "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==", - "dependencies": { - "consolidate": "^0.15.1", - "hash-sum": "^1.0.2", - "lru-cache": "^4.1.2", - "merge-source-map": "^1.1.0", - "postcss": "^7.0.14", - "postcss-selector-parser": "^6.0.2", - "prettier": "^1.18.2", - "source-map": "~0.6.1", - "vue-template-es2015-compiler": "^1.9.0" - }, - "optionalDependencies": { - "prettier": "^1.18.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - 
"dependencies": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=" - }, - "node_modules/@vuepress/core": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz", - "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==", - "dependencies": { - "@babel/core": "^7.8.4", - "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "1.8.2", - "@vuepress/markdown-loader": "1.8.2", - "@vuepress/plugin-last-updated": "1.8.2", - "@vuepress/plugin-register-components": "1.8.2", - "@vuepress/shared-utils": "1.8.2", - "autoprefixer": "^9.5.1", - "babel-loader": "^8.0.4", - "cache-loader": "^3.0.0", - "chokidar": "^2.0.3", - "connect-history-api-fallback": "^1.5.0", - "copy-webpack-plugin": "^5.0.2", - "core-js": "^3.6.4", - "cross-spawn": "^6.0.5", - "css-loader": "^2.1.1", - "file-loader": "^3.0.1", - "js-yaml": "^3.13.1", - "lru-cache": "^5.1.1", - "mini-css-extract-plugin": "0.6.0", - "optimize-css-assets-webpack-plugin": "^5.0.1", - "portfinder": "^1.0.13", - "postcss-loader": "^3.0.0", - "postcss-safe-parser": "^4.0.1", - "toml": "^3.0.0", - "url-loader": "^1.0.1", - "vue": "^2.6.10", - "vue-loader": "^15.7.1", - "vue-router": "^3.4.5", - "vue-server-renderer": "^2.6.10", - "vue-template-compiler": "^2.6.10", - "vuepress-html-webpack-plugin": "^3.2.0", - "vuepress-plugin-container": "^2.0.2", - "webpack": "^4.8.1", - "webpack-chain": "^6.0.0", - "webpack-dev-server": "^3.5.1", - "webpack-merge": "^4.1.2", - "webpackbar": "3.2.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/@vuepress/markdown": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz", - "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2", - "markdown-it": "^8.4.1", - "markdown-it-anchor": "^5.0.2", - "markdown-it-chain": "^1.3.0", - "markdown-it-emoji": "^1.4.0", - "markdown-it-table-of-contents": "^0.4.0", - "prismjs": "^1.13.0" - } - }, - "node_modules/@vuepress/markdown-loader": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz", - "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==", - "dependencies": { - "@vuepress/markdown": "1.8.2", - "loader-utils": "^1.1.0", - "lru-cache": "^5.1.1" - } - }, - "node_modules/@vuepress/markdown/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/@vuepress/markdown/node_modules/markdown-it": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz", - "integrity": 
"sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~1.1.1", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/@vuepress/plugin-active-header-links": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz", - "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==", - "dependencies": { - "lodash.debounce": "^4.0.8" - } - }, - "node_modules/@vuepress/plugin-google-analytics": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", - "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" - }, - "node_modules/@vuepress/plugin-last-updated": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz", - "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==", - "dependencies": { - "cross-spawn": "^6.0.5" - } - }, - "node_modules/@vuepress/plugin-nprogress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz", - "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==", - "dependencies": { - "nprogress": "^0.2.0" - } - }, - "node_modules/@vuepress/plugin-register-components": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz", - "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2" - } - }, - "node_modules/@vuepress/plugin-search": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz", - "integrity": "sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA==" - }, - "node_modules/@vuepress/shared-utils": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz", - "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==", - "dependencies": { - "chalk": "^2.3.2", - "escape-html": "^1.0.3", - "fs-extra": "^7.0.1", - "globby": "^9.2.0", - "gray-matter": "^4.0.1", - "hash-sum": "^1.0.2", - "semver": "^6.0.0", - "toml": "^3.0.0", - "upath": "^1.1.0" - } - }, - "node_modules/@vuepress/theme-default": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz", - "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==", - "dependencies": { - "@vuepress/plugin-active-header-links": "1.8.2", - "@vuepress/plugin-nprogress": "1.8.2", - "@vuepress/plugin-search": "1.8.2", - "docsearch.js": "^2.5.2", - "lodash": "^4.17.15", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "vuepress-plugin-container": "^2.0.2", - "vuepress-plugin-smooth-scroll": "^0.0.3" - } - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.9.0", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz", - "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", - "dependencies": { - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz", - "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", - "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz", - "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==" - }, - "node_modules/@webassemblyjs/helper-code-frame": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz", - "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==", - "dependencies": { - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-fsm": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz", - "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==" - }, - "node_modules/@webassemblyjs/helper-module-context": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz", - "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", - "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz", - "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz", - "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz", - 
"integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz", - "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz", - "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/helper-wasm-section": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-opt": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz", - "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz", - "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz", - "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wast-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz", - "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/floating-point-hex-parser": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-code-frame": "1.9.0", - "@webassemblyjs/helper-fsm": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", - "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": 
"1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" - }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" - }, - "node_modules/accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dependencies": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/agentkeepalive": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz", - "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "node_modules/ajv-errors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", - "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==" - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" - }, - "node_modules/algoliasearch": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.8.6.tgz", - "integrity": "sha512-G8IA3lcgaQB4r9HuQ4G+uSFjjz0Wv2OgEPiQ8emA+G2UUlroOfMl064j1bq/G+QTW0LmTQp9JwrFDRWxFM9J7w==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.8.6", - "@algolia/cache-common": "4.8.6", - "@algolia/cache-in-memory": "4.8.6", - "@algolia/client-account": "4.8.6", - "@algolia/client-analytics": "4.8.6", - "@algolia/client-common": "4.8.6", - "@algolia/client-recommendation": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/logger-console": "4.8.6", - "@algolia/requester-browser-xhr": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/requester-node-http": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/alphanum-sort": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=" - }, - "node_modules/ansi-align": { - 
"version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz", - "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", - "dependencies": { - "string-width": "^3.0.0" - } - }, - "node_modules/ansi-colors": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.4.tgz", - "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-html": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz", - "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4=", - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/anymatch": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", - "dependencies": { - "micromatch": "^3.1.4", - "normalize-path": "^2.1.1" - } - }, - "node_modules/anymatch/node_modules/normalize-path": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", - "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - "dependencies": { - "remove-trailing-separator": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-flatten": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-union": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" - }, - "node_modules/array-union": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", - "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", - "dependencies": { - "array-uniq": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-uniq": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", - "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/asap": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" - }, - "node_modules/asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/asn1.js": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", - "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", - "dependencies": { - "bn.js": "^4.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/asn1.js/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/assert": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", - "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", - "dependencies": { - "object-assign": "^4.1.1", - "util": "0.10.3" - } - }, - "node_modules/assert-never": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz", - "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw==" - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/assert/node_modules/inherits": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=" - }, - "node_modules/assert/node_modules/util": { - "version": "0.10.3", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", - "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", - "dependencies": { - "inherits": "2.0.1" - } - }, - "node_modules/assign-symbols": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "dependencies": { - "lodash": "^4.17.14" - } - }, - "node_modules/async-each": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", - "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==" - }, - "node_modules/async-limiter": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "node_modules/atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "bin": { - "atob": "bin/atob.js" - }, - "engines": { - "node": ">= 4.5.0" - } - }, - "node_modules/autocomplete.js": { - "version": "0.36.0", - "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz", - "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==", - "dependencies": { - "immediate": "^3.2.3" - } - }, - "node_modules/autoprefixer": { - "version": "9.8.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", - "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", - "dependencies": { - "browserslist": "^4.12.0", - "caniuse-lite": "^1.0.30001109", - "colorette": "^1.2.1", - "normalize-range": "^0.1.2", - "num2fraction": "^1.2.2", - "postcss": "^7.0.32", - "postcss-value-parser": "^4.1.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, - "node_modules/axios": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", - "dependencies": { - "follow-redirects": "^1.10.0" - } - }, - "node_modules/axios/node_modules/follow-redirects": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.3.tgz", - "integrity": "sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/babel-loader": { - "version": "8.2.2", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz", - "integrity": 
"sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==", - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": "^1.4.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz", - "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==", - "dependencies": { - "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.2.0", - "semver": "^6.1.1" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz", - "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0", - "core-js-compat": "^3.9.1" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz", - "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0" - } - }, - "node_modules/babel-walk": { - "version": "3.0.0-canary-5", - "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz", - "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==", - "dependencies": { - "@babel/types": "^7.9.6" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/base": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "dependencies": { - "cache-base": "^1.0.1", - "class-utils": "^0.3.5", - "component-emitter": "^1.2.1", - "define-property": "^1.0.0", - "isobject": "^3.0.1", - "mixin-deep": "^1.2.0", - "pascalcase": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": 
"sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=" - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", - "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "optional": true, - "dependencies": { - "file-uri-to-path": "1.0.0" - } - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" - }, - "node_modules/bn.js": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz", - "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw==" - }, - "node_modules/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "dependencies": { - "bytes": "3.1.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - 
"engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/bonjour": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz", - "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=", - "dependencies": { - "array-flatten": "^2.1.0", - "deep-equal": "^1.0.1", - "dns-equal": "^1.0.0", - "dns-txt": "^2.0.2", - "multicast-dns": "^6.0.1", - "multicast-dns-service-types": "^1.1.0" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" - }, - "node_modules/boxen": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz", - "integrity": "sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==", - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "cli-boxes": "^2.2.0", - "string-width": "^4.1.0", - "term-size": "^2.1.0", - "type-fest": "^0.8.1", - "widest-line": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/boxen/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/boxen/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - 
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/boxen/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/boxen/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dependencies": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": 
"sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, - "node_modules/browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "dependencies": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/browserify-cipher": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", - "dependencies": { - "browserify-aes": "^1.0.4", - "browserify-des": "^1.0.0", - "evp_bytestokey": "^1.0.0" - } - }, - "node_modules/browserify-des": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", - "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", - "dependencies": { - "cipher-base": "^1.0.1", - "des.js": "^1.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/browserify-rsa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", - "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", - "dependencies": { - "bn.js": "^5.0.0", - "randombytes": "^2.0.1" - } - }, - "node_modules/browserify-sign": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", - "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", - "dependencies": { - "bn.js": "^5.1.1", - "browserify-rsa": "^4.0.1", - "create-hash": "^1.2.0", - "create-hmac": "^1.1.7", - "elliptic": "^6.5.3", - "inherits": "^2.0.4", - "parse-asn1": "^5.1.5", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - } - }, - "node_modules/browserify-sign/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/browserify-sign/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/browserify-zlib": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "dependencies": { - "pako": "~1.0.5" - } - }, - "node_modules/browserslist": { - "version": "4.16.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", - "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", - "dependencies": { - "caniuse-lite": "^1.0.30001219", - "colorette": "^1.2.2", - "electron-to-chromium": "^1.3.723", - "escalade": "^3.1.1", - "node-releases": 
"^1.1.71" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/browserslist/node_modules/caniuse-lite": { - "version": "1.0.30001228", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", - "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==" - }, - "node_modules/browserslist/node_modules/electron-to-chromium": { - "version": "1.3.738", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.738.tgz", - "integrity": "sha512-vCMf4gDOpEylPSLPLSwAEsz+R3ShP02Y3cAKMZvTqule3XcPp7tgc/0ESI7IS6ZeyBlGClE50N53fIOkcIVnpw==" - }, - "node_modules/buffer": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", - "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", - "dependencies": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4", - "isarray": "^1.0.0" - } - }, - "node_modules/buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" - }, - "node_modules/buffer-indexof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz", - "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g==" - }, - "node_modules/buffer-json": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/buffer-json/-/buffer-json-2.0.0.tgz", - "integrity": "sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw==" - }, - "node_modules/buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" - }, - "node_modules/builtin-status-codes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" - }, - "node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.2", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.2.tgz", - "integrity": "sha512-w0bH1IF9rEjdi0a6lTtlXYT+vBZEJL9oytaXXRdsD68MH6+SrZGOGsu7s2saHQvYXqwo/wBdkW75tt8wFpj+mw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacache": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz", - "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", - "dependencies": { - "bluebird": "^3.5.5", - "chownr": "^1.1.1", - "figgy-pudding": "^3.5.1", - "glob": "^7.1.4", - "graceful-fs": "^4.1.15", - "infer-owner": "^1.0.3", - "lru-cache": "^5.1.1", - "mississippi": "^3.0.0", - "mkdirp": "^0.5.1", - "move-concurrently": "^1.0.1", - "promise-inflight": "^1.0.1", - "rimraf": "^2.6.3", - "ssri": "^6.0.1", - "unique-filename": "^1.1.1", - "y18n": "^4.0.0" - } - }, - "node_modules/cacache/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": 
"sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-base": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", - "dependencies": { - "collection-visit": "^1.0.0", - "component-emitter": "^1.2.1", - "get-value": "^2.0.6", - "has-value": "^1.0.0", - "isobject": "^3.0.1", - "set-value": "^2.0.0", - "to-object-path": "^0.3.0", - "union-value": "^1.0.0", - "unset-value": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cache-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/cache-loader/-/cache-loader-3.0.1.tgz", - "integrity": "sha512-HzJIvGiGqYsFUrMjAJNDbVZoG7qQA+vy9AIoKs7s9DscNfki0I589mf2w6/tW+kkFH3zyiknoWV5Jdynu6b/zw==", - "dependencies": { - "buffer-json": "^2.0.0", - "find-cache-dir": "^2.1.0", - "loader-utils": "^1.2.3", - "mkdirp": "^0.5.1", - "neo-async": "^2.6.1", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/cache-loader/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-loader/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/path-exists": { - "version": "3.0.0", - 
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/cache-loader/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/cache-loader/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/normalize-url": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", - "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "node_modules/call-me-maybe": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", - "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" - }, - "node_modules/caller-callsite": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", - "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=", - "dependencies": { - "callsites": "^2.0.0" - }, - 
"engines": { - "node": ">=4" - } - }, - "node_modules/caller-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", - "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=", - "dependencies": { - "caller-callsite": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/callsites": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", - "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=", - "engines": { - "node": ">=4" - } - }, - "node_modules/camel-case": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz", - "integrity": "sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=", - "dependencies": { - "no-case": "^2.2.0", - "upper-case": "^1.1.1" - } - }, - "node_modules/camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": "sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/character-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz", - "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=", - "dependencies": { - "is-regex": "^1.0.3" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.6", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz", - "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==", - "dependencies": { - "cheerio-select": "^1.3.0", - "dom-serializer": "^1.3.1", - "domhandler": "^4.1.0", - "htmlparser2": "^6.1.0", - "parse5": "^6.0.1", - "parse5-htmlparser2-tree-adapter": "^6.0.1" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/cheerio-select": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.3.0.tgz", - "integrity": "sha512-mLgqdHxVOQyhOIkG5QnRkDg7h817Dkf0dAvlCio2TJMmR72cJKH0bF28SHXvLkVrGcGOiub0/Bs/CMnPeQO7qw==", - "dependencies": { - "css-select": "^4.0.0", - "css-what": "^5.0.0", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.2" - } - }, - "node_modules/chokidar": { - "version": "2.1.8", - "resolved": 
"https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", - "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", - "dependencies": { - "anymatch": "^2.0.0", - "async-each": "^1.0.1", - "braces": "^2.3.2", - "fsevents": "^1.2.7", - "glob-parent": "^3.1.0", - "inherits": "^2.0.3", - "is-binary-path": "^1.0.0", - "is-glob": "^4.0.0", - "normalize-path": "^3.0.0", - "path-is-absolute": "^1.0.0", - "readdirp": "^2.2.1", - "upath": "^1.1.1" - }, - "optionalDependencies": { - "fsevents": "^1.2.7" - } - }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" - }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz", - "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ==" - }, - "node_modules/cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/class-utils": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", - "dependencies": { - "arr-union": "^3.1.0", - "define-property": "^0.2.5", - "isobject": "^3.0.0", - "static-extend": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/class-utils/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/clean-css": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz", - "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/clipboard": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz", - "integrity": 
"sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==", - "optional": true, - "dependencies": { - "good-listener": "^1.2.2", - "select": "^1.1.2", - "tiny-emitter": "^2.0.0" - } - }, - "node_modules/clipboard-copy": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", - "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" - }, - "node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dependencies": { - "mimic-response": "^1.0.0" - } - }, - "node_modules/coa": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", - "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", - "dependencies": { - "@types/q": "^1.5.1", - "chalk": "^2.4.1", - "q": "^1.1.2" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/collection-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", - "dependencies": { - "map-visit": "^1.0.0", - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/color": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", - "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", - "dependencies": { - "color-convert": "^1.9.1", - "color-string": "^1.5.4" - } - }, - "node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, 
- "node_modules/colorette": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", - "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "2.17.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz", - "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==" - }, - "node_modules/commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" - }, - "node_modules/component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "node_modules/concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "engines": [ - "node >= 0.8" - ], - "dependencies": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "node_modules/configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/connect-history-api-fallback": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz", - "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" - }, - "node_modules/console-browserify": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" - }, - "node_modules/consolidate": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz", - "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==", - "dependencies": { - "bluebird": "^3.1.1" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/constantinople": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz", - "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==", - "dependencies": { - "@babel/parser": "^7.6.0", - "@babel/types": "^7.6.1" - } - }, - "node_modules/constants-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" - }, - "node_modules/content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", - "dependencies": { - "safe-buffer": "5.1.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dependencies": { - "safe-buffer": "~5.1.1" - } - }, - "node_modules/cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "node_modules/copy-concurrently": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", - "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", - "dependencies": { - "aproba": "^1.1.1", - 
"fs-write-stream-atomic": "^1.0.8", - "iferr": "^0.1.5", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.0" - } - }, - "node_modules/copy-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/copy-descriptor": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz", - "integrity": "sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==", - "dependencies": { - "cacache": "^12.0.3", - "find-cache-dir": "^2.1.0", - "glob-parent": "^3.1.0", - "globby": "^7.1.1", - "is-glob": "^4.0.1", - "loader-utils": "^1.2.3", - "minimatch": "^3.0.4", - "normalize-path": "^3.0.0", - "p-limit": "^2.2.1", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz", - "integrity": "sha1-+yzP+UAfhgCUXfral0QMypcrhoA=", - "dependencies": { - "array-union": "^1.0.1", - "dir-glob": "^2.0.0", - "glob": "^7.1.2", - "ignore": "^3.3.5", - "pify": "^3.0.0", - "slash": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ignore": { - "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" - }, - "node_modules/copy-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/copy-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/core-js": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.10.1.tgz", - "integrity": "sha512-pwCxEXnj27XG47mu7SXAwhLP3L5CrlvCB91ANUkIz40P27kUcvNfSdvyZJ9CLHiVoKSp+TTChMQMSKQEH/IQxA==", - "hasInstallScript": true - }, - "node_modules/core-js-compat": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.10.1.tgz", - "integrity": "sha512-ZHQTdTPkqvw2CeHiZC970NNJcnwzT6YIueDMASKt+p3WbZsLXOcoD392SkcWhkC0wBBHhlfhqGKKsNCQUozYtg==", - "dependencies": { - "browserslist": "^4.16.3", - "semver": "7.0.0" - } - }, - "node_modules/core-js-compat/node_modules/semver": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", - "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "node_modules/cosmiconfig": { - "version": "5.2.1", - "resolved": 
"https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", - "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", - "dependencies": { - "import-fresh": "^2.0.0", - "is-directory": "^0.3.1", - "js-yaml": "^3.13.1", - "parse-json": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/create-ecdh": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", - "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", - "dependencies": { - "bn.js": "^4.1.0", - "elliptic": "^6.5.3" - } - }, - "node_modules/create-ecdh/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "dependencies": { - "cipher-base": "^1.0.1", - "inherits": "^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "node_modules/create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "dependencies": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "node_modules/cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dependencies": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "engines": { - "node": ">=4.8" - } - }, - "node_modules/cross-spawn/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/crypto-browserify": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", - "dependencies": { - "browserify-cipher": "^1.0.0", - "browserify-sign": "^4.0.0", - "create-ecdh": "^4.0.0", - "create-hash": "^1.1.0", - "create-hmac": "^1.1.0", - "diffie-hellman": "^5.0.0", - "inherits": "^2.0.1", - "pbkdf2": "^3.0.3", - "public-encrypt": "^4.0.0", - "randombytes": "^2.0.0", - "randomfill": "^1.0.3" - }, - "engines": { - "node": "*" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/css": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz", 
- "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==", - "dependencies": { - "inherits": "^2.0.3", - "source-map": "^0.6.1", - "source-map-resolve": "^0.5.2", - "urix": "^0.1.0" - } - }, - "node_modules/css-color-names": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=", - "engines": { - "node": "*" - } - }, - "node_modules/css-declaration-sorter": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", - "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", - "dependencies": { - "postcss": "^7.0.1", - "timsort": "^0.3.0" - }, - "engines": { - "node": ">4" - } - }, - "node_modules/css-loader": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-2.1.1.tgz", - "integrity": "sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==", - "dependencies": { - "camelcase": "^5.2.0", - "icss-utils": "^4.1.0", - "loader-utils": "^1.2.3", - "normalize-path": "^3.0.0", - "postcss": "^7.0.14", - "postcss-modules-extract-imports": "^2.0.0", - "postcss-modules-local-by-default": "^2.0.6", - "postcss-modules-scope": "^2.1.0", - "postcss-modules-values": "^2.0.0", - "postcss-value-parser": "^3.3.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/css-loader/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/css-loader/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/css-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/css-parse": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/css-parse/-/css-parse-2.0.0.tgz", - "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=", - "dependencies": { - "css": "^2.0.0" - } - }, - "node_modules/css-select": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.0.0.tgz", - "integrity": "sha512-I7favumBlDP/nuHBKLfL5RqvlvRdn/W29evvWJ+TaoGPm7QD+xSIN5eY2dyGjtkUmemh02TZrqJb4B8DWo6PoQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^5.0.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.1", - "nth-check": "^2.0.0" - } - }, - "node_modules/css-select-base-adapter": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", - "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" - }, - "node_modules/css-tree": { - "version": "1.0.0-alpha.37", - 
"resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", - "dependencies": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-tree/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/css-what": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz", - "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "4.1.11", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz", - "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "cssnano-preset-default": "^4.0.8", - "is-resolvable": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-preset-default": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz", - "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==", - "dependencies": { - "css-declaration-sorter": "^4.0.1", - "cssnano-util-raw-cache": "^4.0.1", - "postcss": "^7.0.0", - "postcss-calc": "^7.0.1", - "postcss-colormin": "^4.0.3", - "postcss-convert-values": "^4.0.1", - "postcss-discard-comments": "^4.0.2", - "postcss-discard-duplicates": "^4.0.2", - "postcss-discard-empty": "^4.0.1", - "postcss-discard-overridden": "^4.0.1", - "postcss-merge-longhand": "^4.0.11", - "postcss-merge-rules": "^4.0.3", - "postcss-minify-font-values": "^4.0.2", - "postcss-minify-gradients": "^4.0.2", - "postcss-minify-params": "^4.0.2", - "postcss-minify-selectors": "^4.0.2", - "postcss-normalize-charset": "^4.0.1", - "postcss-normalize-display-values": "^4.0.2", - "postcss-normalize-positions": "^4.0.2", - "postcss-normalize-repeat-style": "^4.0.2", - "postcss-normalize-string": "^4.0.2", - "postcss-normalize-timing-functions": "^4.0.2", - "postcss-normalize-unicode": "^4.0.1", - "postcss-normalize-url": "^4.0.1", - "postcss-normalize-whitespace": "^4.0.2", - "postcss-ordered-values": "^4.1.2", - "postcss-reduce-initial": "^4.0.3", - "postcss-reduce-transforms": "^4.0.2", - "postcss-svgo": "^4.0.3", - "postcss-unique-selectors": "^4.0.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-arguments": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-match": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-raw-cache": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", - "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-same-parent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", - "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" - }, - "node_modules/csso/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cyclist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", - "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=" - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/de-indent": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", - "integrity": "sha1-sgOOhG3DO6pXlhKNCAS0VbjB4h0=" - }, - "node_modules/debug": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "engines": { - "node": ">=0.10.0" - 
} - }, - "node_modules/decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-equal": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz", - "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==", - "dependencies": { - "is-arguments": "^1.0.4", - "is-date-object": "^1.0.1", - "is-regex": "^1.0.4", - "object-is": "^1.0.1", - "object-keys": "^1.1.1", - "regexp.prototype.flags": "^1.2.0" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-1.5.2.tgz", - "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.2.0.tgz", - "integrity": "sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==", - "dependencies": { - "execa": "^1.0.0", - "ip-regex": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" - }, - "node_modules/define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dependencies": { - "object-keys": "^1.0.12" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/define-property": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", - "dependencies": { - "is-descriptor": "^1.0.2", - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-4.1.1.tgz", - "integrity": "sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ==", - "dependencies": { - "@types/glob": "^7.1.1", - "globby": "^6.1.0", - "is-path-cwd": "^2.0.0", - "is-path-in-cwd": "^2.0.0", - "p-map": "^2.0.0", - "pify": "^4.0.1", - "rimraf": "^2.6.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/del/node_modules/globby": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz", - "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=", - "dependencies": { - "array-union": "^1.0.1", - "glob": "^7.0.3", - "object-assign": "^4.0.1", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del/node_modules/globby/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/delegate": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz", - "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==", - "optional": true - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/des.js": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", - "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", - "dependencies": { - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "node_modules/detect-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz", - "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw==" - }, - "node_modules/diffie-hellman": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", - "dependencies": 
{ - "bn.js": "^4.1.0", - "miller-rabin": "^4.0.0", - "randombytes": "^2.0.0" - } - }, - "node_modules/diffie-hellman/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/dir-glob": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz", - "integrity": "sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==", - "dependencies": { - "path-type": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0=" - }, - "node_modules/dns-packet": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.4.tgz", - "integrity": "sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==", - "dependencies": { - "ip": "^1.1.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/dns-txt": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz", - "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=", - "dependencies": { - "buffer-indexof": "^1.0.0" - } - }, - "node_modules/docsearch.js": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz", - "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==", - "dependencies": { - "algoliasearch": "^3.24.5", - "autocomplete.js": "0.36.0", - "hogan.js": "^3.0.2", - "request": "^2.87.0", - "stack-utils": "^1.0.1", - "to-factory": "^1.0.0", - "zepto": "^1.2.0" - } - }, - "node_modules/docsearch.js/node_modules/algoliasearch": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", - "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", - "dependencies": { - "agentkeepalive": "^2.2.0", - "debug": "^2.6.9", - "envify": "^4.0.0", - "es6-promise": "^4.1.0", - "events": "^1.1.0", - "foreach": "^2.0.5", - "global": "^4.3.2", - "inherits": "^2.0.1", - "isarray": "^2.0.1", - "load-script": "^1.0.0", - "object-keys": "^1.0.11", - "querystring-es3": "^0.2.1", - "reduce": "^1.0.1", - "semver": "^5.1.0", - "tunnel-agent": "^0.6.0" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/docsearch.js/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/docsearch.js/node_modules/events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/docsearch.js/node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" - }, - "node_modules/docsearch.js/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - 
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/doctypes": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz", - "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk=" - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz", - "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "entities": "^2.0.0" - } - }, - "node_modules/dom-walk": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", - "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" - }, - "node_modules/domain-browser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", - "engines": { - "node": ">=0.4", - "npm": ">=1.2" - } - }, - "node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/domhandler": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.1.0.tgz", - "integrity": "sha512-/6/kmsGlMY4Tup/nGVutdrK9yQi4YjWVcVeoQmixpzjOUK1U7pQkvAPHBJeUxOgxF0J8f8lwCJSlCfD0V4CMGQ==", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/domutils": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.5.2.tgz", - "integrity": "sha512-MHTthCb1zj8f1GVfRpeZUbohQf/HdBos0oX5gZcQFepOZPLLRyj6Wn7XS7EMnY7CVpwv8863u2vyE83Hfu28HQ==", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "node_modules/duplexify": { - "version": "3.7.1", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", - "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", - "dependencies": { - "end-of-stream": "^1.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.0.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": 
"sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "node_modules/elliptic": { - "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", - "dependencies": { - "bn.js": "^4.11.9", - "brorand": "^1.1.0", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.1", - "inherits": "^2.0.4", - "minimalistic-assert": "^1.0.1", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/elliptic/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enhanced-resolve": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz", - "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==", - "dependencies": { - "graceful-fs": "^4.1.2", - "memory-fs": "^0.5.0", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/enhanced-resolve/node_modules/memory-fs": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", - "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - }, - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" - }, - "node_modules/envify": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz", - "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==", - "dependencies": { - "esprima": "^4.0.0", - "through": "~2.3.4" - }, - "bin": { - "envify": "bin/envify" - } - }, - "node_modules/envinfo": { - "version": "7.8.1", - "resolved": 
"https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", - "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", - "bin": { - "envinfo": "dist/cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/errno": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", - "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", - "dependencies": { - "prr": "~1.0.1" - }, - "bin": { - "errno": "cli.js" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", - "dependencies": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "get-intrinsic": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", - "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.2", - "string.prototype.trimend": "^1.0.4", - "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eslint-scope": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", - "integrity": 
"sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", - "dependencies": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/esm": { - "version": "3.2.25", - "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz", - "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", - "dependencies": { - "original": "^1.0.0" - }, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "dependencies": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/execa": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "dependencies": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/expand-brackets": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dependencies": { - "debug": "^2.3.3", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "posix-character-classes": "^0.1.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expand-brackets/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/expand-brackets/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", - "dependencies": { - "accepts": "~1.3.7", - "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", - "content-type": "~1.0.4", - "cookie": "0.4.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dependencies": { - "is-extendable": 
"^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dependencies": { - "array-unique": "^0.3.2", - "define-property": "^1.0.0", - "expand-brackets": "^2.1.4", - "extend-shallow": "^2.0.1", - "fragment-cache": "^0.2.1", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-glob": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", - "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==", - "dependencies": { - "@mrmlnc/readdir-enhanced": "^2.2.1", - "@nodelib/fs.stat": "^1.1.2", - "glob-parent": "^3.1.0", - "is-glob": "^4.0.0", - "merge2": "^1.2.3", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/faye-websocket": { - "version": "0.11.3", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz", - "integrity": 
"sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/figgy-pudding": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", - "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==" - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/file-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-3.0.1.tgz", - "integrity": "sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==", - "dependencies": { - "loader-utils": "^1.0.2", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "optional": true - }, - "node_modules/fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/find-cache-dir": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", - "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": 
"sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/flush-write-stream": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", - "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", - "dependencies": { - "inherits": "^2.0.3", - "readable-stream": "^2.3.6" - } - }, - "node_modules/follow-redirects": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz", - "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==", - "dependencies": { - "debug": "=3.1.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/foreach": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", - "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=" - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fragment-cache": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", - "dependencies": { - "map-cache": "^0.2.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", - "dependencies": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - } - }, - "node_modules/fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "dependencies": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-write-stream-atomic": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", - "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", - "dependencies": { - 
"graceful-fs": "^4.1.2", - "iferr": "^0.1.5", - "imurmurhash": "^0.1.4", - "readable-stream": "1 || 2" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fsevents": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", - "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "bindings": "^1.5.0", - "nan": "^2.12.1" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "node_modules/fuse.js": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", - "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - } - }, - "node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/get-value": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/glob-parent": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", - "dependencies": { - "is-glob": "^3.1.0", - "path-dirname": "^1.0.0" - } - }, - "node_modules/glob-parent/node_modules/is-glob": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", - "dependencies": { - "is-extglob": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=" - }, - "node_modules/global": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz", - "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==", - "dependencies": { - "min-document": "^2.19.0", - "process": "^0.11.10" - } - }, - "node_modules/global-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz", - "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==", - "dependencies": { - "ini": "1.3.7" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-9.2.0.tgz", - "integrity": "sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg==", - "dependencies": { - "@types/glob": "^7.1.1", - "array-union": "^1.0.2", - "dir-glob": "^2.2.2", - "fast-glob": "^2.2.6", - "glob": "^7.1.3", - "ignore": "^4.0.3", - "pify": "^4.0.1", - "slash": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/good-listener": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=", - "optional": true, - "dependencies": { - "delegate": "^3.1.2" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" - }, - "node_modules/gray-matter": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.2.tgz", - "integrity": "sha512-7hB/+LxrOjq/dd8APlK0r24uL/67w7SkYnfwhNFwg/VDIGWGmduTDYf3WNstLW2fbbmRwrDGCVSJ2isuf2+4Hw==", - "dependencies": { - "js-yaml": "^3.11.0", - "kind-of": 
"^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==" - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/has-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", - "dependencies": { - "get-value": "^2.0.6", - "has-values": "^1.0.0", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", - "dependencies": { - "is-number": "^3.0.0", - "kind-of": "^4.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values/node_modules/kind-of": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/hash-base": { - "version": 
"3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "dependencies": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/hash-base/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/hash-base/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/hash-sum": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/hash-sum/-/hash-sum-1.0.2.tgz", - "integrity": "sha1-M7QHd3VMZDJXPBIMw4CLvRDUfwQ=" - }, - "node_modules/hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "dependencies": { - "inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "bin": { - "he": "bin/he" - } - }, - "node_modules/hex-color-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" - }, - "node_modules/hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "dependencies": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/hogan.js": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz", - "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=", - "dependencies": { - "mkdirp": "0.3.0", - "nopt": "1.0.10" - }, - "bin": { - "hulk": "bin/hulk" - } - }, - "node_modules/hogan.js/node_modules/mkdirp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz", - "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=", - "engines": { - "node": "*" - } - }, - "node_modules/hotkeys-js": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz", - "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw==" - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hsl-regex": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=" - }, - "node_modules/hsla-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=" - }, - "node_modules/html-entities": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.4.0.tgz", - "integrity": "sha512-8nxjcBcd8wovbeKx7h3wTji4e6+rhaVuPNpMqwWgnHh+N9ToqsCs6XztWRBPQ+UtzsoMAdKZtUENoVzU/EMtZA==" - }, - "node_modules/html-minifier": { - "version": "3.5.21", - "resolved": "https://registry.npmjs.org/html-minifier/-/html-minifier-3.5.21.tgz", - "integrity": "sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==", - "dependencies": { - "camel-case": "3.0.x", - "clean-css": "4.2.x", - "commander": "2.17.x", - "he": "1.2.x", - "param-case": "2.1.x", - "relateurl": "0.2.x", - "uglify-js": "3.4.x" - }, - "bin": { - "html-minifier": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/html-tags": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", - "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=" - }, - "node_modules/http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-errors/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/http-parser-js": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz", - "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg==" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - 
"node_modules/http-proxy-middleware": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", - "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", - "dependencies": { - "http-proxy": "^1.17.0", - "is-glob": "^4.0.0", - "lodash": "^4.17.11", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - "node_modules/https-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=" - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-replace-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz", - "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=" - }, - "node_modules/icss-utils": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz", - "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==", - "dependencies": { - "postcss": "^7.0.14" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" - }, - "node_modules/iferr": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz", - "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=" - }, - "node_modules/ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, - "node_modules/import-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz", - "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=", - "dependencies": { - "import-from": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-fresh": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", - "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=", - "dependencies": { - "caller-path": "^2.0.0", - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-from": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz", - 
"integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz", - "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==", - "dependencies": { - "pkg-dir": "^3.0.0", - "resolve-cwd": "^2.0.0" - }, - "bin": { - "import-local-fixture": "fixtures/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indexes-of": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=" - }, - "node_modules/infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==" - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==" - }, - "node_modules/internal-ip": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.3.0.tgz", - "integrity": "sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==", - "dependencies": { - "default-gateway": "^4.2.0", - "ipaddr.js": "^1.9.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/ip": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", - "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=" - }, - "node_modules/ip-regex": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz", - "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=", - "engines": { - "node": ">=4" - } - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-absolute-url": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-arguments": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz", - "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" - }, - "node_modules/is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" - }, - "node_modules/is-binary-path": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", - "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", - "dependencies": { - "binary-extensions": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": 
"sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "node_modules/is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-ci/node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" - }, - "node_modules/is-color-stop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", - "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=", - "dependencies": { - "css-color-names": "^0.0.4", - "hex-color-regex": "^1.1.0", - "hsl-regex": "^1.0.0", - "hsla-regex": "^1.0.0", - "rgb-regex": "^1.0.1", - "rgba-regex": "^1.0.0" - } - }, - "node_modules/is-core-module": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", - "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", - "dependencies": { - "has": "^1.0.3" - } - }, - "node_modules/is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-data-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dependencies": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-descriptor/node_modules/kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": 
"sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-directory": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-expression": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz", - "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==", - "dependencies": { - "acorn": "^7.1.1", - "object-assign": "^4.1.1" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dependencies": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-installed-globally/node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-npm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz", - "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-number-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": 
"sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-number/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-in-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz", - "integrity": "sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ==", - "dependencies": { - "is-path-inside": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-2.1.0.tgz", - "integrity": "sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg==", - "dependencies": { - "path-is-inside": "^1.0.2" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, - "node_modules/is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", - "dependencies": { - "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-resolvable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" - }, - "node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": 
"sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dependencies": { - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "node_modules/is-windows": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-wsl": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" - }, - "node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "node_modules/javascript-stringify": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz", - "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" - }, - "node_modules/js-base64": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz", - "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ==" - }, - "node_modules/js-stringify": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz", - "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds=" - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "node_modules/json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" - }, - "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "node_modules/json3": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.3.tgz", - "integrity": "sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA==" - }, - "node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "dependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz", - "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=", - "dependencies": { - "debug": "^2.1.3" - } - }, - "node_modules/jsonp/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "node_modules/jstransformer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz", - "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=", - "dependencies": { - "is-promise": "^2.0.0", - "promise": "^7.0.1" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/killable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/killable/-/killable-1.0.1.tgz", - "integrity": "sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg==" - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/last-call-webpack-plugin": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz", - "integrity": "sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==", - "dependencies": { - "lodash": "^4.17.5", - "webpack-sources": "^1.1.0" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/linkify-it": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", - "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/load-script": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=" - }, - "node_modules/loader-runner": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", - "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/loader-utils": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", - "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^1.0.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash._reinterpolate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=" - }, - "node_modules/lodash.chunk": { - "version": "4.2.0", - "resolved": 
"https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", - "integrity": "sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw=" - }, - "node_modules/lodash.clonedeep": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", - "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" - }, - "node_modules/lodash.kebabcase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", - "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY=" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=" - }, - "node_modules/lodash.padstart": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz", - "integrity": "sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs=" - }, - "node_modules/lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=" - }, - "node_modules/lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "node_modules/lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" - }, - "node_modules/loglevel": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", - "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/lower-case": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", - "integrity": "sha1-miyr0bno4K6ZOkv31YdcOcQujqw=" - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - 
"dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/map-cache": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/map-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", - "dependencies": { - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/markdown-it": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.4.tgz", - "integrity": "sha512-34RwOXZT8kyuOJy25oJNJoulO8L0bTHYWXcdZBYZqFnjIy3NgjeoM3FmPXIOFQ26/lSHYMr8oc62B6adxXcb3Q==", - "dependencies": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/markdown-it-anchor": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", - "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==" - }, - "node_modules/markdown-it-attrs": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz", - "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/markdown-it-chain": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-chain/-/markdown-it-chain-1.3.0.tgz", - "integrity": "sha512-XClV8I1TKy8L2qsT9iX3qiV+50ZtcInGXI80CA+DP62sMs7hXlyV/RM3hfwy5O3Ad0sJm9xIwQELgANfESo8mQ==", - "dependencies": { - "webpack-chain": "^4.9.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/markdown-it-chain/node_modules/webpack-chain": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-4.12.1.tgz", - "integrity": "sha512-BCfKo2YkDe2ByqkEWe1Rw+zko4LsyS75LVr29C6xIrxAg9JHJ4pl8kaIZ396SUSNp6b4815dRZPSTAS8LlURRQ==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^1.6.0" - } - }, - "node_modules/markdown-it-container": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-it-container/-/markdown-it-container-2.0.0.tgz", - "integrity": "sha1-ABm0P9Au7+zi8ZYKKJX7qBpARpU=" - }, - "node_modules/markdown-it-emoji": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz", - "integrity": "sha1-m+4OmpkKljupbfaYDE/dsF37Tcw=" - }, - "node_modules/markdown-it-table-of-contents": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.4.4.tgz", - "integrity": "sha512-TAIHTHPwa9+ltKvKPWulm/beozQU41Ab+FIefRaQV1NRnpzwcV9QOe6wXQS5WLivm5Q/nlo0rl6laGkMDZE7Gw==", - "engines": { - "node": ">6.4.0" - } - }, - "node_modules/markdown-it/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/markdown-it/node_modules/linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", - 
"integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/mdn-data": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" - }, - "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memory-fs": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", - "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "node_modules/merge-source-map": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz", - "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", - "dependencies": { - "source-map": "^0.6.1" - } - }, - "node_modules/merge-source-map/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/micromatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": 
"https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/micromatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/miller-rabin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", - "dependencies": { - "bn.js": "^4.0.0", - "brorand": "^1.0.1" - }, - "bin": { - "miller-rabin": "bin/miller-rabin" - } - }, - "node_modules/miller-rabin/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/mime": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", - "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", - "dependencies": { - "mime-db": "1.47.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=", - "dependencies": { - "dom-walk": "^0.1.0" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.6.0.tgz", - "integrity": "sha512-79q5P7YGI6rdnVyIAV4NXpBQJFWdkzJxCim3Kog4078fM0piAaFlwocqbejdWtLW1cEzCexPrh6EdyFsPgVdAw==", - "dependencies": { - "loader-utils": "^1.1.0", - "normalize-url": "^2.0.1", - "schema-utils": "^1.0.0", - "webpack-sources": "^1.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", 
- "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "node_modules/minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" - }, - "node_modules/mississippi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz", - "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", - "dependencies": { - "concat-stream": "^1.5.0", - "duplexify": "^3.4.2", - "end-of-stream": "^1.1.0", - "flush-write-stream": "^1.0.0", - "from2": "^2.1.0", - "parallel-transform": "^1.1.0", - "pump": "^3.0.0", - "pumpify": "^1.3.3", - "stream-each": "^1.1.0", - "through2": "^2.0.0" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mixin-deep": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", - "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", - "dependencies": { - "for-in": "^1.0.2", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mixin-deep/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/move-concurrently": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", - "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=", - "dependencies": { - "aproba": "^1.1.1", - "copy-concurrently": "^1.0.0", - "fs-write-stream-atomic": "^1.0.8", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.3" - } - }, - "node_modules/move-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - 
"node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/multicast-dns": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz", - "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==", - "dependencies": { - "dns-packet": "^1.3.1", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/multicast-dns-service-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz", - "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" - }, - "node_modules/nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "optional": true - }, - "node_modules/nanomatch": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "fragment-cache": "^0.2.1", - "is-windows": "^1.0.2", - "kind-of": "^6.0.2", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" - }, - "node_modules/nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node_modules/no-case": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz", - "integrity": "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==", - "dependencies": { - "lower-case": "^1.1.1" - } - }, - "node_modules/node-forge": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", - 
"integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==", - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/node-libs-browser": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz", - "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", - "dependencies": { - "assert": "^1.1.1", - "browserify-zlib": "^0.2.0", - "buffer": "^4.3.0", - "console-browserify": "^1.1.0", - "constants-browserify": "^1.0.0", - "crypto-browserify": "^3.11.0", - "domain-browser": "^1.1.1", - "events": "^3.0.0", - "https-browserify": "^1.0.0", - "os-browserify": "^0.3.0", - "path-browserify": "0.0.1", - "process": "^0.11.10", - "punycode": "^1.2.4", - "querystring-es3": "^0.2.0", - "readable-stream": "^2.3.3", - "stream-browserify": "^2.0.1", - "stream-http": "^2.7.2", - "string_decoder": "^1.0.0", - "timers-browserify": "^2.0.4", - "tty-browserify": "0.0.0", - "url": "^0.11.0", - "util": "^0.11.0", - "vm-browserify": "^1.0.1" - } - }, - "node_modules/node-libs-browser/node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - }, - "node_modules/node-releases": { - "version": "1.1.71", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz", - "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg==" - }, - "node_modules/nopt": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", - "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "dependencies": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "dependencies": { - "path-key": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E=" - }, - "node_modules/nth-check": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz", - "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==", - "dependencies": { - "boolbase": "^1.0.0" - } - }, - 
"node_modules/num2fraction": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", - "dependencies": { - "copy-descriptor": "^0.1.0", - "define-property": "^0.2.5", - "kind-of": "^3.0.3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.9.0.tgz", - "integrity": "sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw==" - }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-visit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", - "dependencies": { - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": 
"sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/object.pick": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" - }, - "node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/opencollective-postinstall": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz", - "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==", - "bin": { - "opencollective-postinstall": "index.js" - } - }, - "node_modules/opn": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz", - "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==", - "dependencies": { - "is-wsl": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/optimize-css-assets-webpack-plugin": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz", - "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==", - "dependencies": { - "cssnano": "^4.1.10", - "last-call-webpack-plugin": "^3.0.0" - } - }, - "node_modules/original": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/original/-/original-1.0.2.tgz", - "integrity": "sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==", - "dependencies": { - "url-parse": "^1.4.3" - } - }, - "node_modules/os-browserify": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" - }, - 
"node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-retry": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-3.0.1.tgz", - "integrity": "sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w==", - "dependencies": { - "retry": "^0.12.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "node_modules/parallel-transform": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz", - "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", - "dependencies": { - "cyclist": "^1.0.1", - "inherits": "^2.0.3", - "readable-stream": "^2.1.5" - } - }, - "node_modules/param-case": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz", - "integrity": "sha1-35T9jPZTHs915r75oIWPvHK+Ikc=", - "dependencies": { - "no-case": "^2.2.0" - } - }, - "node_modules/parse-asn1": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", - "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", - "dependencies": { - 
"asn1.js": "^5.2.0", - "browserify-aes": "^1.0.0", - "evp_bytestokey": "^1.0.0", - "pbkdf2": "^3.0.3", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascalcase": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-browserify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", - "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==" - }, - "node_modules/path-dirname": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", - "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=" - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" - }, - "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "engines": { - "node": ">=4" - } - }, - "node_modules/path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "node_modules/path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": 
"sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dependencies": { - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/path-type/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/pbkdf2": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz", - "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", - "dependencies": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - }, - "engines": { - "node": ">=0.12" - } - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "node_modules/picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==", - "optional": true, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pinkie": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", - "dependencies": { - "pinkie": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/portfinder": { - "version": "1.0.28", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz", - "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==", - "dependencies": { - "async": "^2.6.2", - "debug": "^3.1.1", - "mkdirp": "^0.5.5" - }, - "engines": { - "node": ">= 0.12.0" - } - }, - "node_modules/portfinder/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/portfinder/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/portfinder/node_modules/ms": { - "version": "2.1.3", - "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/posix-character-classes": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "dependencies": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-calc": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", - "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", - "dependencies": { - "postcss": "^7.0.27", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.0.2" - } - }, - "node_modules/postcss-colormin": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", - "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", - "dependencies": { - "browserslist": "^4.0.0", - "color": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-colormin/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-convert-values": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", - "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-convert-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-discard-comments": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", - "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", - "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-empty": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", - "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", - "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-load-config": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", - "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "import-cwd": "^2.0.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-loader": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-3.0.0.tgz", - "integrity": "sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==", - "dependencies": { - "loader-utils": "^1.1.0", - "postcss": "^7.0.0", - "postcss-load-config": "^2.0.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "4.0.11", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", - "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", - "dependencies": { - "css-color-names": "0.0.4", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "stylehacks": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-longhand/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-merge-rules": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", - "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "cssnano-util-same-parent": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0", - "vendors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-rules/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": 
{ - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", - "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-font-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-gradients": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", - "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "is-color-stop": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-gradients/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-params": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", - "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "browserslist": "^4.0.0", - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "uniqs": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-params/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-selectors": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", - "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-selectors/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz", - "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==", - "dependencies": { - "postcss": "^7.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz", - "integrity": "sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0", - "postcss-value-parser": "^3.3.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-modules-scope": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz", - "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-values": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz", - "integrity": "sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==", - "dependencies": { - "icss-replace-symbols": "^1.1.0", - "postcss": "^7.0.6" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", - "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", - "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-positions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", - "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/postcss-normalize-positions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", - "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-repeat-style/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-string": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", - "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", - "dependencies": { - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-string/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", - "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-timing-functions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-unicode": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", - "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-unicode/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-url": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", - "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", - "dependencies": { - "is-absolute-url": "^2.0.0", - "normalize-url": "^3.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-url/node_modules/normalize-url": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/postcss-normalize-url/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-whitespace": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", - "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-whitespace/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-ordered-values": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", - "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-ordered-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-reduce-initial": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", - "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", - "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms/node_modules/postcss-value-parser": { - "version": 
"3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-safe-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz", - "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==", - "dependencies": { - "postcss": "^7.0.26" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", - "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", - "dependencies": { - "cssesc": "^3.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-svgo": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz", - "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "svgo": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-svgo/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-unique-selectors": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", - "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "postcss": "^7.0.0", - "uniqs": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", - "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==" - }, - "node_modules/postcss/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", - "engines": { - "node": ">=4" - } - }, - "node_modules/prettier": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", - "integrity": 
"sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", - "optional": true, - "bin": { - "prettier": "bin-prettier.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pretty-error": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz", - "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^2.0.4" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/prismjs": { - "version": "1.23.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz", - "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==", - "dependencies": { - "clipboard": "^2.0.0" - }, - "optionalDependencies": { - "clipboard": "^2.0.0" - } - }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "node_modules/promise": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", - "dependencies": { - "asap": "~2.0.3" - } - }, - "node_modules/promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" - }, - "node_modules/proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "dependencies": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/prr": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=" - }, - "node_modules/pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=" - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "node_modules/public-encrypt": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", - "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", - "dependencies": { - "bn.js": "^4.1.0", - "browserify-rsa": "^4.0.0", - "create-hash": "^1.1.0", - "parse-asn1": "^5.0.0", - "randombytes": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - 
"node_modules/public-encrypt/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/pug": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.2.tgz", - "integrity": "sha512-bp0I/hiK1D1vChHh6EfDxtndHji55XP/ZJKwsRqrz6lRia6ZC2OZbdAymlxdVFwd1L70ebrVJw4/eZ79skrIaw==", - "dependencies": { - "pug-code-gen": "^3.0.2", - "pug-filters": "^4.0.0", - "pug-lexer": "^5.0.1", - "pug-linker": "^4.0.0", - "pug-load": "^3.0.0", - "pug-parser": "^6.0.0", - "pug-runtime": "^3.0.1", - "pug-strip-comments": "^2.0.0" - } - }, - "node_modules/pug-attrs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz", - "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==", - "dependencies": { - "constantinople": "^4.0.1", - "js-stringify": "^1.0.2", - "pug-runtime": "^3.0.0" - } - }, - "node_modules/pug-code-gen": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.2.tgz", - "integrity": "sha512-nJMhW16MbiGRiyR4miDTQMRWDgKplnHyeLvioEJYbk1RsPI3FuA3saEP8uwnTb2nTJEKBU90NFVWJBk4OU5qyg==", - "dependencies": { - "constantinople": "^4.0.1", - "doctypes": "^1.1.0", - "js-stringify": "^1.0.2", - "pug-attrs": "^3.0.0", - "pug-error": "^2.0.0", - "pug-runtime": "^3.0.0", - "void-elements": "^3.1.0", - "with": "^7.0.0" - } - }, - "node_modules/pug-error": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.0.0.tgz", - "integrity": "sha512-sjiUsi9M4RAGHktC1drQfCr5C5eriu24Lfbt4s+7SykztEOwVZtbFk1RRq0tzLxcMxMYTBR+zMQaG07J/btayQ==" - }, - "node_modules/pug-filters": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz", - "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==", - "dependencies": { - "constantinople": "^4.0.1", - "jstransformer": "1.0.0", - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0", - "resolve": "^1.15.1" - } - }, - "node_modules/pug-lexer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz", - "integrity": "sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==", - "dependencies": { - "character-parser": "^2.2.0", - "is-expression": "^4.0.0", - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-linker": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz", - "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==", - "dependencies": { - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-load": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz", - "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==", - "dependencies": { - "object-assign": "^4.1.1", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-parser": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz", - "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==", - "dependencies": { - "pug-error": "^2.0.0", - "token-stream": "1.0.0" - } - }, - 
"node_modules/pug-plain-loader": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz", - "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==", - "dependencies": { - "loader-utils": "^1.1.0" - } - }, - "node_modules/pug-runtime": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz", - "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg==" - }, - "node_modules/pug-strip-comments": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz", - "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==", - "dependencies": { - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-walk": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz", - "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ==" - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/pumpify": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", - "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", - "dependencies": { - "duplexify": "^3.6.0", - "inherits": "^2.0.3", - "pump": "^2.0.0" - } - }, - "node_modules/pumpify/node_modules/pump": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", - "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/q": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", - "engines": { - "node": ">=0.6.0", - "teleport": ">=0.2.0" - } - }, - "node_modules/qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "dependencies": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - }, 
- "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/querystring": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz", - "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystring-es3": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/randomfill": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", - "dependencies": { - "randombytes": "^2.0.5", - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "dependencies": { - "bytes": "3.1.0", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/readdirp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", - "integrity": 
"sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", - "dependencies": { - "graceful-fs": "^4.1.11", - "micromatch": "^3.1.10", - "readable-stream": "^2.0.2" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/reduce": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz", - "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==", - "dependencies": { - "object-keys": "^1.1.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "node_modules/regenerate-unicode-properties": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", - "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", - "dependencies": { - "regenerate": "^1.4.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.13.7", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", - "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" - }, - "node_modules/regenerator-transform": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz", - "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regex-not": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "dependencies": { - "extend-shallow": "^3.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/regexpu-core": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz", - "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==", - 
"dependencies": { - "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.2.0", - "regjsgen": "^0.5.1", - "regjsparser": "^0.6.4", - "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/regjsgen": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", - "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==" - }, - "node_modules/regjsparser": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz", - "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remove-trailing-separator": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", - "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" - }, - "node_modules/renderkid": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz", - "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==", - "dependencies": { - "css-select": "^2.0.2", - "dom-converter": "^0.2", - "htmlparser2": "^3.10.1", - "lodash": "^4.17.20", - "strip-ansi": "^3.0.0" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/renderkid/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": 
"sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/renderkid/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/renderkid/node_modules/domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dependencies": { - "domelementtype": "1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", - "dependencies": { - "domelementtype": "^1.3.1", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^3.1.1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/renderkid/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/renderkid/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/repeat-element": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz", - "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": 
"https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/request/node_modules/qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" - }, - "node_modules/resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "dependencies": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, - "node_modules/resolve-cwd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz", - "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-from": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", - "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-url": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=" - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "engines": { - "node": ">=0.12" - } - }, - "node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", - "engines": { - "node": ">= 4" - } - }, - "node_modules/rgb-regex": { 
- "version": "1.0.1", - "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=" - }, - "node_modules/rgba-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=" - }, - "node_modules/rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "node_modules/run-queue": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz", - "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=", - "dependencies": { - "aproba": "^1.1.1" - } - }, - "node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dependencies": { - "ret": "~0.1.10" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" - }, - "node_modules/schema-utils": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - } - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz", - "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=", - "optional": true - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=" - }, - "node_modules/selfsigned": { - "version": "1.10.8", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz", - "integrity": 
"sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==", - "dependencies": { - "node-forge": "^0.10.0" - } - }, - "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", - "dependencies": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.7.2", - "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/send/node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" - }, - "node_modules/serialize-javascript": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", - "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - 
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "node_modules/serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", - "dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" - }, - "node_modules/set-value": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", - "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.3", - "split-string": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/setimmediate": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=" - }, - "node_modules/setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" - }, - "node_modules/sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - }, - "bin": { - "sha.js": "bin.js" - } - }, - "node_modules/shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dependencies": { - "shebang-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": 
"https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "node_modules/sitemap": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz", - "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==", - "dependencies": { - "lodash.chunk": "^4.2.0", - "lodash.padstart": "^4.6.1", - "whatwg-url": "^7.0.0", - "xmlbuilder": "^13.0.0" - }, - "engines": { - "node": ">=6.0.0", - "npm": ">=4.0.0" - } - }, - "node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/smoothscroll-polyfill": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz", - "integrity": "sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg==" - }, - "node_modules/snapdragon": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "dependencies": { - "base": "^0.11.1", - "debug": "^2.2.0", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "map-cache": "^0.2.2", - "source-map": "^0.5.6", - "source-map-resolve": "^0.5.0", - "use": "^3.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", - "dependencies": { - "define-property": "^1.0.0", - "isobject": "^3.0.0", - "snapdragon-util": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/snapdragon-node/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "dependencies": { - "kind-of": "^3.2.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/snapdragon/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sockjs": { - "version": "0.3.21", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz", - "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^3.4.0", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sockjs-client": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz", - "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==", - "dependencies": { - "debug": "^3.2.6", - "eventsource": "^1.0.7", - "faye-websocket": "^0.11.3", - "inherits": "^2.0.4", - "json3": "^3.3.3", - "url-parse": "^1.5.1" - } - }, - "node_modules/sockjs-client/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/sockjs-client/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", - "dependencies": { - "is-plain-obj": "^1.0.0" - }, - "engines": { - "node": 
">=4" - } - }, - "node_modules/source-list-map": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", - "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==" - }, - "node_modules/source-map": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-resolve": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "dependencies": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-url": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz", - "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==" - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/spdy-transport/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy-transport/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/spdy-transport/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/spdy/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/split-string": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "dependencies": { - "extend-shallow": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "node_modules/sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssri": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz", - "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==", - "dependencies": { - "figgy-pudding": "^3.5.1" - } - }, - "node_modules/stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" - }, - "node_modules/stack-utils": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.5.tgz", - "integrity": "sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ==", - "dependencies": { - "escape-string-regexp": "^2.0.0" - }, - "engines": { - "node": 
">=8" - } - }, - "node_modules/stack-utils/node_modules/escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/static-extend": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", - "dependencies": { - "define-property": "^0.2.5", - "object-copy": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/static-extend/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/std-env": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz", - "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==", - "dependencies": { - "ci-info": "^3.0.0" - } - }, - "node_modules/stream-browserify": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", - "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", - "dependencies": { - "inherits": "~2.0.1", - "readable-stream": "^2.0.2" - } - }, - "node_modules/stream-each": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz", - "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", - "dependencies": { - "end-of-stream": "^1.1.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/stream-http": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", - "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", - "dependencies": { - "builtin-status-codes": "^3.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.3.6", - "to-arraybuffer": "^1.0.0", - "xtend": "^4.0.0" - } - }, - "node_modules/stream-shift": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", - "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" - }, - "node_modules/strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/string-width": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string.prototype.trimend": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", - "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/string.prototype.trimstart": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", - "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stylehacks": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", - "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/stylehacks/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - 
"uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/stylus": { - "version": "0.54.8", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", - "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", - "dependencies": { - "css-parse": "~2.0.0", - "debug": "~3.1.0", - "glob": "^7.1.6", - "mkdirp": "~1.0.4", - "safer-buffer": "^2.1.2", - "sax": "~1.2.4", - "semver": "^6.3.0", - "source-map": "^0.7.3" - }, - "bin": { - "stylus": "bin/stylus" - }, - "engines": { - "node": "*" - } - }, - "node_modules/stylus-loader": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-3.0.2.tgz", - "integrity": "sha512-+VomPdZ6a0razP+zinir61yZgpw2NfljeSsdUF5kJuEzlo3khXhY19Fn6l8QQz1GRJGtMCo8nG5C04ePyV7SUA==", - "dependencies": { - "loader-utils": "^1.0.2", - "lodash.clonedeep": "^4.5.0", - "when": "~3.6.x" - } - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/svg-tags": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", - "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=" - }, - "node_modules/svgo": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "dependencies": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - "css-tree": "1.0.0-alpha.37", - "csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/svgo/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/svgo/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/svgo/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/svgo/node_modules/domelementtype": { - "version": "1.3.1", 
- "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/svgo/node_modules/domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/svgo/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/svgo/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/term-size": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", - "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", - "dependencies": { - "commander": "^2.20.0", - "source-map": "~0.6.1", - "source-map-support": "~0.5.12" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "1.4.5", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", - "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", - "dependencies": { - "cacache": "^12.0.2", - "find-cache-dir": "^2.1.0", - "is-wsl": "^1.1.0", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "source-map": "^0.6.1", - "terser": "^4.1.2", - "webpack-sources": "^1.4.0", - "worker-farm": "^1.7.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": 
">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/terser-webpack-plugin/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "node_modules/terser/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - 
"integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" - }, - "node_modules/through2": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", - "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", - "dependencies": { - "readable-stream": "~2.3.6", - "xtend": "~4.0.1" - } - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" - }, - "node_modules/timers-browserify": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", - "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", - "dependencies": { - "setimmediate": "^1.0.4" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" - }, - "node_modules/tiny-cookie": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", - "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" - }, - "node_modules/tiny-emitter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", - "optional": true - }, - "node_modules/to-arraybuffer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", - "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=" - }, - "node_modules/to-factory": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz", - "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE=" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/to-object-path": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-object-path/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "engines": { - "node": ">=6" - } - }, - "node_modules/to-regex": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": 
"sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "dependencies": { - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "regex-not": "^1.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", - "dependencies": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/token-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz", - "integrity": "sha1-zCAOqyYT9BZtJ/+a/HylbUnfbrQ=" - }, - "node_modules/toml": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz", - "integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==" - }, - "node_modules/toposort": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz", - "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk=" - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tr46": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", - "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/tty-browserify": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", - "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=" - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "node_modules/type-fest": { - "version": "0.21.3", - "resolved": 
"https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "engines": { - "node": ">=10" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" - }, - "node_modules/uglify-js": { - "version": "3.4.10", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.10.tgz", - "integrity": "sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==", - "dependencies": { - "commander": "~2.19.0", - "source-map": "~0.6.1" - }, - "bin": { - "uglifyjs": "bin/uglifyjs" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/uglify-js/node_modules/commander": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.19.0.tgz", - "integrity": "sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg==" - }, - "node_modules/uglify-js/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unbox-primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", - "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", - "dependencies": { - "function-bind": "^1.1.1", - "has-bigints": "^1.0.1", - "has-symbols": "^1.0.2", - "which-boxed-primitive": "^1.0.2" - } - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", - "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", - "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^1.0.4", - 
"unicode-property-aliases-ecmascript": "^1.0.4" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", - "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", - "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==", - "engines": { - "node": ">=4" - } - }, - "node_modules/union-value": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", - "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", - "dependencies": { - "arr-union": "^3.1.0", - "get-value": "^2.0.6", - "is-extendable": "^0.1.1", - "set-value": "^2.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" - }, - "node_modules/uniqs": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=" - }, - "node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "dependencies": { - "imurmurhash": "^0.1.4" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/unquote": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=" - }, - "node_modules/unset-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", - "dependencies": { - "has-value": "^0.3.1", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/unset-value/node_modules/has-value": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", - "dependencies": { - "get-value": "^2.0.3", - "has-values": "^0.1.4", - "isobject": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dependencies": { - "isarray": "1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-values": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/upath": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", - "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", - "engines": { - "node": ">=4", - "yarn": "*" - } - }, - "node_modules/update-notifier": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz", - "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==", - "dependencies": { - "boxen": "^4.2.0", - "chalk": "^3.0.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.1", - "is-npm": "^4.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.0.0", - "pupa": "^2.0.1", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/update-notifier/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/update-notifier/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - 
"node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/upper-case": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-1.1.3.tgz", - "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg=" - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/urix": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=" - }, - "node_modules/url": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", - "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dependencies": { - "punycode": "1.3.2", - "querystring": "0.2.0" - } - }, - "node_modules/url-loader": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-1.1.2.tgz", - "integrity": "sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==", - "dependencies": { - "loader-utils": "^1.1.0", - "mime": "^2.0.3", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/url/node_modules/punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" - }, - "node_modules/url/node_modules/querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/use": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/util": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz", - "integrity": 
"sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", - "dependencies": { - "inherits": "2.0.3" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "node_modules/util.promisify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", - "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", - "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.2", - "has-symbols": "^1.0.1", - "object.getownpropertydescriptors": "^2.1.0" - } - }, - "node_modules/util/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/v-runtime-template": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/v-runtime-template/-/v-runtime-template-1.10.0.tgz", - "integrity": "sha512-WLlq9jUepSfUrMEenw3mn7FDXX6hhbl11JjC1OKhwLzifHzVrY5a696TUHDPyj9jke3GGnR7b+2T3od/RL5cww==" - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vendors": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", - "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==" - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/vm-browserify": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" - }, - "node_modules/void-elements": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", - "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz", - "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg==" - }, - "node_modules/vue-hot-reload-api": { - "version": "2.3.4", - "resolved": 
"https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz", - "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" - }, - "node_modules/vue-loader": { - "version": "15.9.6", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz", - "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==", - "dependencies": { - "@vue/component-compiler-utils": "^3.1.0", - "hash-sum": "^1.0.2", - "loader-utils": "^1.1.0", - "vue-hot-reload-api": "^2.3.0", - "vue-style-loader": "^4.1.0" - } - }, - "node_modules/vue-router": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz", - "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw==" - }, - "node_modules/vue-server-renderer": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz", - "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==", - "dependencies": { - "chalk": "^1.1.3", - "hash-sum": "^1.0.2", - "he": "^1.1.0", - "lodash.template": "^4.5.0", - "lodash.uniq": "^4.5.0", - "resolve": "^1.2.0", - "serialize-javascript": "^3.1.0", - "source-map": "0.5.6" - } - }, - "node_modules/vue-server-renderer/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/serialize-javascript": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz", - "integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/vue-server-renderer/node_modules/source-map": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz", - "integrity": "sha1-dc449SvwczxafwwRjYEzSiu19BI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/vue-style-loader": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.3.tgz", - "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==", - "dependencies": { - "hash-sum": "^1.0.2", - "loader-utils": "^1.0.2" - } - }, - "node_modules/vue-template-compiler": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz", - "integrity": 
"sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==", - "dependencies": { - "de-indent": "^1.0.2", - "he": "^1.1.0" - } - }, - "node_modules/vue-template-es2015-compiler": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz", - "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" - }, - "node_modules/vuepress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz", - "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==", - "hasInstallScript": true, - "dependencies": { - "@vuepress/core": "1.8.2", - "@vuepress/theme-default": "1.8.2", - "cac": "^6.5.6", - "envinfo": "^7.2.0", - "opencollective-postinstall": "^2.0.2", - "update-notifier": "^4.0.0" - }, - "bin": { - "vuepress": "cli.js" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/vuepress-html-webpack-plugin": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz", - "integrity": "sha512-BebAEl1BmWlro3+VyDhIOCY6Gef2MCBllEVAP3NUAtMguiyOwo/dClbwJ167WYmcxHJKLl7b0Chr9H7fpn1d0A==", - "dependencies": { - "html-minifier": "^3.2.3", - "loader-utils": "^0.2.16", - "lodash": "^4.17.3", - "pretty-error": "^2.0.2", - "tapable": "^1.0.0", - "toposort": "^1.0.0", - "util.promisify": "1.0.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/big.js": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", - "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==", - "engines": { - "node": "*" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/loader-utils": { - "version": "0.2.17", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz", - "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=", - "dependencies": { - "big.js": "^3.1.3", - "emojis-list": "^2.0.0", - "json5": "^0.5.0", - "object-assign": "^4.0.1" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/util.promisify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", - "dependencies": { - "define-properties": "^1.1.2", - "object.getownpropertydescriptors": "^2.0.3" - } - }, - "node_modules/vuepress-plugin-container": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz", - "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==", - "dependencies": { - "@vuepress/shared-utils": "^1.2.0", - 
"markdown-it-container": "^2.0.0" - } - }, - "node_modules/vuepress-plugin-google-tag-manager": { - "version": "0.0.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz", - "integrity": "sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg==" - }, - "node_modules/vuepress-plugin-sitemap": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz", - "integrity": "sha512-n+8lbukhrKrsI9H/EX0EBgkE1pn85LAQFvQ5dIvrZP4Kz6JxPOPPNTQmZMhahQV1tXbLZQCEN7A1WZH4x+arJQ==", - "dependencies": { - "sitemap": "^3.0.0" - }, - "bin": { - "vuepress-sitemap": "cli.js" - } - }, - "node_modules/vuepress-plugin-smooth-scroll": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz", - "integrity": "sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==", - "dependencies": { - "smoothscroll-polyfill": "^0.4.3" - } - }, - "node_modules/vuepress-theme-cosmos": { - "version": "1.0.182", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz", - "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==", - "dependencies": { - "@cosmos-ui/vue": "^0.35.0", - "@vuepress/plugin-google-analytics": "1.7.1", - "algoliasearch": "^4.2.0", - "axios": "^0.21.0", - "cheerio": "^1.0.0-rc.3", - "clipboard-copy": "^3.1.0", - "entities": "2.1.0", - "esm": "^3.2.25", - "gray-matter": "^4.0.2", - "hotkeys-js": "3.8.1", - "jsonp": "^0.2.1", - "markdown-it": "^12.0.0", - "markdown-it-attrs": "^3.0.3", - "prismjs": "^1.22.0", - "pug": "^3.0.1", - "pug-plain-loader": "^1.0.0", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "tiny-cookie": "^2.3.2", - "v-runtime-template": "^1.10.0", - "vuepress": "^1.5.4", - "vuepress-plugin-google-tag-manager": "0.0.5", - "vuepress-plugin-sitemap": "^2.3.1" - } - }, - "node_modules/watchpack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.2.0.tgz", - "integrity": "sha512-up4YAn/XHgZHIxFBVCdlMiWDj6WaLKpwVeGQk2I5thdYxF/KmF0aaz6TfJZ/hfl1h/XlcDr7k1KH7ThDagpFaA==", - "dev": true, - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/watchpack-chokidar2": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz", - "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==", - "optional": true, - "dependencies": { - "chokidar": "^2.1.8" - } - }, - "node_modules/watchpack/node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", - "dev": true - }, - "node_modules/wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/webidl-conversions": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", - "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" - }, - "node_modules/webpack": { - "version": "4.46.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.46.0.tgz", - "integrity": "sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/wasm-edit": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "acorn": "^6.4.1", - "ajv": "^6.10.2", - "ajv-keywords": "^3.4.1", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^4.5.0", - "eslint-scope": "^4.0.3", - "json-parse-better-errors": "^1.0.2", - "loader-runner": "^2.4.0", - "loader-utils": "^1.2.3", - "memory-fs": "^0.4.1", - "micromatch": "^3.1.10", - "mkdirp": "^0.5.3", - "neo-async": "^2.6.1", - "node-libs-browser": "^2.2.1", - "schema-utils": "^1.0.0", - "tapable": "^1.1.3", - "terser-webpack-plugin": "^1.4.3", - "watchpack": "^1.7.4", - "webpack-sources": "^1.4.1" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/webpack-chain": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz", - "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-chain/node_modules/javascript-stringify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.0.1.tgz", - "integrity": "sha512-yV+gqbd5vaOYjqlbk16EG89xB5udgjqQF3C5FAORDg4f/IS1Yc5ERCv5e/57yBcfJYw05V5JyIXabhwb75Xxow==" - }, - "node_modules/webpack-dev-middleware": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.7.3.tgz", - "integrity": "sha512-djelc/zGiz9nZj/U7PTBi2ViorGJXEWo/3ltkPbDyxCXhhEXkW0ce99falaok4TPj+AsxLiXJR0EBOb0zh9fKQ==", - "dependencies": { - "memory-fs": "^0.4.1", - "mime": "^2.4.4", - "mkdirp": "^0.5.1", - "range-parser": "^1.2.1", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack-dev-server": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz", - "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==", - "dependencies": { - "ansi-html": "0.0.7", - "bonjour": "^3.5.0", - "chokidar": "^2.1.8", - "compression": "^1.7.4", - "connect-history-api-fallback": "^1.6.0", - "debug": "^4.1.1", - "del": "^4.1.1", - "express": "^4.17.1", - "html-entities": "^1.3.1", - "http-proxy-middleware": "0.19.1", - "import-local": "^2.0.0", - "internal-ip": "^4.3.0", - "ip": "^1.1.5", - "is-absolute-url": "^3.0.3", - "killable": "^1.0.1", - "loglevel": "^1.6.8", - "opn": "^5.5.0", - "p-retry": "^3.0.1", - "portfinder": "^1.0.26", - 
"schema-utils": "^1.0.0", - "selfsigned": "^1.10.8", - "semver": "^6.3.0", - "serve-index": "^1.9.1", - "sockjs": "^0.3.21", - "sockjs-client": "^1.5.0", - "spdy": "^4.0.2", - "strip-ansi": "^3.0.1", - "supports-color": "^6.1.0", - "url": "^0.11.0", - "webpack-dev-middleware": "^3.7.2", - "webpack-log": "^2.0.0", - "ws": "^6.2.1", - "yargs": "^13.3.2" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 6.11.5" - } - }, - "node_modules/webpack-dev-server/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/webpack-dev-server/node_modules/is-absolute-url": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", - "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-dev-server/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack-dev-server/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/webpack-log": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-2.0.0.tgz", - "integrity": "sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==", - "dependencies": { - "ansi-colors": "^3.0.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-merge": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz", - "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==", - "dependencies": { - "lodash": "^4.17.15" - } - }, - "node_modules/webpack-sources": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz", - "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", - "dependencies": { - "source-list-map": "^2.0.0", - "source-map": "~0.6.1" - } - }, - "node_modules/webpack-sources/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/webpack/node_modules/acorn": { - "version": "6.4.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", - "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/webpack/node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "optional": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/webpack/node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "optional": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", - "optional": true, - "dependencies": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" - }, - "engines": { - "node": ">= 8.10.0" - } - }, - "node_modules/webpack/node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "optional": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/webpack/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "optional": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack/node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "optional": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/webpack/node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "optional": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/webpack/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack/node_modules/readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", - "optional": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack/node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "optional": true, - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/webpack/node_modules/watchpack": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz", - "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==", - "dependencies": { - "chokidar": "^3.4.1", - "graceful-fs": "^4.1.2", - "neo-async": "^2.5.0", - "watchpack-chokidar2": "^2.0.1" - }, - "optionalDependencies": { - "chokidar": "^3.4.1", - "watchpack-chokidar2": "^2.0.1" - } - }, - "node_modules/webpackbar": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-3.2.0.tgz", - "integrity": "sha512-PC4o+1c8gWWileUfwabe0gqptlXUDJd5E0zbpr2xHP1VSOVlZVPBZ8j6NCR8zM5zbKdxPhctHXahgpNK1qFDPw==", - "dependencies": { - "ansi-escapes": "^4.1.0", - "chalk": "^2.4.1", - "consola": "^2.6.0", - "figures": "^3.0.0", - "pretty-time": "^1.1.0", - "std-env": "^2.2.1", - "text-table": "^0.2.0", - "wrap-ansi": "^5.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - 
"engines": { - "node": ">=0.8.0" - } - }, - "node_modules/whatwg-url": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", - "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", - "dependencies": { - "lodash.sortby": "^4.7.0", - "tr46": "^1.0.1", - "webidl-conversions": "^4.0.2" - } - }, - "node_modules/when": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/when/-/when-3.6.4.tgz", - "integrity": "sha1-RztRfsFZ4rhQBUl6E5g/CVQS404=" - }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "node_modules/which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, - "node_modules/widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/widest-line/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - 
"engines": { - "node": ">=8" - } - }, - "node_modules/with": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz", - "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==", - "dependencies": { - "@babel/parser": "^7.9.6", - "@babel/types": "^7.9.6", - "assert-never": "^1.2.1", - "babel-walk": "3.0.0-canary-5" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/worker-farm": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz", - "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", - "dependencies": { - "errno": "~0.1.7" - } - }, - "node_modules/wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", - "dependencies": { - "async-limiter": "~1.0.0" - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/xmlbuilder": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz", - "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "engines": { - "node": ">=0.4" - } - }, - "node_modules/y18n": { - 
"version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" - }, - "node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - } - }, - "node_modules/yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "node_modules/yargs-parser/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/zepto": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz", - "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g=" - } - }, "dependencies": { "@algolia/cache-browser-local-storage": { "version": "4.8.6", @@ -21477,14 +8944,6 @@ "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": 
"sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - }, "string-width": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", @@ -21528,6 +8987,14 @@ "define-properties": "^1.1.3" } }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, "strip-ansi": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", diff --git a/docs/roadmap/README.md b/docs/roadmap/README.md new file mode 100644 index 000000000..bd9280c45 --- /dev/null +++ b/docs/roadmap/README.md @@ -0,0 +1,6 @@ +--- +order: false +parent: + title: Roadmap + order: 7 +--- \ No newline at end of file diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md index e6e5a32fe..19d9d89fb 100644 --- a/docs/roadmap/roadmap.md +++ b/docs/roadmap/roadmap.md @@ -1,8 +1,5 @@ --- -order: false -parent: - title: Roadmap - order: 7 +order: 1 --- # Tendermint Roadmap diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index c4c54a514..0de5ed908 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -5,18 +5,20 @@ parent: order: 5 --- +# Overview + This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) -- [Block Sync](./block-sync.md) -- [State Sync](./state-sync.md) -- [Mempool](./mempool.md) +- [Block Sync](./block-sync/README.md) +- [State Sync](./state-sync/README.md) +- [Mempool](./mempool/README.md) - [Light Client](./light-client.md) - [Consensus](./consensus/README.md) -- [Pex](./pex/README.md) +- [Peer Exachange (PEX)](./pex/README.md) - [Evidence](./evidence/README.md) For full specifications refer to the [spec repo](https://github.com/tendermint/spec). diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md index 78e63ee22..bd7def551 100644 --- a/docs/tendermint-core/consensus/README.md +++ b/docs/tendermint-core/consensus/README.md @@ -5,6 +5,7 @@ parent: order: 6 --- +# Consensus Tendermint Consensus is a distributed protocol executed by validator processes to agree on the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where diff --git a/docs/tools/remote-signer-validation.md b/docs/tools/remote-signer-validation.md index 80a6a64bc..ab5500860 100644 --- a/docs/tools/remote-signer-validation.md +++ b/docs/tools/remote-signer-validation.md @@ -1,4 +1,4 @@ -# tm-signer-harness +# Remote Signer Located under the `tools/tm-signer-harness` folder in the [Tendermint repository](https://github.com/tendermint/tendermint). From 4725b00af1fa0adad5eb90e9560dd13914b68979 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Oct 2021 08:54:53 +0000 Subject: [PATCH 122/174] build(deps): Bump prismjs from 1.23.0 to 1.25.0 in /docs (#7168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [prismjs](https://github.com/PrismJS/prism) from 1.23.0 to 1.25.0.

- [Release notes](https://github.com/PrismJS/prism/releases)
- [Changelog](https://github.com/PrismJS/prism/blob/master/CHANGELOG.md)
- [Commits](https://github.com/PrismJS/prism/compare/v1.23.0...v1.25.0)


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=prismjs&package-manager=npm_and_yarn&previous-version=1.23.0&new-version=1.25.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/tendermint/tendermint/network/alerts).
---
 docs/package-lock.json | 47 +++---------------------------------------
 1 file changed, 3 insertions(+), 44 deletions(-)

diff --git a/docs/package-lock.json b/docs/package-lock.json
index 37938ad99..c5cf96e86 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -2959,17 +2959,6 @@
       "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
       "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw=="
     },
-    "clipboard": {
-      "version": "2.0.8",
-      "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz",
-      "integrity": "sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==",
-      "optional": true,
-      "requires": {
-        "good-listener": "^1.2.2",
-        "select": "^1.1.2",
-        "tiny-emitter": "^2.0.0"
-      }
-    },
     "clipboard-copy": {
       "version": "3.2.0",
       "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz",
@@ -3875,12 +3864,6 @@
       "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
       "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk="
     },
-    "delegate": {
-      "version": "3.2.0",
-      "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz",
-      "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==",
-      "optional": true
-    },
     "depd": {
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
@@ -4863,15 +4846,6 @@
         "slash": "^2.0.0"
       }
     },
-    "good-listener": {
-      "version": "1.2.2",
-      "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz",
-      "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=",
-      "optional": true,
-      "requires": {
-        "delegate": "^3.1.2"
-      }
-    },
     "got": {
       "version": "9.6.0",
       "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
@@ -7544,12 +7518,9 @@
       "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA=="
     },
     "prismjs": {
-      "version": "1.23.0",
-      "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz",
-      "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==",
-      "requires": {
-        "clipboard": "^2.0.0"
-      }
+      "version": "1.25.0",
+      "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.25.0.tgz",
+      "integrity": "sha512-WCjJHl1KEWbnkQom1+SzftbtXMKQoezOCYs5rECqMN+jP+apI7ftoflyqigqzopSO3hMhTEb0mFClA8lkolgEg=="
     },
     "process": {
       "version": "0.11.10",
@@ -8308,12 +8279,6 @@
         "kind-of": "^6.0.0"
      }
     },
-    "select": {
-      "version": "1.1.2",
-      "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz",
-      "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=",
-      "optional": true
-    },
     "select-hose": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
@@ -9331,12 +9296,6 @@
       "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz",
       "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg=="
     },
-    "tiny-emitter": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz",
-      "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==",
-      "optional": true
-    },
     "to-arraybuffer": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz",

From 2416ff3be9276c97cc1542881803ae05e056a56b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 27 Oct 2021 08:56:03 +0000
Subject: [PATCH 123/174] build(deps): Bump postcss from 7.0.35 to 7.0.39 in
 /docs (#7167)

Bumps [postcss](https://github.com/postcss/postcss) from 7.0.35 to 7.0.39.
Release notes and changelog (sourced from postcss's releases and changelog):

- 7.0.39: Reduce package size. Backport nanocolors to picocolors migration.
- 7.0.38: Update Processor#version.
- 7.0.37: Backport chalk to nanocolors migration.
- 7.0.36: Backport ReDoS vulnerabilities from PostCSS 8.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=postcss&package-manager=npm_and_yarn&previous-version=7.0.35&new-version=7.0.39)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
---
 docs/package-lock.json | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/docs/package-lock.json b/docs/package-lock.json
index c5cf96e86..e8589c2f1 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -6877,6 +6877,11 @@
       "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
       "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns="
     },
+    "picocolors": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz",
+      "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA=="
+    },
     "picomatch": {
       "version": "2.2.3",
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz",
@@ -6948,27 +6953,18 @@
       "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs="
     },
     "postcss": {
-      "version": "7.0.35",
-      "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz",
-      "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==",
+      "version": "7.0.39",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz",
+      "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==",
       "requires": {
-        "chalk": "^2.4.2",
-        "source-map": "^0.6.1",
-        "supports-color": "^6.1.0"
+        "picocolors": "^0.2.1",
+        "source-map": "^0.6.1"
       },
       "dependencies": {
         "source-map": {
           "version": "0.6.1",
           "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
           "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
-        },
-        "supports-color": {
-          "version": "6.1.0",
-          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
-          "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
-          "requires": {
-            "has-flag": "^3.0.0"
-          }
         }
       }
     },

From ab237091bbacd7eef2ac327c0acf7fd74c3645d0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 27 Oct 2021 08:57:58 +0000
Subject: [PATCH 124/174] build(deps): Bump ws from 6.2.1 to 6.2.2 in /docs
 (#7165)

Bumps [ws](https://github.com/websockets/ws) from 6.2.1 to 6.2.2.
Release notes (sourced from ws's releases):

- 6.2.2 bug fixes: backported 00c425ec to the 6.x release line (78c676d2).

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ws&package-manager=npm_and_yarn&previous-version=6.2.1&new-version=6.2.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
---
 docs/package-lock.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/package-lock.json b/docs/package-lock.json
index e8589c2f1..9bc09f322 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -10595,9 +10595,9 @@
       }
     },
     "ws": {
-      "version": "6.2.1",
-      "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz",
-      "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==",
+      "version": "6.2.2",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz",
+      "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==",
       "requires": {
         "async-limiter": "~1.0.0"
       }

From 0dd597089487ce6d9c171c51a36c7ff347df6707 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 27 Oct 2021 08:59:48 +0000
Subject: [PATCH 125/174] build(deps): Bump path-parse from 1.0.6 to 1.0.7 in
 /docs (#7164)

Bumps [path-parse](https://github.com/jbgutierrez/path-parse) from 1.0.6 to 1.0.7.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=path-parse&package-manager=npm_and_yarn&previous-version=1.0.6&new-version=1.0.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
---
 docs/package-lock.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/package-lock.json b/docs/package-lock.json
index 9bc09f322..f8f68c88e 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -6836,9 +6836,9 @@
       "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A="
     },
     "path-parse": {
-      "version": "1.0.6",
-      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
-      "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+      "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
     },
     "path-to-regexp": {
       "version": "0.1.7",

From e5d019d51b940cdb6c9b0df97f347cd094bc04cd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 27 Oct 2021 09:01:14 +0000
Subject: [PATCH 126/174] build(deps): Bump url-parse from 1.5.1 to 1.5.3 in
 /docs (#7166)

Bumps [url-parse](https://github.com/unshiftio/url-parse) from 1.5.1 to 1.5.3.
Commits:

- ad44493 [dist] 1.5.3
- c798461 [fix] Fix host parsing for file URLs (#210)
- 201034b [dist] 1.5.2
- 2d9ac2c [fix] Sanitize only special URLs (#209)
- fb128af [fix] Use 'null' as origin for non special URLs
- fed6d9e [fix] Add a leading slash only if the URL is special
- 94872e7 [fix] Do not incorrectly set the slashes property to true
- 81ab967 [fix] Ignore slashes after the protocol for special URLs
- ee22050 [ci] Use GitHub Actions
- d2979b5 [fix] Special case the file: protocol (#204)
- Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=url-parse&package-manager=npm_and_yarn&previous-version=1.5.1&new-version=1.5.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
---
 docs/package-lock.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/package-lock.json b/docs/package-lock.json
index f8f68c88e..c6f21bd6b 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -9740,9 +9740,9 @@
       }
     },
     "url-parse": {
-      "version": "1.5.1",
-      "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz",
-      "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==",
+      "version": "1.5.3",
+      "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz",
+      "integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==",
       "requires": {
         "querystringify": "^2.1.1",
         "requires-port": "^1.0.0"

From 93eb940dcdc37e959f054479e9741c190bd4ca6c Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Wed, 27 Oct 2021 14:46:18 +0200
Subject: [PATCH 127/174] config: WriteConfigFile should return error (#7169)

---
 CHANGELOG_PENDING.md                          |   1 +
 cmd/tendermint/commands/init.go               |   4 +-
 cmd/tendermint/commands/testnet.go            |   4 +-
 config/toml.go                                |  45 +++++---
 config/toml_test.go                           |   9 +-
 internal/blocksync/reactor_test.go            |  13 ++-
 internal/consensus/byzantine_test.go          |   4 +-
 internal/consensus/common_test.go             |  60 ++++++----
 internal/consensus/mempool_test.go            |   9 +-
 internal/consensus/reactor_test.go            |   3 +-
 internal/consensus/replay_test.go             |  15 ++-
 internal/consensus/state_test.go              | 105 ++++++++++++------
 .../consensus/types/height_vote_set_test.go   |   6 +-
 internal/consensus/wal_generator.go           |   3 +-
 internal/inspect/inspect_test.go              |   7 +-
 internal/mempool/v0/bench_test.go             |  14 ++-
 internal/mempool/v0/cache_test.go             |   3 +-
 internal/mempool/v0/clist_mempool_test.go     |  50 ++++++---
 internal/mempool/v0/reactor_test.go           |   3 +-
 internal/mempool/v1/mempool_test.go           |   3 +-
 internal/mempool/v1/reactor_test.go           |   7 +-
 internal/state/state_test.go                  |   4 +-
 internal/state/store_test.go                  |   4 +-
 internal/store/store_test.go                  |  14 ++-
 light/example_test.go                         |   6 +-
 light/light_test.go                           |   6 +-
 light/provider/http/http_test.go              |   3 +-
 node/node_test.go                             |  45 +++++---
 rpc/client/examples_test.go                   |   6 +-
 rpc/client/main_test.go                       |   3 +-
 rpc/test/helpers.go                           |   9 +-
 test/e2e/runner/setup.go                      |   4 +-
 32 files changed, 318 insertions(+), 154 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 98636a852..523c8480b 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -27,6 +27,7 @@ Special thanks to external contributors on this release:
   - [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. (@tychoish)
   - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
+  - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish)

 - Blockchain Protocol

diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go
index bc94f763b..02e400a0a 100644
--- a/cmd/tendermint/commands/init.go
+++ b/cmd/tendermint/commands/init.go
@@ -121,7 +121,9 @@ func initFilesWithConfig(config *cfg.Config) error {
 	}

 	// write config file
-	cfg.WriteConfigFile(config.RootDir, config)
+	if err := cfg.WriteConfigFile(config.RootDir, config); err != nil {
+		return err
+	}
 	logger.Info("Generated config", "mode", config.Mode)

 	return nil
diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go
index ef46f5428..95955dd9b 100644
--- a/cmd/tendermint/commands/testnet.go
+++ b/cmd/tendermint/commands/testnet.go
@@ -239,7 +239,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 		}

 		config.Moniker = moniker(i)

-		cfg.WriteConfigFile(nodeDir, config)
+		if err := cfg.WriteConfigFile(nodeDir, config); err != nil {
+			return err
+		}
 	}

 	fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
diff --git a/config/toml.go b/config/toml.go
index dc462243a..94c1907e8 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -45,23 +45,24 @@ func EnsureRoot(rootDir string) {

 // WriteConfigFile renders config using the template and writes it to configFilePath.
 // This function is called by cmd/tendermint/commands/init.go
-func WriteConfigFile(rootDir string, config *Config) {
+func WriteConfigFile(rootDir string, config *Config) error {
 	var buffer bytes.Buffer

 	if err := configTemplate.Execute(&buffer, config); err != nil {
-		panic(err)
+		return err
 	}

 	configFilePath := filepath.Join(rootDir, defaultConfigFilePath)

-	mustWriteFile(configFilePath, buffer.Bytes(), 0644)
+	return writeFile(configFilePath, buffer.Bytes(), 0644)
 }

-func writeDefaultConfigFileIfNone(rootDir string) {
+func writeDefaultConfigFileIfNone(rootDir string) error {
 	configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
 	if !tmos.FileExists(configFilePath) {
-		WriteConfigFile(rootDir, DefaultConfig())
+		return WriteConfigFile(rootDir, DefaultConfig())
 	}
+	return nil
 }

 // Note: any changes to the comments/variables/mapstructure
@@ -503,22 +504,22 @@ namespace = "{{ .Instrumentation.Namespace }}"

 /****** these are for test settings ***********/

-func ResetTestRoot(testName string) *Config {
+func ResetTestRoot(testName string) (*Config, error) {
 	return ResetTestRootWithChainID(testName, "")
 }

-func ResetTestRootWithChainID(testName string, chainID string) *Config {
+func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) {
 	// create a unique, concurrency-safe test directory under os.TempDir()
 	rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
 	if err != nil {
-		panic(err)
+		return nil, err
 	}
 	// ensure config and data subdirs are created
 	if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
-		panic(err)
+		return nil, err
 	}
 	if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
-		panic(err)
+		return nil, err
 	}

 	conf := DefaultConfig()
@@ -527,26 +528,36 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
 	privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)

 	// Write default config file if missing.
-	writeDefaultConfigFileIfNone(rootDir)
+	if err := writeDefaultConfigFileIfNone(rootDir); err != nil {
+		return nil, err
+	}
+
 	if !tmos.FileExists(genesisFilePath) {
 		if chainID == "" {
 			chainID = "tendermint_test"
 		}
 		testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
-		mustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
+		if err := writeFile(genesisFilePath, []byte(testGenesis), 0644); err != nil {
+			return nil, err
+		}
 	}
 	// we always overwrite the priv val
-	mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
-	mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
+	if err := writeFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644); err != nil {
+		return nil, err
+	}
+	if err := writeFile(privStateFilePath, []byte(testPrivValidatorState), 0644); err != nil {
+		return nil, err
+	}

 	config := TestConfig().SetRoot(rootDir)
-	return config
+	return config, nil
 }

-func mustWriteFile(filePath string, contents []byte, mode os.FileMode) {
+func writeFile(filePath string, contents []byte, mode os.FileMode) error {
 	if err := ioutil.WriteFile(filePath, contents, mode); err != nil {
-		tmos.Exit(fmt.Sprintf("failed to write file: %v", err))
+		return fmt.Errorf("failed to write file: %w", err)
 	}
+	return nil
 }

 var testGenesisFmt = `{
diff --git a/config/toml_test.go b/config/toml_test.go
index d46cf9ce7..9af6e4f35 100644
--- a/config/toml_test.go
+++ b/config/toml_test.go
@@ -24,17 +24,17 @@ func TestEnsureRoot(t *testing.T) {

 	// setup temp dir for test
 	tmpDir, err := ioutil.TempDir("", "config-test")
-	require.Nil(err)
+	require.NoError(err)
 	defer os.RemoveAll(tmpDir)

 	// create root dir
 	EnsureRoot(tmpDir)

-	WriteConfigFile(tmpDir, DefaultConfig())
+	require.NoError(WriteConfigFile(tmpDir, DefaultConfig()))

 	// make sure config is set properly
 	data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
-	require.Nil(err)
+	require.NoError(err)

 	checkConfig(t, string(data))

@@ -47,7 +47,8 @@ func TestEnsureTestRoot(t *testing.T) {
 	testName := "ensureTestRoot"

 	// create root dir
-	cfg := ResetTestRoot(testName)
+	cfg, err := ResetTestRoot(testName)
+	require.NoError(err)
 	defer os.RemoveAll(cfg.RootDir)
 	rootDir := cfg.RootDir
diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go
index 6bca8d4a9..78e61020c 100644
--- a/internal/blocksync/reactor_test.go
+++ b/internal/blocksync/reactor_test.go
@@ -182,7 +182,8 @@ func (rts *reactorTestSuite) start(t *testing.T) {
 }

 func TestReactor_AbruptDisconnect(t *testing.T) {
-	cfg := config.ResetTestRoot("block_sync_reactor_test")
+	cfg, err := config.ResetTestRoot("block_sync_reactor_test")
+	require.NoError(t, err)
 	defer os.RemoveAll(cfg.RootDir)

 	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
@@ -217,7 +218,8 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
 }

 func TestReactor_SyncTime(t *testing.T) {
-	cfg := config.ResetTestRoot("block_sync_reactor_test")
+	cfg, err := config.ResetTestRoot("block_sync_reactor_test")
+	require.NoError(t, err)
 	defer os.RemoveAll(cfg.RootDir)

 	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
@@ -240,7 +242,9 @@ func TestReactor_SyncTime(t *testing.T) {
 }

 func TestReactor_NoBlockResponse(t *testing.T) {
-	cfg := config.ResetTestRoot("block_sync_reactor_test")
+	cfg, err := config.ResetTestRoot("block_sync_reactor_test")
+	require.NoError(t, err)
+
 	defer os.RemoveAll(cfg.RootDir)

 	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
@@ -287,7 +291,8 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
 	// See: https://github.com/tendermint/tendermint/issues/6005
 	t.SkipNow()

-	cfg := config.ResetTestRoot("block_sync_reactor_test")
+	cfg, err := config.ResetTestRoot("block_sync_reactor_test")
+	require.NoError(t, err)
 	defer os.RemoveAll(cfg.RootDir)

 	maxBlockHeight := int64(48)
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go
index a826ef79b..ab8d12205 100644
--- a/internal/consensus/byzantine_test.go
+++ b/internal/consensus/byzantine_test.go
@@ -50,7 +50,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		require.NoError(t, err)
 		require.NoError(t, stateStore.Save(state))

-		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		require.NoError(t, err)
+
 		defer os.RemoveAll(thisConfig.RootDir)

 		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go
index 8b54f6026..8799d87d4 100644
--- a/internal/consensus/common_test.go
+++ b/internal/consensus/common_test.go
@@ -50,20 +50,26 @@ type cleanupFunc func()

 func configSetup(t *testing.T) *config.Config {
 	t.Helper()

-	cfg := ResetConfig("consensus_reactor_test")
+	cfg, err := ResetConfig("consensus_reactor_test")
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })

-	consensusReplayConfig := ResetConfig("consensus_replay_test")
-	configStateTest := ResetConfig("consensus_state_test")
-	configMempoolTest := ResetConfig("consensus_mempool_test")
-	configByzantineTest := ResetConfig("consensus_byzantine_test")
+	consensusReplayConfig, err := ResetConfig("consensus_replay_test")
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) })
+
+	configStateTest, err := ResetConfig("consensus_state_test")
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) })
+
+	configMempoolTest, err := ResetConfig("consensus_mempool_test")
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) })
+
+	configByzantineTest, err := ResetConfig("consensus_byzantine_test")
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) })

-	t.Cleanup(func() {
-		os.RemoveAll(cfg.RootDir)
-		os.RemoveAll(consensusReplayConfig.RootDir)
-		os.RemoveAll(configStateTest.RootDir)
-		os.RemoveAll(configMempoolTest.RootDir)
-		os.RemoveAll(configByzantineTest.RootDir)
-	})
 	return cfg
 }

@@ -73,7 +79,7 @@ func ensureDir(dir string, mode os.FileMode) {
 	}
 }

-func ResetConfig(name string) *config.Config {
+func ResetConfig(name string) (*config.Config, error) {
 	return config.ResetTestRoot(name)
 }

@@ -384,9 +390,12 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
 //-------------------------------------------------------------------------------
 // consensus states

-func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
-	cfg := config.ResetTestRoot("consensus_state_test")
-	return newStateWithConfig(cfg, state, pv, app)
+func newState(state sm.State, pv types.PrivValidator, app abci.Application) (*State, error) {
+	cfg, err := config.ResetTestRoot("consensus_state_test")
+	if err != nil {
+		return nil, err
+	}
+	return newStateWithConfig(cfg, state, pv, app), nil
 }

 func newStateWithConfig(
@@ -454,13 +463,16 @@ func loadPrivValidator(cfg *config.Config) *privval.FilePV {
 	return privValidator
 }

-func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) {
+func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub, error) {
 	// Get State
 	state, privVals := randGenesisState(cfg, nValidators, false, 10)

 	vss := make([]*validatorStub, nValidators)

-	cs := newState(state, privVals[0], kvstore.NewApplication())
+	cs, err := newState(state, privVals[0], kvstore.NewApplication())
+	if err != nil {
+		return nil, nil, err
+	}

 	for i := 0; i < nValidators; i++ {
 		vss[i] = newValidatorStub(privVals[i], int32(i))
@@ -468,7 +480,7 @@ func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) {
 	// since cs1 starts at 1
 	incrementHeight(vss[1:]...)

-	return cs, vss
+	return cs, vss, nil
 }

 //-------------------------------------------------------------------------------
@@ -722,7 +734,9 @@ func randConsensusState(
 		blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
 		state, err := sm.MakeGenesisState(genDoc)
 		require.NoError(t, err)
-		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		require.NoError(t, err)
+
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)

 		for _, opt := range configOpts {
@@ -772,7 +786,11 @@ func randConsensusNetWithPeers(
 	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		state, _ := sm.MakeGenesisState(genDoc)
-		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		if err != nil {
+			panic(err)
+		}
+
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		if i == 0 {
diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go
index 4b2037469..558dbd4b3 100644
--- a/internal/consensus/mempool_test.go
+++ b/internal/consensus/mempool_test.go
@@ -28,7 +28,8 @@ func assertMempool(txn txNotifier) mempool.Mempool {
 func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 	baseConfig := configSetup(t)

-	config := ResetConfig("consensus_mempool_txs_available_test")
+	config, err := ResetConfig("consensus_mempool_txs_available_test")
+	require.NoError(t, err)
 	t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

 	config.Consensus.CreateEmptyBlocks = false
@@ -50,7 +51,8 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 	baseConfig := configSetup(t)

-	config := ResetConfig("consensus_mempool_txs_available_test")
+	config, err := ResetConfig("consensus_mempool_txs_available_test")
+	require.NoError(t, err)
 	t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

 	config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
@@ -70,7 +72,8 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 func TestMempoolProgressInHigherRound(t *testing.T) {
 	baseConfig := configSetup(t)

-	config := ResetConfig("consensus_mempool_txs_available_test")
+	config, err := ResetConfig("consensus_mempool_txs_available_test")
+	require.NoError(t, err)
 	t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

 	config.Consensus.CreateEmptyBlocks = false
diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go
index 16fa13969..e17404960 100644
--- a/internal/consensus/reactor_test.go
+++ b/internal/consensus/reactor_test.go
@@ -334,7 +334,8 @@ func TestReactorWithEvidence(t *testing.T) {
 		stateStore := sm.NewStore(stateDB)
 		state, err := sm.MakeGenesisState(genDoc)
 		require.NoError(t, err)
-		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		require.NoError(t, err)
 		defer os.RemoveAll(thisConfig.RootDir)
diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go
index 0d0ae36e8..f72da903b 100644
--- a/internal/consensus/replay_test.go
+++ b/internal/consensus/replay_test.go
@@ -131,7 +131,8 @@ func TestWALCrash(t *testing.T) {
 	for _, tc := range testCases {
 		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
-			consensusReplayConfig := ResetConfig(tc.name)
+			consensusReplayConfig, err := ResetConfig(tc.name)
+			require.NoError(t, err)
 			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
 		})
 	}
@@ -690,7 +691,8 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
 	cfg := sim.Config

 	if testValidatorsChange {
-		testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
+		testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
+		require.NoError(t, err)
 		defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
 		stateDB = dbm.NewMemDB()
@@ -700,7 +702,8 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
 		commits = sim.Commits
 		store = newMockBlockStore(cfg, genesisState.ConsensusParams)
 	} else { // test single node
-		testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
+		testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
+		require.NoError(t, err)
 		defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
 		walBody, err := WALWithNBlocks(t, numBlocks)
 		require.NoError(t, err)
@@ -938,7 +941,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	// - 0x01
 	// - 0x02
 	// - 0x03
-	cfg := ResetConfig("handshake_test_")
+	cfg, err := ResetConfig("handshake_test_")
+	require.NoError(t, err)
 	t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
 	privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
 	require.NoError(t, err)
@@ -1228,7 +1232,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
 	app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
 	clientCreator := abciclient.NewLocalCreator(app)

-	cfg := ResetConfig("handshake_test_")
+	cfg, err := ResetConfig("handshake_test_")
+	require.NoError(t, err)
 	t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) })

 	privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go
index a1db8276d..c6ef8d3e6 100644
--- a/internal/consensus/state_test.go
+++ b/internal/consensus/state_test.go
@@ -57,7 +57,9 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh
 func TestStateProposerSelection0(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
+
 	height, round := cs1.Height, cs1.Round

 	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@@ -99,7 +101,9 @@ func TestStateProposerSelection0(t *testing.T) {
 func TestStateProposerSelection2(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4) // test needs more work for more than 3 validators
+	cs1, vss, err := randState(config, 4) // test needs more work for more than 3 validators
+	require.NoError(t, err)
+
 	height := cs1.Height

 	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
@@ -138,7 +142,8 @@ func TestStateProposerSelection2(t *testing.T) {
 func TestStateEnterProposeNoPrivValidator(t *testing.T) {
 	config := configSetup(t)

-	cs, _ := randState(config, 1)
+	cs, _, err := randState(config, 1)
+	require.NoError(t, err)
 	cs.SetPrivValidator(nil)
 	height, round := cs.Height, cs.Round
@@ -159,7 +164,8 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) {
 func TestStateEnterProposeYesPrivValidator(t *testing.T) {
 	config := configSetup(t)

-	cs, _ := randState(config, 1)
+	cs, _, err := randState(config, 1)
+	require.NoError(t, err)
 	height, round := cs.Height, cs.Round

 	// Listen for propose timeout event
@@ -191,7 +197,8 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) {
 func TestStateBadProposal(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 2)
+	cs1, vss, err := randState(config, 2)
+	require.NoError(t, err)
 	height, round := cs1.Height, cs1.Round
 	vs2 := vss[1]
@@ -251,7 +258,8 @@ func TestStateBadProposal(t *testing.T) {
 func TestStateOversizedBlock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 2)
+	cs1, vss, err := randState(config, 2)
+	require.NoError(t, err)
 	cs1.state.ConsensusParams.Block.MaxBytes = 2000
 	height, round := cs1.Height, cs1.Round
 	vs2 := vss[1]
@@ -315,7 +323,8 @@ func TestStateOversizedBlock(t *testing.T) {
 func TestStateFullRound1(t *testing.T) {
 	config := configSetup(t)

-	cs, vss := randState(config, 1)
+	cs, vss, err := randState(config, 1)
+	require.NoError(t, err)
 	height, round := cs.Height, cs.Round

 	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
@@ -357,7 +366,8 @@ func TestStateFullRound1(t *testing.T) {
 func TestStateFullRoundNil(t *testing.T) {
 	config := configSetup(t)

-	cs, vss := randState(config, 1)
+	cs, vss, err := randState(config, 1)
+	require.NoError(t, err)
 	height, round := cs.Height, cs.Round

 	voteCh := subscribe(cs.eventBus, types.EventQueryVote)
@@ -377,7 +387,8 @@ func TestStateFullRoundNil(t *testing.T) {
 func TestStateFullRound2(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 2)
+	cs1, vss, err := randState(config, 2)
+	require.NoError(t, err)
 	vs2 := vss[1]
 	height, round := cs1.Height, cs1.Round
@@ -419,7 +430,8 @@ func TestStateFullRound2(t *testing.T) {
 func TestStateLockNoPOL(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 2)
+	cs1, vss, err := randState(config, 2)
+	require.NoError(t, err)
 	vs2 := vss[1]
 	height, round := cs1.Height, cs1.Round
@@ -557,7 +569,8 @@ func TestStateLockNoPOL(t *testing.T) {

 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

-	cs2, _ := randState(config, 2) // needed so generated block is different than locked block
+	cs2, _, err := randState(config, 2) // needed so generated block is different than locked block
+	require.NoError(t, err)
 	// before we time out into new round, set next proposal block
 	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
 	if prop == nil || propBlock == nil {
@@ -609,7 +622,8 @@ func TestStateLockNoPOL(t *testing.T) {
 func TestStateLockPOLRelock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

 	height, round := cs1.Height, cs1.Round
@@ -653,7 +667,9 @@ func TestStateLockPOLRelock(t *testing.T) {
 	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

 	// before we timeout to the new round set the new proposal
-	cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
+	cs2, err := newState(cs1.state, vs2, kvstore.NewApplication())
+	require.NoError(t, err)
+
 	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
@@ -708,7 +724,8 @@ func TestStateLockPOLRelock(t *testing.T) {
 func TestStateLockPOLUnlock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -802,7 +819,8 @@ func TestStateLockPOLUnlock(t *testing.T) {
 func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -842,7 +860,8 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

 	// before we timeout to the new round set the new proposal
-	cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
+	cs2, err := newState(cs1.state, vs2, kvstore.NewApplication())
+	require.NoError(t, err)
 	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
@@ -886,7 +905,8 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

 	// before we timeout to the new round set the new proposal
-	cs3 := newState(cs1.state, vs3, kvstore.NewApplication())
+	cs3, err := newState(cs1.state, vs3, kvstore.NewApplication())
+	require.NoError(t, err)
 	prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
@@ -930,7 +950,8 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 func TestStateLockPOLSafety1(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1053,7 +1074,8 @@ func TestStateLockPOLSafety1(t *testing.T) {
 func TestStateLockPOLSafety2(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1152,7 +1174,8 @@ func TestStateLockPOLSafety2(t *testing.T) {
 func TestProposeValidBlock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1244,7 +1267,8 @@ func TestProposeValidBlock(t *testing.T) {
 func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1308,7 +1332,8 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 func TestSetValidBlockOnDelayedProposal(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

 	height, round := cs1.Height, cs1.Round
@@ -1366,7 +1391,8 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
 func TestWaitingTimeoutOnNilPolka(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1389,7 +1415,8 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
 func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1427,7 +1454,8 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
 func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1465,7 +1493,8 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
 func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
@@ -1494,7 +1523,8 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
 func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
@@ -1530,7 +1560,8 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
 func TestCommitFromPreviousRound(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
@@ -1586,7 +1617,8 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 	config := configSetup(t)
 	config.Consensus.SkipTimeoutCommit = false

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}

 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@@ -1649,7 +1681,8 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	config := configSetup(t)
 	config.Consensus.SkipTimeoutCommit = false

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
@@ -1792,7 +1825,8 @@ func TestStateSlashingPrecommits(t *testing.T) {
 func TestStateHalt1(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss := randState(config, 4)
+	cs1, vss, err := randState(config, 4)
+	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 	partSize := types.BlockPartSizeBytes
@@ -1862,7 +1896,8 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
 	config := configSetup(t)

 	// create dummy peer
-	cs, _ := randState(config, 1)
+	cs, _, err := randState(config, 1)
+	require.NoError(t, err)
 	peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
 	require.NoError(t, err)
@@ -1907,7 +1942,8 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
 func TestStateOutputVoteStats(t *testing.T) {
 	config := configSetup(t)

-	cs, vss := randState(config, 2)
+	cs, vss, err := randState(config, 2)
+	require.NoError(t, err)
 	// create dummy peer
 	peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
 	require.NoError(t, err)
@@ -1943,7 +1979,8 @@ func TestStateOutputVoteStats(t *testing.T) {
 func TestSignSameVoteTwice(t *testing.T) {
 	config := configSetup(t)

-	_, vss := randState(config, 2)
+	_, vss, err := randState(config, 2)
+	require.NoError(t, err)

 	randBytes := tmrand.Bytes(tmhash.Size)
diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go
index b3830a3f6..cafb1ed1a 100644
--- a/internal/consensus/types/height_vote_set_test.go
+++ b/internal/consensus/types/height_vote_set_test.go
@@ -18,7 +18,11 @@ import (
 var cfg *config.Config // NOTE: must be reset for each _test.go file

 func TestMain(m *testing.M) {
-	cfg = config.ResetTestRoot("consensus_height_vote_set_test")
+	var err error
+	cfg, err = config.ResetTestRoot("consensus_height_vote_set_test")
+	if err != nil {
+		panic(err)
+	}
 	code := m.Run()
 	os.RemoveAll(cfg.RootDir)
 	os.Exit(code)
diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go
index f81234f97..8f75d6969 100644
--- a/internal/consensus/wal_generator.go
+++ b/internal/consensus/wal_generator.go
@@ -156,7 +156,8 @@ func makeAddrs() (p2pAddr, rpcAddr string) {

 // getConfig returns a config for test cases
 func getConfig(t *testing.T) *config.Config {
-	c := config.ResetTestRoot(t.Name())
+	c, err := config.ResetTestRoot(t.Name())
+	require.NoError(t, err)

 	p2pAddr, rpcAddr := makeAddrs()
 	c.P2P.ListenAddress = p2pAddr
diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go
index 972932440..e2808ce85 100644
--- a/internal/inspect/inspect_test.go
+++ b/internal/inspect/inspect_test.go
@@ -27,7 +27,8 @@ import (
 )

 func TestInspectConstructor(t *testing.T) {
-	cfg := config.ResetTestRoot("test")
+	cfg, err := config.ResetTestRoot("test")
+	require.NoError(t, err)
 	testLogger := log.TestingLogger()
 	t.Cleanup(leaktest.Check(t))
 	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
@@ -41,7 +42,9 @@ func TestInspectConstructor(t *testing.T) {
 }

 func TestInspectRun(t *testing.T) {
-	cfg := config.ResetTestRoot("test")
+	cfg, err := config.ResetTestRoot("test")
+	require.NoError(t, err)
+
 	testLogger := log.TestingLogger()
 	t.Cleanup(leaktest.Check(t))
 	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go
index acfaec283..35558f857 100644
--- a/internal/mempool/v0/bench_test.go
+++ b/internal/mempool/v0/bench_test.go
@@ -6,6 +6,7 @@ import (
 	"sync/atomic"
 	"testing"

+	"github.com/stretchr/testify/require"
 	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/internal/mempool"
@@ -14,8 +15,10 @@ import (
 func BenchmarkReap(b *testing.B) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(b, err)
 	defer cleanup()
+
 	mp.config.Size = 100000

 	size := 10000
@@ -35,7 +38,8 @@ func BenchmarkReap(b *testing.B) {
 func BenchmarkCheckTx(b *testing.B) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(b, err)
 	defer cleanup()

 	mp.config.Size = 1000000
@@ -57,7 +61,8 @@ func BenchmarkCheckTx(b *testing.B) {
 func BenchmarkParallelCheckTx(b *testing.B) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(b, err)
 	defer cleanup()

 	mp.config.Size = 100000000
@@ -82,7 +87,8 @@ func BenchmarkParallelCheckTx(b *testing.B) {
 func BenchmarkCheckDuplicateTx(b *testing.B) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(b, err)
 	defer cleanup()

 	mp.config.Size = 1000000
diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go
index 5bf2c7603..953e82b8d 100644
--- a/internal/mempool/v0/cache_test.go
+++ b/internal/mempool/v0/cache_test.go
@@ -17,7 +17,8 @@ import (
 func TestCacheAfterUpdate(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	// reAddIndices & txsInCache can have elements > numTxsToCreate
diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go
index a5d6ae8da..20d07db83 100644
--- a/internal/mempool/v0/clist_mempool_test.go
+++ b/internal/mempool/v0/clist_mempool_test.go
@@ -33,8 +33,14 @@ import (
 // test.
 type cleanupFunc func()

-func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) {
-	return newMempoolWithAppAndConfig(cc, config.ResetTestRoot("mempool_test"))
+func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc, error) {
+	conf, err := config.ResetTestRoot("mempool_test")
+	if err != nil {
+		return nil, func() {}, err
+	}
+
+	mp, cu := newMempoolWithAppAndConfig(cc, conf)
+	return mp, cu, nil
 }

 func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) {
@@ -95,7 +101,8 @@ func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.
 func TestReapMaxBytesMaxGas(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	// Ensure gas calculation behaves as expected
@@ -144,7 +151,8 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
 func TestMempoolFilters(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	emptyTxArr := []types.Tx{[]byte{}}
@@ -183,7 +191,8 @@ func TestMempoolFilters(t *testing.T) {
 func TestMempoolUpdate(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	// 1. Adds valid txs to the cache
@@ -230,7 +239,8 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
 		return mockClient, nil
 	}

-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
@@ -249,7 +259,7 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {

 	// Calling update to remove the first transaction from the mempool.
 	// This call also triggers the mempool to recheck its remaining transactions.
-	err := mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
+	err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
 	require.Nil(t, err)

 	// The mempool has now sent its requests off to the client to be rechecked
@@ -318,7 +328,8 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
 func TestTxsAvailable(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	mp.EnableTxsAvailable()
@@ -363,12 +374,13 @@ func TestSerialReap(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)

-	mp, cleanup := newMempoolWithApp(cc)
+	mp, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	appConnCon, _ := cc()
 	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
-	err := appConnCon.Start()
+	err = appConnCon.Start()
 	require.Nil(t, err)

 	cacheMap := make(map[string]struct{})
@@ -473,7 +485,8 @@ func TestSerialReap(t *testing.T) {
 func TestMempool_CheckTxChecksTxSize(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	mempl, cleanup := newMempoolWithApp(cc)
+	mempl, cleanup, err := newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	maxTxSize := mempl.config.MaxTxBytes
@@ -518,7 +531,9 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
 func TestMempoolTxsBytes(t *testing.T) {
 	app := kvstore.NewApplication()
 	cc := abciclient.NewLocalCreator(app)
-	cfg := config.ResetTestRoot("mempool_test")
+	cfg, err := config.ResetTestRoot("mempool_test")
+	require.NoError(t, err)
+
 	cfg.Mempool.MaxTxsBytes = 10
 	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
 	defer cleanup()
@@ -527,7 +542,7 @@ func TestMempoolTxsBytes(t *testing.T) {
 	assert.EqualValues(t, 0, mp.SizeBytes())

 	// 2. len(tx) after CheckTx
-	err := mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
+	err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{})
 	require.NoError(t, err)
 	assert.EqualValues(t, 1, mp.SizeBytes())

@@ -561,7 +576,8 @@ func TestMempoolTxsBytes(t *testing.T) {
 	// 6. zero after tx is rechecked and removed due to not being valid anymore
 	app2 := kvstore.NewApplication()
 	cc = abciclient.NewLocalCreator(app2)
-	mp, cleanup = newMempoolWithApp(cc)
+	mp, cleanup, err = newMempoolWithApp(cc)
+	require.NoError(t, err)
 	defer cleanup()

 	txBytes := make([]byte, 8)
@@ -617,7 +633,9 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 			t.Error(err)
 		}
 	})
-	cfg := config.ResetTestRoot("mempool_test")
+	cfg, err := config.ResetTestRoot("mempool_test")
+	require.NoError(t, err)
+
 	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
 	defer cleanup()

@@ -640,7 +658,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 		// this will err with ErrTxInCache many times ...
 		mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
 	}
-	err := mp.FlushAppConn()
+	err = mp.FlushAppConn()
 	require.NoError(t, err)
 }
diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go
index 69582284b..14ff58adf 100644
--- a/internal/mempool/v0/reactor_test.go
+++ b/internal/mempool/v0/reactor_test.go
@@ -58,7 +58,8 @@ func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint)
 		rts.kvstores[nodeID] = kvstore.NewApplication()
 		cc := abciclient.NewLocalCreator(rts.kvstores[nodeID])

-		mempool, memCleanup := newMempoolWithApp(cc)
+		mempool, memCleanup, err := newMempoolWithApp(cc)
+		require.NoError(t, err)
 		t.Cleanup(memCleanup)
 		mempool.SetLogger(rts.logger)
 		rts.mempools[nodeID] = mempool
diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go
index 8bed5520a..72a72861c 100644
--- a/internal/mempool/v1/mempool_test.go
+++ b/internal/mempool/v1/mempool_test.go
@@ -79,7 +79,8 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
 	app := &application{kvstore.NewApplication()}
 	cc := abciclient.NewLocalCreator(app)

-	cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
+	cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
+	require.NoError(t, err)
 	cfg.Mempool.CacheSize = cacheSize

 	appConnMem, err := cc()
diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go
index 56e6057a1..2a92a11c5 100644
--- a/internal/mempool/v1/reactor_test.go
+++ b/internal/mempool/v1/reactor_test.go
@@ -34,10 +34,9 @@ type reactorTestSuite struct {
 func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
 	t.Helper()

-	cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
-	t.Cleanup(func() {
-		os.RemoveAll(cfg.RootDir)
-	})
+	cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
+	require.NoError(t, err)
+	t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })

 	rts := &reactorTestSuite{
 		logger: log.TestingLogger().With("testCase", t.Name()),
diff --git a/internal/state/state_test.go b/internal/state/state_test.go
index 8c0144abd..fdf681294 100644
--- a/internal/state/state_test.go
+++ b/internal/state/state_test.go
@@ -26,7 +26,9 @@ import (

 // setupTestCase does setup common to all test cases.
 func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
-	cfg := config.ResetTestRoot("state_")
+	cfg, err := config.ResetTestRoot("state_")
+	require.NoError(t, err)
+
 	dbType := dbm.BackendType(cfg.DBBackend)
 	stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
 	require.NoError(t, err)
diff --git a/internal/state/store_test.go b/internal/state/store_test.go
index 118350fff..815d1f7a1 100644
--- a/internal/state/store_test.go
+++ b/internal/state/store_test.go
@@ -101,7 +101,9 @@ func TestStoreLoadValidators(t *testing.T) {
 func BenchmarkLoadValidators(b *testing.B) {
 	const valSetSize = 100

-	cfg := config.ResetTestRoot("state_")
+	cfg, err := config.ResetTestRoot("state_")
+	require.NoError(b, err)
+
 	defer os.RemoveAll(cfg.RootDir)
 	dbType := dbm.BackendType(cfg.DBBackend)
 	stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
diff --git a/internal/store/store_test.go b/internal/store/store_test.go
index 03db91fa8..14163f488 100644
--- a/internal/store/store_test.go
+++ b/internal/store/store_test.go
@@ -46,7 +46,11 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
 }

 func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
-	cfg := config.ResetTestRoot("blockchain_reactor_test")
+	cfg, err := config.ResetTestRoot("blockchain_reactor_test")
+	if err != nil {
+		panic(err)
+	}
+
 	blockDB := dbm.NewMemDB()
 	state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
 	if err != nil {
@@ -292,7 +296,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 }

 func TestLoadBaseMeta(t *testing.T) {
-	cfg := config.ResetTestRoot("blockchain_reactor_test")
+	cfg, err := config.ResetTestRoot("blockchain_reactor_test")
+	require.NoError(t, err)
+
 	defer os.RemoveAll(cfg.RootDir)
 	state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
 	require.NoError(t, err)
@@ -348,7 +354,9 @@ func TestLoadBlockPart(t *testing.T) {
 }

 func TestPruneBlocks(t *testing.T) {
-	cfg := config.ResetTestRoot("blockchain_reactor_test")
+	cfg, err := config.ResetTestRoot("blockchain_reactor_test")
+	require.NoError(t, err)
+
 	defer os.RemoveAll(cfg.RootDir)
 	state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
 	require.NoError(t, err)
diff --git a/light/example_test.go b/light/example_test.go
index 2e0feb5e1..d51acb925 100644
--- a/light/example_test.go
+++ b/light/example_test.go
@@ -22,7 +22,11 @@ import (
 func ExampleClient() {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight")
+	conf, err := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight")
+	if err != nil {
+		stdlog.Fatal(err)
+	}
+
 	logger := log.TestingLogger()

 	// Start a test application
diff --git a/light/light_test.go b/light/light_test.go
index f5d2ddd89..93cef603b 100644
--- a/light/light_test.go
+++ b/light/light_test.go
@@ -29,7 +29,8 @@ func TestClientIntegration_Update(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	conf := rpctest.CreateConfig(t.Name())
+	conf, err := rpctest.CreateConfig(t.Name())
+	require.NoError(t, err)

 	// Start a test application
 	app := kvstore.NewApplication()
@@ -89,7 +90,8 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) {
 	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	conf := rpctest.CreateConfig(t.Name())
+	conf, err := rpctest.CreateConfig(t.Name())
+	require.NoError(t, err)

 	// Start a test application
 	app := kvstore.NewApplication()
diff --git
a/light/provider/http/http_test.go b/light/provider/http/http_test.go index adcb69fb9..1f695d442 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -35,7 +35,8 @@ func TestNewProvider(t *testing.T) { func TestProvider(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := rpctest.CreateConfig(t.Name()) + cfg, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against app := kvstore.NewApplication() diff --git a/node/node_test.go b/node/node_test.go index 0586cc491..a4af0a9cc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -38,7 +38,8 @@ import ( ) func TestNodeStartStop(t *testing.T) { - cfg := config.ResetTestRoot("node_node_test") + cfg, err := config.ResetTestRoot("node_node_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -110,7 +111,9 @@ func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl } func TestNodeDelayedStart(t *testing.T) { - cfg := config.ResetTestRoot("node_delayed_start_test") + cfg, err := config.ResetTestRoot("node_delayed_start_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) now := tmtime.Now() @@ -125,7 +128,8 @@ func TestNodeDelayedStart(t *testing.T) { } func TestNodeSetAppVersion(t *testing.T) { - cfg := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) // create node @@ -146,7 +150,8 @@ func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addr @@ -179,7 +184,8 @@ func TestNodeSetPrivValTCP(t *testing.T) { func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { addrNoPrefix := testFreeAddr(t) - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix @@ -196,7 +202,8 @@ func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." 
+ tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = "unix://" + tmpfile @@ -237,11 +244,12 @@ func TestCreateProposalBlock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() + err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -332,11 +340,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() + err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -397,11 +407,12 @@ func TestMaxProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() + err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -512,7 +523,8 @@ func TestMaxProposalBlockSize(t *testing.T) { } func TestNodeNewSeedNode(t *testing.T) { - cfg := config.ResetTestRoot("node_new_node_custom_reactors_test") + cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test") + require.NoError(t, err) cfg.Mode = config.ModeSeed defer os.RemoveAll(cfg.RootDir) @@ -539,7 +551,9 @@ func TestNodeNewSeedNode(t *testing.T) { } func TestNodeSetEventSink(t *testing.T) { - cfg := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) logger := log.TestingLogger() @@ -701,7 +715,8 @@ func loadStatefromGenesis(t *testing.T) sm.State { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - cfg := config.ResetTestRoot("load_state_from_genesis") + cfg, err := config.ResetTestRoot("load_state_from_genesis") + require.NoError(t, err) loadedState, err := stateStore.Load() require.NoError(t, err) diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 38fb4fcf7..8acf4b072 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -22,7 +22,8 @@ func TestHTTPSimple(t *testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_simple") + conf, err := rpctest.CreateConfig("ExampleHTTP_simple") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -71,7 +72,8 @@ func TestHTTPBatching(t 
*testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_batching") + conf, err := rpctest.CreateConfig("ExampleHTTP_batching") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index c2e0dc3cd..fdf5da851 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -20,7 +20,8 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { ctx, cancel := context.WithCancel(context.Background()) - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 189289b0a..7bdc1bb2e 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -57,15 +57,18 @@ func makeAddrs() (p2pAddr, rpcAddr string) { return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) *config.Config { - c := config.ResetTestRoot(testName) +func CreateConfig(testName string) (*config.Config, error) { + c, err := config.ResetTestRoot(testName) + if err != nil { + return nil, err + } p2pAddr, rpcAddr := makeAddrs() c.P2P.ListenAddress = p2pAddr c.RPC.ListenAddress = rpcAddr c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - return c + return c, nil } type ServiceCloser func(context.Context) error diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 6199d804f..0bead8864 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -84,7 +84,9 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - config.WriteConfigFile(nodeDir, cfg) // panics + if err := config.WriteConfigFile(nodeDir, cfg); err != nil { + return err + } appCfg, err := MakeAppConfig(node) if err != nil { From e2a103a3152977ae2cee0cfd783e01899b0c1828 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 27 Oct 2021 15:45:17 +0200 Subject: [PATCH 128/174] mempool: port reactor tests from legacy implementation (#7162) --- internal/mempool/v1/mempool_test.go | 10 + internal/mempool/v1/reactor_test.go | 273 ++++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go index 72a72861c..89c1a6685 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/v1/mempool_test.go @@ -118,6 +118,16 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx return txs } +func convertTex(in []testTx) types.Txs { + out := make([]types.Tx, len(in)) + + for idx := range in { + out[idx] = in[idx].tx + } + + return out +} + func TestTxMempool_TxsAvailable(t *testing.T) { txmp := setup(t, 0) txmp.EnableTxsAvailable() diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index 2a92a11c5..317f7ce73 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -1,18 +1,24 @@ package v1 import ( + "context" "os" "strings" "sync" "testing" + "time" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" tmsync 
"github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -100,6 +106,47 @@ func (rts *reactorTestSuite) start(t *testing.T) { "network does not have expected number of nodes") } +func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { + t.Helper() + + rts.stop(t) + + for _, mch := range rts.mempoolChannels { + require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) + } +} + +func (rts *reactorTestSuite) stop(t *testing.T) { + for id, r := range rts.reactors { + require.NoError(t, r.Stop(), "stopping reactor %s", id) + r.Wait() + require.False(t, r.IsRunning(), "reactor %s did not stop", id) + } +} + +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) { + t.Helper() + + // ensure that the transactions get fully broadcast to the + // rest of the network + wg := &sync.WaitGroup{} + for name, pool := range rts.mempools { + if !p2ptest.NodeInSlice(name, ids) { + continue + } + + wg.Add(1) + go func(pool *TxMempool) { + defer wg.Done() + require.Eventually(t, func() bool { return len(txs) == pool.Size() }, + time.Minute, + 100*time.Millisecond, + ) + }(pool) + } + wg.Wait() +} + func TestReactorBroadcastDoesNotPanic(t *testing.T) { numNodes := 2 rts := setupReactors(t, numNodes, 0) @@ -142,3 +189,229 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { primaryReactor.peerWG.Wait() wg.Wait() } + +func TestReactorBroadcastTxs(t *testing.T) { + numTxs := 1000 + numNodes := 10 + + rts := setupReactors(t, numNodes, 0) + + primary := rts.nodes[0] + secondaries := rts.nodes[1:] + + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + + // run the router + rts.start(t) + + // Wait till all secondary suites (reactor) received all mempool txs from the + // primary suite (node). + rts.waitForTxns(t, convertTex(txs), secondaries...) + + rts.stop(t) +} + +// regression test for https://github.com/tendermint/tendermint/issues/5408 +func TestReactorConcurrency(t *testing.T) { + numTxs := 5 + numNodes := 2 + + rts := setupReactors(t, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(t) + + var wg sync.WaitGroup + + for i := 0; i < 1000; i++ { + wg.Add(2) + + // 1. submit a bunch of txs + // 2. update the whole mempool + + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[primary] + + mempool.Lock() + defer mempool.Unlock() + + deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) + for i := range txs { + deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} + } + + require.NoError(t, mempool.Update(1, convertTex(txs), deliverTxResponses, nil, nil)) + }() + + // 1. submit a bunch of txs + // 2. 
update none + _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[secondary] + + mempool.Lock() + defer mempool.Unlock() + + err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + require.NoError(t, err) + }() + + // flush the mempool + rts.mempools[secondary].Flush() + } + + wg.Wait() +} + +func TestReactorNoBroadcastToSender(t *testing.T) { + numTxs := 1000 + numNodes := 2 + + rts := setupReactors(t, numNodes, uint(numTxs)) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + peerID := uint16(1) + _ = checkTxs(t, rts.mempools[primary], numTxs, peerID) + + rts.start(t) + + time.Sleep(100 * time.Millisecond) + + require.Eventually(t, func() bool { + return rts.mempools[secondary].Size() == 0 + }, time.Minute, 100*time.Millisecond) + + rts.assertMempoolChannelsDrained(t) +} + +func TestReactor_MaxTxBytes(t *testing.T) { + numNodes := 2 + cfg := config.TestConfig() + + rts := setupReactors(t, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + // Broadcast a tx, which has the max size and ensure it's received by the + // second reactor. + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) + err := rts.reactors[primary].mempool.CheckTx( + context.Background(), + tx1, + nil, + mempool.TxInfo{ + SenderID: mempool.UnknownPeerID, + }, + ) + require.NoError(t, err) + + rts.start(t) + + rts.reactors[primary].mempool.Flush() + rts.reactors[secondary].mempool.Flush() + + // broadcast a tx, which is beyond the max size and ensure it's not sent + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) + err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) + require.Error(t, err) + + rts.assertMempoolChannelsDrained(t) +} + +func TestDontExhaustMaxActiveIDs(t *testing.T) { + // we're creating a single node network, but not starting the + // network. + rts := setupReactors(t, 1, mempool.MaxActiveIDs+1) + + nodeID := rts.nodes[0] + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + // ensure the reactor does not panic (i.e. 
exhaust active IDs) + for i := 0; i < mempool.MaxActiveIDs+1; i++ { + rts.peerChans[nodeID] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusUp, + NodeID: peerID, + } + + rts.mempoolChannels[nodeID].Out <- p2p.Envelope{ + To: peerID, + Message: &protomem.Txs{ + Txs: [][]byte{}, + }, + } + } + + require.Eventually( + t, + func() bool { + for _, mch := range rts.mempoolChannels { + if len(mch.Out) > 0 { + return false + } + } + + return true + }, + time.Minute, + 10*time.Millisecond, + ) + + rts.assertMempoolChannelsDrained(t) +} + +func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + // 0 is already reserved for UnknownPeerID + ids := mempool.NewMempoolIDs() + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + for i := 0; i < mempool.MaxActiveIDs-1; i++ { + ids.ReserveForPeer(peerID) + } + + require.Panics(t, func() { + ids.ReserveForPeer(peerID) + }) +} + +func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + rts := setupReactors(t, 2, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(t) + + // disconnect peer + rts.peerChans[primary] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusDown, + NodeID: secondary, + } + time.Sleep(500 * time.Millisecond) + + txs := checkTxs(t, rts.reactors[primary].mempool, 4, mempool.UnknownPeerID) + require.Equal(t, 4, len(txs)) + require.Equal(t, 4, rts.mempools[primary].Size()) + require.Equal(t, 0, rts.mempools[secondary].Size()) +} From 4d09a6c25d9aea3a0209c648467493ba5b4ac63d Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Thu, 28 Oct 2021 05:27:09 +0200 Subject: [PATCH 129/174] abci: fix readme link (#7173) --- abci/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/abci/README.md b/abci/README.md index 4a953dab3..e2234f4d1 100644 --- a/abci/README.md +++ b/abci/README.md @@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g A detailed description of the ABCI methods and message types is contained in: - [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md) -- [A protobuf file](../proto/tendermint/abci/types.proto) +- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto) - [A Go interface](./types/application.go) ## Protocol Buffers From 1fd706054227950190710e9b38f6f978a7c42274 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 28 Oct 2021 00:57:45 -0700 Subject: [PATCH 130/174] pubsub: Use distinct client IDs for test subscriptions. (#7178) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #7176. Some of the benchmarks create a bunch of different subscriptions all sharing the same query. These were all using the same client ID, which violates one of the subscriber rules. Ensure each subscriber gets a unique ID. This has been broken as long as this library has been in the repo—I tracked it back to bb9aa85d and it was already failing there, so I think this never really worked. I'm not sure these test anything useful, but at least now they run. 
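
For illustration only (not part of this patch), a minimal sketch of the
subscriber rule the benchmarks were violating. The `sketch` function and
`example` package names are hypothetical; the pubsub calls mirror the ones
used in the diff below:

    package example

    import (
        "context"

        "github.com/tendermint/tendermint/libs/pubsub"
        "github.com/tendermint/tendermint/libs/pubsub/query"
    )

    // sketch demonstrates that the server allows at most one subscription
    // per (clientID, query) pair.
    func sketch(ctx context.Context, s *pubsub.Server) error {
        q := query.MustParse("abci.Account.Owner = 'Ivan'")

        // The first subscription for this (clientID, query) pair succeeds.
        if _, err := s.Subscribe(ctx, "clientID", q); err != nil {
            return err
        }

        // A duplicate subscription with the same client ID and query is
        // refused by the server; giving each subscriber a distinct ID, as
        // this patch does, avoids the error.
        _, err := s.Subscribe(ctx, "clientID", q)
        return err
    }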
--- libs/pubsub/pubsub_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 525415493..d3855be7a 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -518,7 +518,8 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { ctx := context.Background() q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") for i := 0; i < n; i++ { - subscription, err := s.Subscribe(ctx, clientID, q) + id := fmt.Sprintf("clientID-%d", i+1) + subscription, err := s.Subscribe(ctx, id, q) if err != nil { b.Fatal(err) } From 6108d0a9b53888f179760262def787267cf7e524 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 28 Oct 2021 11:17:23 +0200 Subject: [PATCH 131/174] config: expose ability to write config to arbitrary paths (#7174) --- config/toml.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/config/toml.go b/config/toml.go index 94c1907e8..a57e14b9b 100644 --- a/config/toml.go +++ b/config/toml.go @@ -46,15 +46,20 @@ func EnsureRoot(rootDir string) { // WriteConfigFile renders config using the template and writes it to configFilePath. // This function is called by cmd/tendermint/commands/init.go func WriteConfigFile(rootDir string, config *Config) error { + return config.WriteToTemplate(filepath.Join(rootDir, defaultConfigFilePath)) +} + +// WriteToTemplate writes the config to the exact file specified by +// the path, in the default toml template and does not mangle the path +// or filename at all. +func (cfg *Config) WriteToTemplate(path string) error { var buffer bytes.Buffer - if err := configTemplate.Execute(&buffer, config); err != nil { + if err := configTemplate.Execute(&buffer, cfg); err != nil { return err } - configFilePath := filepath.Join(rootDir, defaultConfigFilePath) - - return writeFile(configFilePath, buffer.Bytes(), 0644) + return writeFile(path, buffer.Bytes(), 0644) } func writeDefaultConfigFileIfNone(rootDir string) error { From 8441b3715aff9dbfcb9cbe29ebc2f53e7cd910d3 Mon Sep 17 00:00:00 2001 From: Sharad Chand Date: Thu, 28 Oct 2021 23:19:07 +0545 Subject: [PATCH 132/174] migrate away from deprecated ioutil APIs (#7175) Co-authored-by: Callum Waters Co-authored-by: M. J. 
Fromberger --- abci/example/kvstore/kvstore_test.go | 8 +++--- cmd/priv_val_server/main.go | 3 +-- cmd/tendermint/commands/debug/dump.go | 3 +-- cmd/tendermint/commands/debug/io.go | 3 +-- cmd/tendermint/commands/debug/kill.go | 3 +-- cmd/tendermint/commands/debug/util.go | 6 ++--- cmd/tendermint/commands/root_test.go | 3 +-- config/config.go | 3 +-- config/toml.go | 5 ++-- config/toml_test.go | 7 +++--- internal/consensus/common_test.go | 7 +++--- internal/consensus/replay_test.go | 9 +++---- internal/consensus/state.go | 4 +-- internal/libs/autofile/autofile_test.go | 13 +++++----- internal/libs/autofile/group_test.go | 9 +++---- internal/libs/tempfile/tempfile_test.go | 17 ++++++------- internal/p2p/conn/secret_connection_test.go | 3 +-- internal/p2p/upnp/upnp.go | 6 ++--- .../state/indexer/indexer_service_test.go | 11 ++++---- internal/state/indexer/sink/psql/psql_test.go | 3 +-- internal/state/indexer/tx/kv/kv_bench_test.go | 4 +-- internal/state/indexer/tx/kv/kv_test.go | 5 ++-- internal/state/state.go | 4 +-- internal/statesync/chunks.go | 7 +++--- internal/statesync/chunks_test.go | 7 +++--- libs/cli/helper.go | 3 +-- libs/cli/setup_test.go | 4 +-- libs/os/os_test.go | 17 ++++++------- light/example_test.go | 3 +-- light/light_test.go | 5 ++-- light/mbt/driver_test.go | 4 +-- privval/file.go | 6 ++--- privval/file_test.go | 25 +++++++++---------- privval/grpc/util.go | 5 ++-- privval/socket_listeners_test.go | 3 +-- rpc/client/main_test.go | 3 +-- rpc/jsonrpc/client/http_json_client.go | 6 ++--- rpc/jsonrpc/client/http_json_client_test.go | 4 +-- rpc/jsonrpc/client/http_uri_client.go | 4 +-- rpc/jsonrpc/server/http_json_handler.go | 4 +-- rpc/jsonrpc/server/http_json_handler_test.go | 14 +++++------ rpc/jsonrpc/server/http_server_test.go | 10 ++++---- test/e2e/app/snapshots.go | 9 +++---- test/e2e/app/state.go | 5 ++-- test/e2e/runner/evidence.go | 4 +-- test/e2e/runner/setup.go | 9 +++---- test/fuzz/mempool/v0/fuzz_test.go | 4 +-- test/fuzz/mempool/v1/fuzz_test.go | 4 +-- test/fuzz/p2p/secretconnection/fuzz_test.go | 4 +-- .../p2p/secretconnection/init-corpus/main.go | 3 +-- test/fuzz/rpc/jsonrpc/server/fuzz_test.go | 4 +-- test/fuzz/rpc/jsonrpc/server/handler.go | 6 ++--- .../internal/test_harness_test.go | 3 +-- tools/tm-signer-harness/main.go | 3 +-- types/genesis.go | 6 ++--- types/genesis_test.go | 3 +-- types/node_key.go | 6 ++--- types/part_set_test.go | 4 +-- 58 files changed, 158 insertions(+), 192 deletions(-) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 9d026bd87..2c6f48903 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -3,7 +3,7 @@ package kvstore import ( "context" "fmt" - "io/ioutil" + "os" "sort" "testing" @@ -74,7 +74,7 @@ func TestKVStoreKV(t *testing.T) { } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -90,7 +90,7 @@ func TestPersistentKVStoreKV(t *testing.T) { } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -122,7 +122,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { // add a validator, remove a validator, update a validator func TestValUpdates(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := 
os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index 203b3df0d..cda123d7f 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -6,7 +6,6 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -78,7 +77,7 @@ func main() { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(*rootCA) + bs, err := os.ReadFile(*rootCA) if err != nil { fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err) os.Exit(1) diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index cb1cc942a..9d67cfe84 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() - tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp") if err != nil { logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err) return diff --git a/cmd/tendermint/commands/debug/io.go b/cmd/tendermint/commands/debug/io.go index dcfff50c8..bf904cf5c 100644 --- a/cmd/tendermint/commands/debug/io.go +++ b/cmd/tendermint/commands/debug/io.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -111,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) } diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 3e749e513..7f1ebdba1 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. 
- tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp") if err != nil { return fmt.Errorf("failed to create temporary directory: %w", err) } diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index fa356c488..6fa48df1d 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -3,7 +3,7 @@ package debug import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "os" "path" @@ -73,10 +73,10 @@ func dumpProfile(dir, addr, profile string, debug int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) } diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index cd4bc9f5f..6d5143f4e 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -167,5 +166,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } diff --git a/config/config.go b/config/config.go index c0443e021..e0a5c91ef 100644 --- a/config/config.go +++ b/config/config.go @@ -4,7 +4,6 @@ import ( "encoding/hex" "errors" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -269,7 +268,7 @@ func (cfg BaseConfig) NodeKeyFile() string { // LoadNodeKey loads NodeKey located in filePath. 
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) { - jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile()) + jsonBytes, err := os.ReadFile(cfg.NodeKeyFile()) if err != nil { return "", err } diff --git a/config/toml.go b/config/toml.go index a57e14b9b..832c62564 100644 --- a/config/toml.go +++ b/config/toml.go @@ -3,7 +3,6 @@ package config import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -515,7 +514,7 @@ func ResetTestRoot(testName string) (*Config, error) { func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { return nil, err } @@ -559,7 +558,7 @@ func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) } func writeFile(filePath string, contents []byte, mode os.FileMode) error { - if err := ioutil.WriteFile(filePath, contents, mode); err != nil { + if err := os.WriteFile(filePath, contents, mode); err != nil { return fmt.Errorf("failed to write file: %w", err) } return nil diff --git a/config/toml_test.go b/config/toml_test.go index 9af6e4f35..c062d25e4 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -23,7 +22,7 @@ func TestEnsureRoot(t *testing.T) { require := require.New(t) // setup temp dir for test - tmpDir, err := ioutil.TempDir("", "config-test") + tmpDir, err := os.MkdirTemp("", "config-test") require.NoError(err) defer os.RemoveAll(tmpDir) @@ -33,7 +32,7 @@ func TestEnsureRoot(t *testing.T) { require.NoError(WriteConfigFile(tmpDir, DefaultConfig())) // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) require.NoError(err) checkConfig(t, string(data)) @@ -53,7 +52,7 @@ func TestEnsureTestRoot(t *testing.T) { rootDir := cfg.RootDir // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) require.Nil(err) checkConfig(t, string(data)) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 8799d87d4..a44fccc98 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -800,11 +799,11 @@ func randConsensusNetWithPeers( if i < nValidators { privVal = privVals[i] } else { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") if err != nil { panic(err) } - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") if err != nil { panic(err) } @@ -892,7 +891,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo { func (*mockTicker) SetLogger(log.Logger) {} func newPersistentKVStore() abci.Application { - dir, err := ioutil.TempDir("", "persistent-kvstore") + dir, err := os.MkdirTemp("", "persistent-kvstore") if err != nil { panic(err) } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index f72da903b..0b7468b79 100644 --- a/internal/consensus/replay_test.go +++ 
b/internal/consensus/replay_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -70,7 +69,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co ) cs.SetLogger(logger) - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) + bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) err = cs.Start() @@ -665,7 +664,7 @@ func TestMockProxyApp(t *testing.T) { } func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") + walFile, err := os.CreateTemp("", "wal") if err != nil { panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) } @@ -1072,7 +1071,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // if its not the first one, we have a full block if thisBlockParts != nil { var pbb = new(tmproto.Block) - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } @@ -1111,7 +1110,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } } // grab the last block too - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 42900a7d4..65f4f865b 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -5,7 +5,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "os" "runtime/debug" "time" @@ -1897,7 +1897,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } diff --git a/internal/libs/autofile/autofile_test.go b/internal/libs/autofile/autofile_test.go index c2442a56f..479a239cb 100644 --- a/internal/libs/autofile/autofile_test.go +++ b/internal/libs/autofile/autofile_test.go @@ -1,7 +1,6 @@ package autofile import ( - "io/ioutil" "os" "path/filepath" "syscall" @@ -22,7 +21,7 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := ioutil.TempDir("", "sighup_test") + dir, err := os.MkdirTemp("", "sighup_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(dir) @@ -45,7 +44,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory - otherDir, err := ioutil.TempDir("", "sighup_test_other") + otherDir, err := os.MkdirTemp("", "sighup_test_other") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(otherDir) }) require.NoError(t, os.Chdir(otherDir)) @@ -72,7 +71,7 @@ func TestSIGHUP(t *testing.T) { } // The current directory should be empty - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) } @@ -80,7 +79,7 @@ func TestSIGHUP(t *testing.T) { // // Manually modify file permissions, close, and reopen using autofile: // // We expect the file permissions to be changed back to the intended perms. 
// func TestOpenAutoFilePerms(t *testing.T) { -// file, err := ioutil.TempFile("", "permission_test") +// file, err := os.CreateTemp("", "permission_test") // require.NoError(t, err) // err = file.Close() // require.NoError(t, err) @@ -106,7 +105,7 @@ func TestSIGHUP(t *testing.T) { func TestAutoFileSize(t *testing.T) { // First, create an AutoFile writing to a tempfile dir - f, err := ioutil.TempFile("", "sighup_test") + f, err := os.CreateTemp("", "sighup_test") require.NoError(t, err) require.NoError(t, f.Close()) @@ -139,7 +138,7 @@ func TestAutoFileSize(t *testing.T) { } func mustReadFile(t *testing.T, filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) require.NoError(t, err) return fileBytes diff --git a/internal/libs/autofile/group_test.go b/internal/libs/autofile/group_test.go index 0981923eb..ffdb70013 100644 --- a/internal/libs/autofile/group_test.go +++ b/internal/libs/autofile/group_test.go @@ -2,7 +2,6 @@ package autofile import ( "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -122,7 +121,7 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := ioutil.TempDir("", "rotate_test") + dir, err := os.MkdirTemp("", "rotate_test") require.NoError(t, err) defer os.RemoveAll(dir) err = os.Chdir(dir) @@ -151,21 +150,21 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) // Read g.Head.Path+"000" - body1, err := ioutil.ReadFile(g.Head.Path + ".000") + body1, err := os.ReadFile(g.Head.Path + ".000") assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path - body2, err := ioutil.ReadFile(g.Head.Path) + body2, err := os.ReadFile(g.Head.Path) assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } // Make sure there are no files in the current, temporary directory - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) diff --git a/internal/libs/tempfile/tempfile_test.go b/internal/libs/tempfile/tempfile_test.go index 5650fe720..212525d44 100644 --- a/internal/libs/tempfile/tempfile_test.go +++ b/internal/libs/tempfile/tempfile_test.go @@ -5,10 +5,9 @@ package tempfile import ( "bytes" "fmt" - "io/ioutil" mrand "math/rand" "os" - testing "testing" + "testing" "github.com/stretchr/testify/require" @@ -22,13 +21,13 @@ func TestWriteFileAtomic(t *testing.T) { perm os.FileMode = 0600 ) - f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + f, err := os.CreateTemp("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0600); err != nil { t.Fatal(err) } @@ -36,7 +35,7 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } - rData, err := ioutil.ReadFile(f.Name()) + rData, err := os.ReadFile(f.Name()) if err != nil { t.Fatal(err) } @@ -81,11 +80,11 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) require.NoError(t, err) // Check that the first atomic file was untouched - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was 
overwritten") // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") @@ -132,14 +131,14 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.Nil(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.Nil(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 84384011b..08a7925fa 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" mrand "math/rand" "os" @@ -229,7 +228,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) + require.NoError(t, os.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { diff --git a/internal/p2p/upnp/upnp.go b/internal/p2p/upnp/upnp.go index c00530aca..e2c8f3fcf 100644 --- a/internal/p2p/upnp/upnp.go +++ b/internal/p2p/upnp/upnp.go @@ -10,7 +10,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "strconv" @@ -312,7 +312,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { return } var envelope Envelope - data, err := ioutil.ReadAll(response.Body) + data, err := io.ReadAll(response.Body) if err != nil { return } @@ -374,7 +374,7 @@ func (n *upnpNAT) AddPortMapping( // TODO: check response to see if the port was forwarded // log.Println(message, response) // JAE: - // body, err := ioutil.ReadAll(response.Body) + // body, err := io.ReadAll(response.Body) // fmt.Println(string(body), err) mappedExternalPort = externalPort _ = response diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d9f29b677..3ad5fa509 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -3,22 +3,21 @@ package indexer_test import ( "database/sql" "fmt" - "io/ioutil" "os" "testing" "time" "github.com/adlio/schema" - dockertest "github.com/ory/dockertest" + "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - kv "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" - psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + 
"github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" tmlog "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" @@ -114,7 +113,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { func readSchema() ([]*schema.Migration, error) { filename := "./sink/psql/schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index f19bbfba7..f5306801f 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -5,7 +5,6 @@ import ( "database/sql" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -227,7 +226,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { // readSchema loads the indexing database schema file func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index 3f4e63ee1..e8504ebcc 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rand" "fmt" - "io/ioutil" + "os" "testing" dbm "github.com/tendermint/tm-db" @@ -15,7 +15,7 @@ import ( ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") + dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") if err != nil { b.Errorf("failed to create temporary directory: %s", err) } diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index c8ab2b0f2..985d58f42 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -3,7 +3,6 @@ package kv import ( "context" "fmt" - "io/ioutil" "os" "testing" @@ -13,7 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" @@ -334,7 +333,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := ioutil.TempDir("", "tx_index_db") + dir, err := os.MkdirTemp("", "tx_index_db") require.NoError(b, err) defer os.RemoveAll(dir) diff --git a/internal/state/state.go b/internal/state/state.go index 1b3c8f16e..6fd632ff9 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -323,7 +323,7 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) + genDocJSON, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) } diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 84b6971b8..2075adae5 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -3,7 +3,6 @@ package statesync import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -42,7 +41,7 @@ type chunkQueue struct { // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { - dir, err := ioutil.TempDir(tempDir, "tm-statesync") + dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } @@ -87,7 +86,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := ioutil.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -229,7 +228,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { return nil, nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index e17c170bd..c3604df9d 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -1,7 +1,6 @@ package statesync import ( - "io/ioutil" "os" "testing" @@ -36,20 +35,20 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := ioutil.TempDir("", "newchunkqueue") + dir, err := os.MkdirTemp("", "newchunkqueue") require.NoError(t, err) defer os.RemoveAll(dir) queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 1) err = queue.Close() require.NoError(t, err) - files, err = ioutil.ReadDir(dir) + files, err = os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 0) } diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 4b87bd60b..37fe34fc9 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -19,7 +18,7 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } // RunWithArgs executes the given command with the specified command line args diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 0cb322344..fec49e5c1 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -2,7 +2,7 @@ package cli import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" "testing" @@ -55,7 +55,7 @@ func TestSetupEnv(t *testing.T) { } func tempDir() string { - cdir, err := ioutil.TempDir("", "test-cli") + cdir, err := os.MkdirTemp("", "test-cli") if err != nil { panic(err) } diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 3a31de04a..22d739ad7 
100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -3,7 +3,6 @@ package os_test import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -16,7 +15,7 @@ import ( ) func TestCopyFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") if err != nil { t.Fatal(err) } @@ -33,7 +32,7 @@ func TestCopyFile(t *testing.T) { if _, err := os.Stat(copyfile); os.IsNotExist(err) { t.Fatal("copy should exist") } - data, err := ioutil.ReadFile(copyfile) + data, err := os.ReadFile(copyfile) if err != nil { t.Fatal(err) } @@ -70,7 +69,7 @@ func TestTrapSignal(t *testing.T) { } func TestEnsureDir(t *testing.T) { - tmp, err := ioutil.TempDir("", "ensure-dir") + tmp, err := os.MkdirTemp("", "ensure-dir") require.NoError(t, err) defer os.RemoveAll(tmp) @@ -84,7 +83,7 @@ func TestEnsureDir(t *testing.T) { require.NoError(t, err) // Should fail on file. - err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) require.NoError(t, err) err = tmos.EnsureDir(filepath.Join(tmp, "file"), 0755) require.Error(t, err) @@ -140,7 +139,7 @@ func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *byt // the origin is positively a non-directory and that it is ready for copying. // See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate") + tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") if err != nil { t.Fatal(err) } @@ -148,12 +147,12 @@ func TestTrickedTruncation(t *testing.T) { originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { t.Fatal(err) } // 1. Sanity check. - readWAL, err := ioutil.ReadFile(originalWALPath) + readWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } @@ -168,7 +167,7 @@ func TestTrickedTruncation(t *testing.T) { } // 3. 
Check the WAL's content - reReadWAL, err := ioutil.ReadFile(originalWALPath) + reReadWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } diff --git a/light/example_test.go b/light/example_test.go index d51acb925..1291670e2 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" stdlog "log" "os" "time" @@ -38,7 +37,7 @@ func ExampleClient() { } defer func() { _ = closer(ctx) }() - dbDir, err := ioutil.TempDir("", "light-client-example") + dbDir, err := os.MkdirTemp("", "light-client-example") if err != nil { stdlog.Fatal(err) } diff --git a/light/light_test.go b/light/light_test.go index 93cef603b..d88891fe9 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" "os" "testing" "time" @@ -41,7 +40,7 @@ func TestClientIntegration_Update(t *testing.T) { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-test-update-example") + dbDir, err := os.MkdirTemp("", "light-client-test-update-example") require.NoError(t, err) defer os.RemoveAll(dbDir) @@ -100,7 +99,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - dbDir, err := ioutil.TempDir("", "light-client-test-verify-example") + dbDir, err := os.MkdirTemp("", "light-client-test-verify-example") require.NoError(t, err) defer os.RemoveAll(dbDir) diff --git a/light/mbt/driver_test.go b/light/mbt/driver_test.go index bf6ab3d43..f61c3b234 100644 --- a/light/mbt/driver_test.go +++ b/light/mbt/driver_test.go @@ -1,7 +1,7 @@ package mbt import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -22,7 +22,7 @@ func TestVerify(t *testing.T) { filename := filename t.Run(filename, func(t *testing.T) { - jsonBlob, err := ioutil.ReadFile(filename) + jsonBlob, err := os.ReadFile(filename) if err != nil { t.Fatal(err) } diff --git a/privval/file.go b/privval/file.go index 4ec918c70..6a8484557 100644 --- a/privval/file.go +++ b/privval/file.go @@ -5,7 +5,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -200,7 +200,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) (*FilePV, error) { // If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. 
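All of these hunks apply the standard Go 1.16 migration away from the deprecated io/ioutil package. The replacements are drop-in, with one behavioral difference: os.ReadDir returns []fs.DirEntry rather than []fs.FileInfo (length-based assertions, as in the chunk queue tests above, are unaffected); likewise ioutil.Discard becomes io.Discard. A minimal, self-contained sketch of the mapping used throughout this patch:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        dir, _ := os.MkdirTemp("", "demo")                 // was ioutil.TempDir
        defer os.RemoveAll(dir)
        f, _ := os.CreateTemp(dir, "example-*")            // was ioutil.TempFile
        _ = os.WriteFile(f.Name(), []byte("hi"), 0600)     // was ioutil.WriteFile
        b, _ := os.ReadFile(f.Name())                      // was ioutil.ReadFile
        all, _ := io.ReadAll(strings.NewReader(string(b))) // was ioutil.ReadAll
        entries, _ := os.ReadDir(dir)                      // was ioutil.ReadDir; []fs.DirEntry now
        fmt.Println(len(all), len(entries))
    }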
func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } @@ -218,7 +218,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { return nil, err } diff --git a/privval/file_test.go b/privval/file_test.go index 680428ac2..c4314a367 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/base64" "fmt" - "io/ioutil" "os" "testing" "time" @@ -24,9 +23,9 @@ import ( func TestGenLoadValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -44,9 +43,9 @@ func TestGenLoadValidator(t *testing.T) { } func TestResetValidator(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -76,9 +75,9 @@ func TestResetValidator(t *testing.T) { func TestLoadOrGenValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) tempKeyFilePath := tempKeyFile.Name() @@ -165,9 +164,9 @@ func TestUnmarshalValidatorKey(t *testing.T) { func TestSignVote(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -219,9 +218,9 @@ func TestSignVote(t *testing.T) { func TestSignProposal(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") @@ -268,9 +267,9 @@ func TestSignProposal(t *testing.T) { } func TestDifferByTimestamp(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") 
require.Nil(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 413acca8e..75ad04d42 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "os" "time" @@ -14,7 +13,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" ) @@ -65,7 +64,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(ca) + bs, err := os.ReadFile(ca) if err != nil { log.Error("failed to read ca cert:", "error", err) os.Exit(1) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 5e95ec10c..08a285bdf 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -1,7 +1,6 @@ package privval import ( - "io/ioutil" "net" "os" "testing" @@ -29,7 +28,7 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket func testUnixAddr() (string, error) { - f, err := ioutil.TempFile("", "tendermint-privval-test-*") + f, err := os.CreateTemp("", "tendermint-privval-test-*") if err != nil { return "", err } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index fdf5da851..c242f01c4 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "io/ioutil" "os" "testing" @@ -24,7 +23,7 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { require.NoError(t, err) // start a tendermint node in the background to test against - dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) + dir, err := os.MkdirTemp("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) require.NoError(t, err) app := kvstore.NewPersistentKVStoreApplication(dir) diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 7733eb00c..03fc19be4 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -6,7 +6,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -219,7 +219,7 @@ func (c *Client) Call( defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("failed to read response body: %w", err) } @@ -267,7 +267,7 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 5a03af512..fd433d458 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -1,7 +1,7 @@ package client import ( - "io/ioutil" + "io" "log" "net/http" "net/http/httptest" @@ -21,7 +21,7 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) { defer tsTLS.Close() // This silences a TLS handshake error, caused by the dialer just 
immediately // disconnecting, which we can just ignore. - tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0) for _, testURL := range []string{ts.URL, tsTLS.URL} { u, err := newParsedURL(testURL) diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go index cd4ff0686..061622942 100644 --- a/rpc/jsonrpc/client/http_uri_client.go +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -3,7 +3,7 @@ package client import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -76,7 +76,7 @@ func (c *URIClient) Call(ctx context.Context, method string, } defer resp.Body.Close() - responseBytes, err := ioutil.ReadAll(resp.Body) + responseBytes, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index fbc0cca79..dabeee074 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -5,7 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "reflect" "sort" @@ -21,7 +21,7 @@ import ( // jsonrpc calls grab the given method's function info and runs reflect.Call func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { res := rpctypes.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err), diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 64e7597fd..94c241ca0 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -2,7 +2,7 @@ package server import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -65,7 +65,7 @@ func TestRPCParams(t *testing.T) { defer res.Body.Close() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -112,7 +112,7 @@ func TestJSONRPCID(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -142,7 +142,7 @@ func TestRPCNotification(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") @@ -178,7 +178,7 @@ func TestRPCNotificationInBatch(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -239,7 +239,7 @@ func TestRPCResponseCache(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") 
require.Equal(t, "max-age=31536000", res.Header.Get("Cache-control")) - _, err := ioutil.ReadAll(res.Body) + _, err := io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") @@ -254,7 +254,7 @@ func TestRPCResponseCache(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") require.Equal(t, "", res.Header.Get("Cache-control")) - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") } diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 39e713565..ff2776bb4 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -101,7 +101,7 @@ func TestServeTLS(t *testing.T) { defer res.Body.Close() assert.Equal(t, http.StatusOK, res.StatusCode) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) require.NoError(t, err) assert.Equal(t, []byte("some body"), body) } @@ -114,7 +114,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -136,7 +136,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) require.NoError(t, err) resp = w.Result() - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) @@ -165,7 +165,7 @@ func TestWriteRPCResponseHTTPError(t *testing.T) { err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 6a9c0e0dc..65edbc3a5 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -48,7 +47,7 @@ func (s *SnapshotStore) loadMetadata() error { file := filepath.Join(s.dir, "metadata.json") metadata := []abci.Snapshot{} - bz, err := ioutil.ReadFile(file) + bz, err := os.ReadFile(file) switch { case errors.Is(err, os.ErrNotExist): case err != nil: @@ -75,7 +74,7 @@ func (s *SnapshotStore) saveMetadata() error { // save the file to a new file and move it to make saving atomic. 
newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0644) // nolint: gosec if err != nil { return err } @@ -96,7 +95,7 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { Hash: hashItems(state.Values), Chunks: byteChunks(bz), } - err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) if err != nil { return abci.Snapshot{}, err } @@ -146,7 +145,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([ defer s.RUnlock() for _, snapshot := range s.metadata { if snapshot.Height == height && snapshot.Format == format { - bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) if err != nil { return nil, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 441926453..7376b8776 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "sort" "sync" @@ -45,7 +44,7 @@ func NewState(file string, persistInterval uint64) (*State, error) { // load loads state from disk. It does not take out a lock, since it is called // during construction. func (s *State) load() error { - bz, err := ioutil.ReadFile(s.file) + bz, err := os.ReadFile(s.file) if err != nil { return fmt.Errorf("failed to read state from %q: %w", s.file, err) } @@ -66,7 +65,7 @@ func (s *State) save() error { // We write the state to a separate file and move it to the destination, to // make it atomic. 
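This write-then-rename idiom, used by both saveMetadata and save here, is worth spelling out: on POSIX filesystems a rename within one directory atomically replaces the destination, so concurrent readers observe either the old or the new contents, never a torn write. A minimal sketch of the pattern (fsync of the file and its parent directory omitted for brevity):

    // atomicWriteFile writes data to a sibling temp file, then renames it
    // over path; the rename is the atomic step.
    func atomicWriteFile(path string, data []byte) error {
        tmp := path + ".new"
        if err := os.WriteFile(tmp, data, 0644); err != nil {
            return err
        }
        return os.Rename(tmp, path)
    }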
newFile := fmt.Sprintf("%v.new", s.file) - err = ioutil.WriteFile(newFile, bz, 0644) + err = os.WriteFile(newFile, bz, 0644) if err != nil { return fmt.Errorf("failed to write state to %q: %w", s.file, err) } diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 720357fae..2773bcd0e 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -5,8 +5,8 @@ import ( "context" "errors" "fmt" - "io/ioutil" "math/rand" + "os" "path/filepath" "time" @@ -232,7 +232,7 @@ func getRandomValidatorIndex(privVals []types.MockPV, vals *types.ValidatorSet) } func readPrivKey(keyFilePath string) (crypto.PrivKey, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 0bead8864..39967e529 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -51,7 +50,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) + err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) if err != nil { return err } @@ -92,7 +91,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) + err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err } @@ -410,11 +409,11 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { // FIXME Apparently there's no function to simply load a config file without // involving the entire Viper apparatus, so we'll just resort to regexps. 
- bz, err := ioutil.ReadFile(cfgPath) + bz, err := os.ReadFile(cfgPath) if err != nil { return err } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) - return ioutil.WriteFile(cfgPath, bz, 0644) + return os.WriteFile(cfgPath, bz, 0644) } diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/v0/fuzz_test.go index 4f8f1e9c8..210de40d1 100644 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ b/test/fuzz/mempool/v0/fuzz_test.go @@ -1,7 +1,7 @@ package v0_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) mempoolv0.Fuzz(input) }) diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go index 863697a0a..26b2c3609 100644 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ b/test/fuzz/mempool/v1/fuzz_test.go @@ -1,7 +1,7 @@ package v1_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) mempoolv1.Fuzz(input) }) diff --git a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go index d48dc4267..1f3757aa0 100644 --- a/test/fuzz/p2p/secretconnection/fuzz_test.go +++ b/test/fuzz/p2p/secretconnection/fuzz_test.go @@ -1,7 +1,7 @@ package secretconnection_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestSecretConnectionTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) secretconnection.Fuzz(input) }) diff --git a/test/fuzz/p2p/secretconnection/init-corpus/main.go b/test/fuzz/p2p/secretconnection/init-corpus/main.go index 635f2d99f..3a2537ff7 100644 --- a/test/fuzz/p2p/secretconnection/init-corpus/main.go +++ b/test/fuzz/p2p/secretconnection/init-corpus/main.go @@ -4,7 +4,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -39,7 +38,7 @@ func initCorpus(baseDir string) { for i, datum := range data { filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) - if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { + if err := os.WriteFile(filename, []byte(datum), 0644); err != nil { log.Fatalf("can't write %v to %q: %v", datum, filename, err) } diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go index 50b9194fe..41911e725 100644 --- a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go +++ b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go @@ -1,7 +1,7 @@ package server_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestServerTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) server.Fuzz(input) }) diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go 
index 08f7e2b6b..723c0e030 100644 --- a/test/fuzz/rpc/jsonrpc/server/handler.go +++ b/test/fuzz/rpc/jsonrpc/server/handler.go @@ -3,13 +3,13 @@ package server import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "github.com/tendermint/tendermint/libs/log" rs "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var rpcFuncMap = map[string]*rs.RPCFunc{ @@ -32,7 +32,7 @@ func Fuzz(data []byte) int { rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { panic(err) } diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index cf22bc836..85a589185 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -2,7 +2,6 @@ package internal import ( "fmt" - "io/ioutil" "os" "testing" "time" @@ -187,7 +186,7 @@ func cleanup(cfg TestHarnessConfig) { } func makeTempFile(name, content string) string { - tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name)) + tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-*", name)) if err != nil { panic(err) } diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go index a6d1312a1..90afd7d1f 100644 --- a/tools/tm-signer-harness/main.go +++ b/tools/tm-signer-harness/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -149,7 +148,7 @@ func extractKey(tmhome, outputPath string) { os.Exit(1) } pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey)) - if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { + if err := os.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { logger.Info("Failed to write private key", "output", outputPath, "err", err) os.Exit(1) } diff --git a/types/genesis.go b/types/genesis.go index 47580d5f7..a4b3904ab 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -5,7 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/tendermint/tendermint/crypto" @@ -51,7 +51,7 @@ func (genDoc *GenesisDoc) SaveAs(file string) error { return err } - return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec + return os.WriteFile(file, genDocBytes, 0644) // nolint:gosec } // ValidatorHash returns the hash of the validator set contained in the GenesisDoc @@ -125,7 +125,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { // GenesisDocFromFile reads JSON data from a file and unmarshals it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { - jsonBlob, err := ioutil.ReadFile(genDocFile) + jsonBlob, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } diff --git a/types/genesis_test.go b/types/genesis_test.go index 7fb3088dd..422c2125f 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -1,7 +1,6 @@ package types import ( - "io/ioutil" "os" "testing" @@ -122,7 +121,7 @@ func TestGenesisGood(t *testing.T) { } func TestGenesisSaveAs(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "genesis") + tmpfile, err := os.CreateTemp("", "genesis") require.NoError(t, err) defer os.Remove(tmpfile.Name()) diff --git a/types/node_key.go b/types/node_key.go index 547fa1696..aecbd8a21 100644 --- a/types/node_key.go +++ b/types/node_key.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -33,7 +33,7 @@ func (nodeKey NodeKey) SaveAs(filePath string) error { if err != nil { return err } - return ioutil.WriteFile(filePath, jsonBytes, 0600) + return os.WriteFile(filePath, jsonBytes, 0600) } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If @@ -67,7 +67,7 @@ func GenNodeKey() NodeKey { // LoadNodeKey loads NodeKey located in filePath. func LoadNodeKey(filePath string) (NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) + jsonBytes, err := os.ReadFile(filePath) if err != nil { return NodeKey{}, err } diff --git a/types/part_set_test.go b/types/part_set_test.go index c6ea0f452..2dfe12263 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/assert" @@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) { // Reconstruct data, assert that they are equal. data2Reader := partSet2.GetReader() - data2, err := ioutil.ReadAll(data2Reader) + data2, err := io.ReadAll(data2Reader) require.NoError(t, err) assert.Equal(t, data, data2) From 5cc980698a3402afce76b26693ab54b8f67f038b Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Fri, 29 Oct 2021 10:19:06 +0200 Subject: [PATCH 133/174] mempool: consolidate implementations (#7171) * mempool: consolidate implementations * update changelog * fix test * Apply suggestions from code review Co-authored-by: M. J. Fromberger * cleanup locking comments * context twiddle * migrate away from deprecated ioutil APIs (#7175) Co-authored-by: Callum Waters Co-authored-by: M. J. Fromberger Co-authored-by: M. J. Fromberger Co-authored-by: Callum Waters Co-authored-by: M. J.
Fromberger --- CHANGELOG_PENDING.md | 1 + config/config.go | 5 - config/toml.go | 5 - internal/consensus/byzantine_test.go | 10 +- internal/consensus/common_test.go | 12 +- internal/consensus/reactor_test.go | 10 +- internal/mempool/mempool.go | 954 +++++++++++++++--- .../mempool/{v1 => }/mempool_bench_test.go | 5 +- internal/mempool/{v1 => }/mempool_test.go | 19 +- internal/mempool/mock/mempool.go | 2 +- internal/mempool/{v1 => }/priority_queue.go | 2 +- .../mempool/{v1 => }/priority_queue_test.go | 2 +- internal/mempool/{v1 => }/reactor.go | 15 +- internal/mempool/{v1 => }/reactor_test.go | 25 +- internal/mempool/tx.go | 276 +++++ internal/mempool/{v1 => }/tx_test.go | 2 +- internal/mempool/types.go | 143 +++ internal/mempool/v0/bench_test.go | 107 -- internal/mempool/v0/cache_test.go | 83 -- internal/mempool/v0/clist_mempool.go | 698 ------------- internal/mempool/v0/clist_mempool_test.go | 691 ------------- internal/mempool/v0/doc.go | 23 - internal/mempool/v0/reactor.go | 392 ------- internal/mempool/v0/reactor_test.go | 393 -------- internal/mempool/v1/mempool.go | 887 ---------------- internal/mempool/v1/tx.go | 281 ------ node/node_test.go | 23 +- node/setup.go | 99 +- test/e2e/generator/generate.go | 8 - test/e2e/generator/generate_test.go | 3 - test/e2e/runner/setup.go | 4 - test/fuzz/mempool/{v0 => }/checktx.go | 22 +- test/fuzz/mempool/{v1 => }/fuzz_test.go | 6 +- .../mempool/{v0 => }/testdata/cases/empty | 0 test/fuzz/mempool/v0/fuzz_test.go | 33 - test/fuzz/mempool/v1/checktx.go | 37 - test/fuzz/mempool/v1/testdata/cases/empty | 0 37 files changed, 1373 insertions(+), 3905 deletions(-) rename internal/mempool/{v1 => }/mempool_bench_test.go (87%) rename internal/mempool/{v1 => }/mempool_test.go (96%) rename internal/mempool/{v1 => }/priority_queue.go (99%) rename internal/mempool/{v1 => }/priority_queue_test.go (99%) rename internal/mempool/{v1 => }/reactor.go (97%) rename internal/mempool/{v1 => }/reactor_test.go (93%) rename internal/mempool/{v1 => }/tx_test.go (99%) create mode 100644 internal/mempool/types.go delete mode 100644 internal/mempool/v0/bench_test.go delete mode 100644 internal/mempool/v0/cache_test.go delete mode 100644 internal/mempool/v0/clist_mempool.go delete mode 100644 internal/mempool/v0/clist_mempool_test.go delete mode 100644 internal/mempool/v0/doc.go delete mode 100644 internal/mempool/v0/reactor.go delete mode 100644 internal/mempool/v0/reactor_test.go delete mode 100644 internal/mempool/v1/mempool.go delete mode 100644 internal/mempool/v1/tx.go rename test/fuzz/mempool/{v0 => }/checktx.go (58%) rename test/fuzz/mempool/{v1 => }/fuzz_test.go (84%) rename test/fuzz/mempool/{v0 => }/testdata/cases/empty (100%) delete mode 100644 test/fuzz/mempool/v0/fuzz_test.go delete mode 100644 test/fuzz/mempool/v1/checktx.go delete mode 100644 test/fuzz/mempool/v1/testdata/cases/empty diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 523c8480b..4735dbcb9 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -14,6 +14,7 @@ Special thanks to external contributors on this release: - [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair) - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish) + - [mempool] \#7171 Remove legacy mempool implementation. 
(@tychoish) - Apps diff --git a/config/config.go b/config/config.go index e0a5c91ef..3ee62ed88 100644 --- a/config/config.go +++ b/config/config.go @@ -27,9 +27,6 @@ const ( ModeFull = "full" ModeValidator = "validator" ModeSeed = "seed" - - MempoolV0 = "v0" - MempoolV1 = "v1" ) // NOTE: Most of the structs & relevant comments + the @@ -693,7 +690,6 @@ func TestP2PConfig() *P2PConfig { // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - Version string `mapstructure:"version"` RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` @@ -743,7 +739,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Version: MempoolV1, Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement diff --git a/config/toml.go b/config/toml.go index 832c62564..0baf4859f 100644 --- a/config/toml.go +++ b/config/toml.go @@ -327,11 +327,6 @@ recv-rate = {{ .P2P.RecvRate }} ####################################################### [mempool] -# Mempool version to use: -# 1) "v0" - The legacy non-prioritized mempool reactor. -# 2) "v1" (default) - The prioritized mempool reactor. -version = "{{ .Mempool.Version }}" - recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index ab8d12205..717934ad1 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -16,7 +16,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/evidence" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -69,8 +69,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proxyAppConnCon := abciclient.NewLocalClient(mtx, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index a44fccc98..5a50aad21 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -21,7 +21,7 @@ import ( "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" @@ -420,8 +420,14 @@ func newStateWithConfigAndBlockStore( proxyAppConnCon := abciclient.NewLocalClient(mtx, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - 
mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) + if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index e17404960..13141e170 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/crypto/encoding" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" sm "github.com/tendermint/tendermint/internal/state" @@ -353,8 +352,13 @@ func TestReactorWithEvidence(t *testing.T) { proxyAppConnMem := abciclient.NewLocalClient(mtx, app) proxyAppConnCon := abciclient.NewLocalClient(mtx, app) - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) + if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 6e3955dc3..ec7ef2e15 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,143 +1,869 @@ package mempool import ( + "bytes" "context" + "errors" "fmt" - "math" + "reflect" + "sync/atomic" + "time" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) -const ( - MempoolChannel = p2p.ChannelID(0x30) +var _ Mempool = (*TxMempool)(nil) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind - PeerCatchupSleepIntervalMS = 100 +// TxMempoolOption sets an optional parameter on the TxMempool. +type TxMempoolOption func(*TxMempool) - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) - UnknownPeerID uint16 = 0 +// TxMempool defines a prioritized mempool data structure used by the v1 mempool +// reactor. It keeps a thread-safe priority queue of transactions that is used +// when a block proposer constructs a block and a thread-safe linked-list that +// is used to gossip transactions to peers in a FIFO manner. +type TxMempool struct { + logger log.Logger + metrics *Metrics + config *config.MempoolConfig + proxyAppConn proxy.AppConnMempool - MaxActiveIDs = math.MaxUint16 -) + // txsAvailable fires once for each height when the mempool is not empty + txsAvailable chan struct{} + notifiedTxsAvailable bool -// Mempool defines the mempool interface. + // height defines the last block height processed during Update() + height int64 + + // sizeBytes defines the total size of the mempool (sum of all tx bytes) + sizeBytes int64 + + // cache defines a fixed-size cache of already seen transactions as this + // reduces pressure on the proxyApp.
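The TxCache stored in the next field needs only a small surface, and the no-op and LRU implementations referenced in the constructor below both satisfy it. A sketch consistent with how this file uses it (Push on every CheckTx, Remove on rejection or eviction, Reset on Flush):

    // TxCache as exercised by TxMempool: Push reports whether the tx was
    // newly added (false means it was already cached), Remove evicts a tx
    // so it can be resubmitted, and Reset clears the cache entirely.
    type TxCache interface {
        Reset()
        Push(tx types.Tx) bool
        Remove(tx types.Tx)
    }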
+ cache TxCache + + // txStore defines the main storage of valid transactions. Indexes are built + // on top of this store. + txStore *TxStore + + // gossipIndex defines the gossiping index of valid transactions via a + // thread-safe linked-list. We also use the gossip index as a cursor for + // rechecking transactions already in the mempool. + gossipIndex *clist.CList + + // recheckCursor and recheckEnd are used as cursors based on the gossip index + // to recheck transactions that are already in the mempool. Iteration is not + // thread-safe and transactions may be mutated in serial order. + // + // XXX/TODO: It might be somewhat of a code smell to use the gossip index for + // iterator and cursor management when rechecking transactions. If the gossip + // index changes or is removed in a future refactor, this will have to be + // refactored. Instead, we should consider just keeping a slice of a snapshot + // of the mempool's current transactions during Update and an integer cursor + // into that slice. This, however, requires additional O(n) space complexity. + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // priorityIndex defines the priority index of valid transactions via a + // thread-safe priority queue. + priorityIndex *TxPriorityQueue + + // heightIndex defines a height-based, in ascending order, transaction index. + // i.e. older transactions are first. + heightIndex *WrappedTxList + + // timestampIndex defines a timestamp-based, in ascending order, transaction + // index. i.e. older transactions are first. + timestampIndex *WrappedTxList + + // A read/write lock is used to safeguard updates, insertions and deletions + // from the mempool. A read-lock is implicitly acquired when executing CheckTx, + // however, a caller must explicitly grab a write-lock via Lock when updating + // the mempool via Update(). + mtx tmsync.RWMutex + preCheck PreCheckFunc + postCheck PostCheckFunc +} + +func NewTxMempool( + logger log.Logger, + cfg *config.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...TxMempoolOption, +) *TxMempool { + + txmp := &TxMempool{ + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + height: height, + cache: NopTxCache{}, + metrics: NopMetrics(), + txStore: NewTxStore(), + gossipIndex: clist.New(), + priorityIndex: NewTxPriorityQueue(), + heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.height >= wtx2.height + }), + timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) + }), + } + + if cfg.CacheSize > 0 { + txmp.cache = NewLRUTxCache(cfg.CacheSize) + } + + proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) + + for _, opt := range options { + opt(txmp) + } + + return txmp +} + +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. +func WithPreCheck(f PreCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.preCheck = f } +} + +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value.
+func WithPostCheck(f PostCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.postCheck = f } +} + +// WithMetrics sets the mempool's metrics collector. +func WithMetrics(metrics *Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } +} + +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. +func (txmp *TxMempool) Lock() { + txmp.mtx.Lock() +} + +// Unlock releases a write-lock on the mempool. +func (txmp *TxMempool) Unlock() { + txmp.mtx.Unlock() +} + +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. +func (txmp *TxMempool) Size() int { + return txmp.txStore.Size() +} + +// SizeBytes returns the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. +func (txmp *TxMempool) SizeBytes() int64 { + return atomic.LoadInt64(&txmp.sizeBytes) +} + +// FlushAppConn executes FlushSync on the mempool's proxyAppConn. //
- SizeBytes() int64 +// NOTE: The caller must obtain a write-lock prior to execution. +func (txmp *TxMempool) FlushAppConn() error { + return txmp.proxyAppConn.FlushSync(context.Background()) } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. -type PreCheckFunc func(types.Tx) error +// WaitForNextTx returns a blocking channel that will be closed when the next +// valid transaction is available to gossip. It is thread-safe. +func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { + return txmp.gossipIndex.WaitChan() +} -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. -type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// NextGossipTx returns the next valid transaction to gossip. A caller must wait +// for WaitForNextTx to signal a transaction is available to gossip first. It is +// thread-safe. +func (txmp *TxMempool) NextGossipTx() *clist.CElement { + return txmp.gossipIndex.Front() +} -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. -func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { - return func(tx types.Tx) error { - txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block by block basis. +func (txmp *TxMempool) EnableTxsAvailable() { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() - if txSize > maxBytes { - return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) + txmp.txsAvailable = make(chan struct{}, 1) +} + +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + return txmp.txsAvailable +} + +// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires +// a read-lock and attempts to execute the application's CheckTx ABCI method via +// CheckTxAsync. We return an error if any of the following happen: +// +// - The CheckTxAsync execution fails. +// - The transaction already exists in the cache and we've already received the +// transaction from the peer. Otherwise, if it solely exists in the cache, we +// return nil. +// - The transaction size exceeds the maximum transaction size as defined by the +// configuration provided to the mempool. +// - The transaction fails Pre-Check (if it is defined). +// - The proxyAppConn fails, e.g. the buffer is full. +// +// If the mempool is full, we still execute CheckTx and attempt to find a lower +// priority transaction to evict. If such a transaction exists, we remove the +// lower priority transaction and add the new one with higher priority. +// +// NOTE: +// - The application's CheckTx implementation may panic. +// - The caller is not to explicitly require any locks for executing CheckTx.
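A caller-side sketch tying the constructor and CheckTx together; names and values are illustrative, error handling is elided, and PreCheckMaxBytes/PostCheckMaxGas are the helper constructors kept in this package:

    func submitSketch(logger log.Logger, cfg *config.MempoolConfig, app proxy.AppConnMempool) error {
        mp := NewTxMempool(logger, cfg, app, 0,
            WithPreCheck(PreCheckMaxBytes(1024)),    // reject txs over 1 KiB before CheckTx
            WithPostCheck(PostCheckMaxGas(100_000)), // bound gasWanted after CheckTx
        )

        // A zero TxInfo marks the tx as locally submitted (e.g. via RPC).
        return mp.CheckTx(context.Background(), types.Tx("key=value"), func(res *abci.Response) {
            // res.GetCheckTx() carries the application's verdict for this tx.
        }, TxInfo{})
    }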
+func (txmp *TxMempool) CheckTx( + ctx context.Context, + tx types.Tx, + cb func(*abci.Response), + txInfo TxInfo, +) error { + if ctx == nil { + ctx = context.TODO() + } + + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + if txSize := len(tx); txSize > txmp.config.MaxTxBytes { + return types.ErrTxTooLarge{ + Max: txmp.config.MaxTxBytes, + Actual: txSize, + } + } + + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return types.ErrPreCheck{Reason: err} + } + } + + if err := txmp.proxyAppConn.Error(); err != nil { + return err + } + + txHash := tx.Key() + + // We add the transaction to the mempool's cache and if the transaction already + // exists, i.e. false is returned, then we check if we've seen this transaction + // from the same sender and error if we have. Otherwise, we return nil. + if !txmp.cache.Push(tx) { + wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) + if wtx != nil && ok { + // We already have the transaction stored and we've already seen this + // transaction from txInfo.SenderID. + return types.ErrTxInCache } + txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) return nil } + + reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) + if err != nil { + txmp.cache.Remove(tx) + return err + } + + reqRes.SetCallback(func(res *abci.Response) { + if txmp.recheckCursor != nil { + panic("recheck cursor is non-nil in CheckTx callback") + } + + wtx := &WrappedTx{ + tx: tx, + hash: txHash, + timestamp: time.Now().UTC(), + height: txmp.height, + } + txmp.initTxCallback(wtx, res, txInfo) + + if cb != nil { + cb(res) + } + }) + + return nil +} + +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) + return nil + } + + return errors.New("transaction not found") +} + +// Flush empties the mempool. It acquires a read-lock, fetches all the +// transactions currently in the transaction store and removes each transaction +// from the store and all indexes and finally resets the cache. +// +// NOTE: +// - Flushing the mempool may leave the mempool in an inconsistent state. +func (txmp *TxMempool) Flush() { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + txmp.heightIndex.Reset() + txmp.timestampIndex.Reset() + + for _, wtx := range txmp.txStore.GetAllTxs() { + txmp.removeTx(wtx, false) + } + + atomic.SwapInt64(&txmp.sizeBytes, 0) + txmp.cache.Reset() +} + +// ReapMaxBytesMaxGas returns a list of transactions within the provided size +// and gas constraints. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + var ( + totalGas int64 + totalSize int64 + ) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning.
+ wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + }() + + txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) + for txmp.priorityIndex.NumTxs() > 0 { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + + // Ensure we have capacity for the transaction with respect to the + // transaction size. + if maxBytes > -1 && totalSize+size > maxBytes { + return txs[:len(txs)-1] + } + + totalSize += size + + // ensure we have capacity for the transaction with respect to total gas + gas := totalGas + wtx.gasWanted + if maxGas > -1 && gas > maxGas { + return txs[:len(txs)-1] + } + + totalGas = gas + } + + return txs +} + +// ReapMaxTxs returns a list of transactions within the provided bound on the +// number of transactions. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + numTxs := txmp.priorityIndex.NumTxs() + if max < 0 { + max = numTxs + } + + cap := tmmath.MinInt(numTxs, max) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, cap) + txs := make([]types.Tx, 0, cap) + for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + } + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + return txs +} + +// Update iterates over all the transactions provided by the block producer, +// removes them from the cache (if applicable), and removes +// the transactions from the main transaction store and associated indexes. +// If there are transactions remaining in the mempool, we initiate a +// re-CheckTx for them (if applicable), otherwise, we notify the caller that +// more transactions are available. +// +// NOTE: +// - The caller must explicitly acquire a write-lock. +func (txmp *TxMempool) Update( + blockHeight int64, + blockTxs types.Txs, + deliverTxResponses []*abci.ResponseDeliverTx, + newPreFn PreCheckFunc, + newPostFn PostCheckFunc, +) error { + + txmp.height = blockHeight + txmp.notifiedTxsAvailable = false + + if newPreFn != nil { + txmp.preCheck = newPreFn + } + if newPostFn != nil { + txmp.postCheck = newPostFn + } + + for i, tx := range blockTxs { + if deliverTxResponses[i].Code == abci.CodeTypeOK { + // add the valid committed transaction to the cache (if missing) + _ = txmp.cache.Push(tx) + } else if !txmp.config.KeepInvalidTxsInCache { + // allow invalid transactions to be re-submitted + txmp.cache.Remove(tx) + } + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { + txmp.removeTx(wtx, false) + } + } + + txmp.purgeExpiredTxs(blockHeight) + + // If there are any uncommitted transactions left in the mempool, we either + // initiate re-CheckTx per remaining transaction or notify that remaining + // transactions are left.
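For callers, the post-commit sequence around Update looks roughly like the following sketch; the write-lock satisfies the NOTE above, and flushing the app connection first drains any in-flight CheckTx callbacks before the mempool state is mutated:

    func afterCommit(mp *TxMempool, height int64, txs types.Txs, resps []*abci.ResponseDeliverTx) error {
        mp.Lock()
        defer mp.Unlock()

        if err := mp.FlushAppConn(); err != nil {
            return err
        }

        // Passing nil pre/post-check funcs leaves the existing filters in place.
        return mp.Update(height, txs, resps, nil, nil)
    }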
+ if txmp.Size() > 0 { + if txmp.config.Recheck { + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", blockHeight, + ) + txmp.updateReCheckTxs() + } else { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) + return nil +} + +// initTxCallback is the callback invoked for a new unique transaction after CheckTx +// has been executed by the ABCI application for the first time on that transaction. +// CheckTx can be called again for the same transaction later when re-checking; +// however, this callback will not be called. +// +// initTxCallback runs after the ABCI application executes CheckTx. +// It runs the postCheck hook if one is defined on the mempool. +// If the CheckTx response code is not OK, or if the postCheck hook +// reports an error, the transaction is rejected. Otherwise, we attempt to insert +// the transaction into the mempool. +// +// When inserting a transaction, we first check if there is sufficient capacity. +// If there is, the transaction is added to the txStore and all indexes. +// Otherwise, if the mempool is full, we attempt to find a lower priority transaction +// to evict in place of the new incoming transaction. If no such transaction exists, +// the new incoming transaction is rejected. +// +// NOTE: +// - An explicit lock is NOT required. +func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo TxInfo) { + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if !ok { + return + } + + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) + } + + if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { + // ignore bad transactions + txmp.logger.Info( + "rejected bad transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "peer_id", txInfo.SenderNodeID, + "code", checkTxRes.CheckTx.Code, + "post_check_err", err, + ) + + txmp.metrics.FailedTxs.Add(1) + + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) + } + if err != nil { + checkTxRes.CheckTx.MempoolError = err.Error() + } + return + } + + sender := checkTxRes.CheckTx.Sender + priority := checkTxRes.CheckTx.Priority + + if len(sender) > 0 { + if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { + txmp.logger.Error( + "rejected incoming good transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "sender", sender, + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + } + + if err := txmp.canAddTx(wtx); err != nil { + evictTxs := txmp.priorityIndex.GetEvictableTxs( + priority, + int64(wtx.Size()), + txmp.SizeBytes(), + txmp.config.MaxTxsBytes, + ) + if len(evictTxs) == 0 { + // No room for the new incoming transaction so we just remove it from + // the cache. + txmp.cache.Remove(wtx.tx) + txmp.logger.Error( + "rejected incoming good transaction; mempool full", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err.Error(), + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + + // evict existing transaction(s) + // + // NOTE: + // - The transaction, toEvict, can be removed while a concurrent + // reCheckTx callback is being executed for the same transaction.
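As a worked example of the eviction rule: if a full mempool holds transactions with priorities {5, 7, 20} and a transaction with priority 10 arrives, GetEvictableTxs may return the priority-5 and priority-7 entries, provided their combined size frees enough bytes for the newcomer; if only the priority-20 transaction remained, it returns nothing and the new transaction is rejected above.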
+ for _, toEvict := range evictTxs { + txmp.removeTx(toEvict, true) + txmp.logger.Debug( + "evicted existing good transaction; mempool full", + "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), + "old_priority", toEvict.priority, + "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_priority", wtx.priority, + ) + txmp.metrics.EvictedTxs.Add(1) + } + } + + wtx.gasWanted = checkTxRes.CheckTx.GasWanted + wtx.priority = priority + wtx.sender = sender + wtx.peers = map[uint16]struct{}{ + txInfo.SenderID: {}, + } + + txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) + txmp.metrics.Size.Set(float64(txmp.Size())) + + txmp.insertTx(wtx) + txmp.logger.Debug( + "inserted good transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "height", txmp.height, + "num_txs", txmp.Size(), + ) + txmp.notifyTxsAvailable() +} + +// defaultTxCallback is the CheckTx application callback used when a transaction +// is being re-checked (if re-checking is enabled). The caller must hold a mempool +// write-lock (via Lock()) and when executing Update(), if the mempool is non-empty +// and Recheck is enabled, then all remaining transactions will be rechecked via +// CheckTxAsync. The order transactions are rechecked must be the same as the +// order in which this callback is called. +func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { + if txmp.recheckCursor == nil { + return + } + + txmp.metrics.RecheckTimes.Add(1) + + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if !ok { + txmp.logger.Error("received incorrect type in mempool callback", + "expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(), + "got", reflect.TypeOf(res.Value).Name(), + ) + return + } + tx := req.GetCheckTx().Tx + wtx := txmp.recheckCursor.Value.(*WrappedTx) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, wtx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break + } + + txmp.logger.Error( + "re-CheckTx transaction mismatch", + "got", wtx.tx.Hash(), + "expected", types.Tx(tx).Key(), + ) + + if txmp.recheckCursor == txmp.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. + txmp.recheckCursor = nil + return + } + + txmp.recheckCursor = txmp.recheckCursor.Next() + wtx = txmp.recheckCursor.Value.(*WrappedTx) + } + + // Only evaluate transactions that have not been removed. This can happen + // if an existing transaction is evicted during CheckTx and while this + // callback is being executed for the same evicted transaction. 
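+ // The removed flag is set by removeTx and observed here via IsTxRemoved, so
+ // an evicted transaction is skipped even though its re-CheckTx response
+ // still arrives.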
+ if !txmp.txStore.IsTxRemoved(wtx.hash) {
+ var err error
+ if txmp.postCheck != nil {
+ err = txmp.postCheck(tx, checkTxRes.CheckTx)
+ }
+
+ if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
+ wtx.priority = checkTxRes.CheckTx.Priority
+ } else {
+ txmp.logger.Debug(
+ "existing transaction no longer valid; failed re-CheckTx callback",
+ "priority", wtx.priority,
+ "tx", fmt.Sprintf("%X", wtx.tx.Hash()),
+ "err", err,
+ "code", checkTxRes.CheckTx.Code,
+ )
+
+ if wtx.gossipEl != txmp.recheckCursor {
+ panic("corrupted reCheckTx cursor")
+ }
+
+ txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache)
+ }
+ }
+
+ // move reCheckTx cursor to next element
+ if txmp.recheckCursor == txmp.recheckEnd {
+ txmp.recheckCursor = nil
+ } else {
+ txmp.recheckCursor = txmp.recheckCursor.Next()
+ }
+
+ if txmp.recheckCursor == nil {
+ txmp.logger.Debug("finished rechecking transactions")
+
+ if txmp.Size() > 0 {
+ txmp.notifyTxsAvailable()
+ }
+ }
+
+ txmp.metrics.Size.Set(float64(txmp.Size()))
+}
+
+// updateReCheckTxs updates the recheck cursors using the gossipIndex. For
+// each transaction, it executes CheckTxAsync. The global callback defined on
+// the proxyAppConn will be executed for each transaction after CheckTx is
+// executed.
+//
+// NOTE:
+// - The caller must have a write-lock when executing updateReCheckTxs.
+func (txmp *TxMempool) updateReCheckTxs() {
+ if txmp.Size() == 0 {
+ panic("attempted to update re-CheckTx txs when mempool is empty")
+ }
+
+ txmp.recheckCursor = txmp.gossipIndex.Front()
+ txmp.recheckEnd = txmp.gossipIndex.Back()
+ ctx := context.Background()
+
+ for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() {
+ wtx := e.Value.(*WrappedTx)
+
+ // Only execute CheckTx if the transaction is not marked as removed which
+ // could happen if the transaction was evicted.
+ if !txmp.txStore.IsTxRemoved(wtx.hash) {
+ _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
+ Tx: wtx.tx,
+ Type: abci.CheckTxType_Recheck,
+ })
+ if err != nil {
+ // no need to retry since the tx will be rechecked after the next block
+ txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err)
+ }
+ }
+ }
+
+ if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil {
+ txmp.logger.Error("failed to flush transactions during rechecking", "err", err)
+ }
 }
 
-// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed
-// maxGas. Returns nil if maxGas is -1.
-func PostCheckMaxGas(maxGas int64) PostCheckFunc {
-	return func(tx types.Tx, res *abci.ResponseCheckTx) error {
-		if maxGas == -1 {
-			return nil
-		}
-		if res.GasWanted < 0 {
-			return fmt.Errorf("gas wanted %d is negative",
-				res.GasWanted)
-		}
-		if res.GasWanted > maxGas {
-			return fmt.Errorf("gas wanted %d is greater than max gas %d",
-				res.GasWanted, maxGas)
-		}
+
+// canAddTx returns an error if we cannot insert the provided *WrappedTx into
+// the mempool due to the mempool's configured constraints. If it returns nil,
+// the transaction can be inserted into the mempool.
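+//
+// For example, under a hypothetical config with Size = 2 and
+// MaxTxsBytes = 100, the mempool rejects a new transaction once it already
+// holds two transactions, or once the stored bytes plus wtx.Size() would
+// exceed 100.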
+func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
+ var (
+ numTxs = txmp.Size()
+ sizeBytes = txmp.SizeBytes()
+ )
-	return nil
+ if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes {
+ return types.ErrMempoolIsFull{
+ NumTxs: numTxs,
+ MaxTxs: txmp.config.Size,
+ TxsBytes: sizeBytes,
+ MaxTxsBytes: txmp.config.MaxTxsBytes,
+ }
+ }
+
+ return nil
+}
+
+func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
+ txmp.txStore.SetTx(wtx)
+ txmp.priorityIndex.PushTx(wtx)
+ txmp.heightIndex.Insert(wtx)
+ txmp.timestampIndex.Insert(wtx)
+
+ // Insert the transaction into the gossip index and record a reference to
+ // the linked-list element, which will be needed later when the transaction
+ // is removed.
+ gossipEl := txmp.gossipIndex.PushBack(wtx)
+ wtx.gossipEl = gossipEl
+
+ atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size()))
+}
+
+func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
+ if txmp.txStore.IsTxRemoved(wtx.hash) {
+ return
+ }
+
+ txmp.txStore.RemoveTx(wtx)
+ txmp.priorityIndex.RemoveTx(wtx)
+ txmp.heightIndex.Remove(wtx)
+ txmp.timestampIndex.Remove(wtx)
+
+ // Remove the transaction from the gossip index and clean up the linked-list
+ // element so it can be garbage collected.
+ txmp.gossipIndex.Remove(wtx.gossipEl)
+ wtx.gossipEl.DetachPrev()
+
+ atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))
+
+ if removeFromCache {
+ txmp.cache.Remove(wtx.tx)
+ }
+}
+
+// purgeExpiredTxs removes all transactions that have exceeded their respective
+// height- and/or time-based TTLs from their respective indexes. Every expired
+// transaction will be removed from the mempool, but preserved in the cache.
+//
+// NOTE: purgeExpiredTxs must only be called during TxMempool#Update, in which
+// the caller holds a write-lock on the mempool, so we can safely iterate over
+// the height- and time-based indexes.
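+//
+// A transaction is considered expired when TTLNumBlocks > 0 and
+// blockHeight-wtx.height > TTLNumBlocks, or when TTLDuration > 0 and
+// now.Sub(wtx.timestamp) > TTLDuration.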
+func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
+ now := time.Now()
+ expiredTxs := make(map[types.TxKey]*WrappedTx)
+
+ if txmp.config.TTLNumBlocks > 0 {
+ purgeIdx := -1
+ for i, wtx := range txmp.heightIndex.txs {
+ if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
+ expiredTxs[wtx.tx.Key()] = wtx
+ purgeIdx = i
+ } else {
+ // since the index is sorted, we know no other txs can be purged
+ break
+ }
+ }
+
+ if purgeIdx >= 0 {
+ txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
+ }
+ }
+
+ if txmp.config.TTLDuration > 0 {
+ purgeIdx := -1
+ for i, wtx := range txmp.timestampIndex.txs {
+ if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
+ expiredTxs[wtx.tx.Key()] = wtx
+ purgeIdx = i
+ } else {
+ // since the index is sorted, we know no other txs can be purged
+ break
+ }
+ }
+
+ if purgeIdx >= 0 {
+ txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
+ }
+ }
+
+ for _, wtx := range expiredTxs {
+ txmp.removeTx(wtx, false)
+ }
+}
+
+func (txmp *TxMempool) notifyTxsAvailable() {
+ if txmp.Size() == 0 {
+ panic("attempt to notify txs available but mempool is empty!")
+ }
+
+ if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable {
+ // channel cap is 1, so this will send once
+ txmp.notifiedTxsAvailable = true
+
+ select {
+ case txmp.txsAvailable <- struct{}{}:
+ default:
+ }
 }
 }
diff --git a/internal/mempool/v1/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go
similarity index 87%
rename from internal/mempool/v1/mempool_bench_test.go
rename to internal/mempool/mempool_bench_test.go
index ca23f1479..8a7938fd5 100644
--- a/internal/mempool/v1/mempool_bench_test.go
+++ b/internal/mempool/mempool_bench_test.go
@@ -1,4 +1,4 @@
-package v1
+package mempool

 import (
 "context"
@@ -8,7 +8,6 @@ import (
 "time"

 "github.com/stretchr/testify/require"
- "github.com/tendermint/tendermint/internal/mempool"
 )

 func BenchmarkTxMempool_CheckTx(b *testing.B) {
@@ -27,6 +26,6 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) {
 tx := []byte(fmt.Sprintf("%X=%d", prefix, priority))
 b.StartTimer()

- require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}))
+ require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, TxInfo{}))
 }
 }
diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/mempool_test.go
similarity index 96%
rename from internal/mempool/v1/mempool_test.go
rename to internal/mempool/mempool_test.go
index 89c1a6685..23c8c378e 100644
--- a/internal/mempool/v1/mempool_test.go
+++ b/internal/mempool/mempool_test.go
@@ -1,4 +1,4 @@
-package v1
+package mempool

 import (
 "bytes"
@@ -21,7 +21,6 @@ import (
 "github.com/tendermint/tendermint/abci/example/kvstore"
 abci "github.com/tendermint/tendermint/abci/types"
 "github.com/tendermint/tendermint/config"
- "github.com/tendermint/tendermint/internal/mempool"
 "github.com/tendermint/tendermint/libs/log"
 "github.com/tendermint/tendermint/types"
 )
@@ -97,7 +96,7 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
 func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
 txs := make([]testTx, numTxs)
- txInfo := mempool.TxInfo{SenderID: peerID}
+ txInfo := TxInfo{SenderID: peerID}

 rng := rand.New(rand.NewSource(time.Now().UnixNano()))

@@ -338,13 +337,13 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
 _, err := rng.Read(tx)
 require.NoError(t, err)

- require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
+ require.Error(t, txmp.CheckTx(context.Background(), tx,
nil, TxInfo{SenderID: 0})) tx = make([]byte, txmp.config.MaxTxBytes-1) _, err = rng.Read(tx) require.NoError(t, err) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: 0})) } func TestTxMempool_CheckTxSamePeer(t *testing.T) { @@ -358,8 +357,8 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) { tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: peerID})) + require.Error(t, txmp.CheckTx(context.Background(), tx, nil, TxInfo{SenderID: peerID})) } func TestTxMempool_CheckTxSameSender(t *testing.T) { @@ -378,9 +377,9 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50)) tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) - require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) } @@ -533,7 +532,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { } require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError) } - require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, TxInfo{SenderID: 0})) }) } } diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 8e6f0c7bf..8344220a0 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -12,7 +12,7 @@ import ( // Mempool is an empty implementation of a Mempool, useful for testing. 
type Mempool struct{} -var _ mempool.Mempool = Mempool{} +var _ Mempool = Mempool{} func (Mempool) Lock() {} func (Mempool) Unlock() {} diff --git a/internal/mempool/v1/priority_queue.go b/internal/mempool/priority_queue.go similarity index 99% rename from internal/mempool/v1/priority_queue.go rename to internal/mempool/priority_queue.go index df74a92d3..f59715abb 100644 --- a/internal/mempool/v1/priority_queue.go +++ b/internal/mempool/priority_queue.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "container/heap" diff --git a/internal/mempool/v1/priority_queue_test.go b/internal/mempool/priority_queue_test.go similarity index 99% rename from internal/mempool/v1/priority_queue_test.go rename to internal/mempool/priority_queue_test.go index c0048f388..ddc84806d 100644 --- a/internal/mempool/v1/priority_queue_test.go +++ b/internal/mempool/priority_queue_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "math/rand" diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/reactor.go similarity index 97% rename from internal/mempool/v1/reactor.go rename to internal/mempool/reactor.go index 8ef5a6bd8..6d86b3712 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/reactor.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -11,7 +11,6 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -39,7 +38,7 @@ type Reactor struct { cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.IDs + ids *IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. 
@@ -76,7 +75,7 @@ func NewReactor( cfg: cfg, peerMgr: peerMgr, mempool: txmp, - ids: mempool.NewMempoolIDs(), + ids: NewMempoolIDs(), mempoolCh: mempoolCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), @@ -101,7 +100,7 @@ func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { } return &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, + ID: MempoolChannel, MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), @@ -161,7 +160,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { return errors.New("empty txs received from peer") } - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + txInfo := TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} if len(envelope.From) != 0 { txInfo.SenderNodeID = envelope.From } @@ -198,7 +197,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err r.Logger.Debug("received message", "peer", envelope.From) switch chID { - case mempool.MempoolChannel: + case MempoolChannel: err = r.handleMempoolMessage(envelope) default: @@ -358,7 +357,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) height := r.peerMgr.GetHeight(peerID) if height > 0 && height < memTx.height-1 { // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) continue } } diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/reactor_test.go similarity index 93% rename from internal/mempool/v1/reactor_test.go rename to internal/mempool/reactor_test.go index 317f7ce73..68753cc69 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -13,7 +13,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" @@ -199,7 +198,7 @@ func TestReactorBroadcastTxs(t *testing.T) { primary := rts.nodes[0] secondaries := rts.nodes[1:] - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) // run the router rts.start(t) @@ -231,7 +230,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. update the whole mempool - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) + txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) go func() { defer wg.Done() @@ -250,7 +249,7 @@ func TestReactorConcurrency(t *testing.T) { // 1. submit a bunch of txs // 2. 
update none
- _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID)
+ _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID)

 go func() {
 defer wg.Done()
@@ -309,8 +308,8 @@ func TestReactor_MaxTxBytes(t *testing.T) {
 context.Background(),
 tx1,
 nil,
- mempool.TxInfo{
- SenderID: mempool.UnknownPeerID,
+ TxInfo{
+ SenderID: UnknownPeerID,
 },
 )
 require.NoError(t, err)
@@ -322,7 +321,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {

 // broadcast a tx, which is beyond the max size and ensure it's not sent
 tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1)
- err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
+ err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, TxInfo{SenderID: UnknownPeerID})
 require.Error(t, err)

 rts.assertMempoolChannelsDrained(t)
@@ -331,7 +330,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
 // we're creating a single node network, but not starting the
 // network.
- rts := setupReactors(t, 1, mempool.MaxActiveIDs+1)
+ rts := setupReactors(t, 1, MaxActiveIDs+1)

 nodeID := rts.nodes[0]

@@ -339,7 +338,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
 require.NoError(t, err)

 // ensure the reactor does not panic (i.e. exhaust active IDs)
- for i := 0; i < mempool.MaxActiveIDs+1; i++ {
+ for i := 0; i < MaxActiveIDs+1; i++ {
 rts.peerChans[nodeID] <- p2p.PeerUpdate{
 Status: p2p.PeerStatusUp,
 NodeID: peerID,
@@ -377,12 +376,12 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
 }

 // 0 is already reserved for UnknownPeerID
- ids := mempool.NewMempoolIDs()
+ ids := NewMempoolIDs()

 peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
 require.NoError(t, err)

- for i := 0; i < mempool.MaxActiveIDs-1; i++ {
+ for i := 0; i < MaxActiveIDs-1; i++ {
 ids.ReserveForPeer(peerID)
 }

@@ -410,7 +409,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
 }
 time.Sleep(500 * time.Millisecond)
- txs := checkTxs(t, rts.reactors[primary].mempool, 4, mempool.UnknownPeerID)
+ txs := checkTxs(t, rts.reactors[primary].mempool, 4, UnknownPeerID)
 require.Equal(t, 4, len(txs))
 require.Equal(t, 4, rts.mempools[primary].Size())
 require.Equal(t, 0, rts.mempools[secondary].Size())
diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go
index adafdf85e..af48c9ccc 100644
--- a/internal/mempool/tx.go
+++ b/internal/mempool/tx.go
@@ -1,6 +1,11 @@
 package mempool

 import (
+ "sort"
+ "time"
+
+ "github.com/tendermint/tendermint/internal/libs/clist"
+ tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 "github.com/tendermint/tendermint/types"
 )

@@ -15,3 +20,274 @@ type TxInfo struct {
 // SenderNodeID is the actual types.NodeID of the sender.
 SenderNodeID types.NodeID
 }
+
+// WrappedTx defines a wrapper around a raw transaction with additional metadata
+// that is used for indexing.
+type WrappedTx struct {
+ // tx represents the raw binary transaction data
+ tx types.Tx
+
+ // hash defines the transaction hash and the primary key used in the mempool
+ hash types.TxKey
+
+ // height defines the height at which the transaction was validated
+ height int64
+
+ // gasWanted defines the amount of gas the transaction sender requires
+ gasWanted int64
+
+ // priority defines the transaction's priority as specified by the application
+ // in the ResponseCheckTx response.
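+ //
+ // Higher-priority transactions are reaped first and, when the mempool is
+ // full, only lower-priority transactions are considered for eviction (see
+ // GetEvictableTxs). Ties are broken by timestamp (see below).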
+ priority int64
+
+ // sender defines the transaction's sender as specified by the application in
+ // the ResponseCheckTx response.
+ sender string
+
+ // timestamp is the time at which the node first received the transaction from
+ // a peer. It is used as a second dimension in prioritizing transactions when
+ // two transactions have the same priority.
+ timestamp time.Time
+
+ // peers records a mapping of all peers that sent a given transaction
+ peers map[uint16]struct{}
+
+ // heapIndex defines the index of the item in the heap
+ heapIndex int
+
+ // gossipEl references the linked-list element in the gossip index
+ gossipEl *clist.CElement
+
+ // removed marks the transaction as removed from the mempool. This is set
+ // during RemoveTx and is needed because an existing transaction in the
+ // mempool can be evicted while a reCheckTx callback is simultaneously being
+ // executed for it.
+ removed bool
+}
+
+func (wtx *WrappedTx) Size() int {
+ return len(wtx.tx)
+}
+
+// TxStore implements a thread-safe mapping of valid transaction(s).
+//
+// NOTE:
+// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
+// access is not allowed. Regardless, it is not expected for the mempool to
+// need mutative access.
+type TxStore struct {
+ mtx tmsync.RWMutex
+ hashTxs map[types.TxKey]*WrappedTx // primary index
+ senderTxs map[string]*WrappedTx // sender is defined by the ABCI application
+}
+
+func NewTxStore() *TxStore {
+ return &TxStore{
+ senderTxs: make(map[string]*WrappedTx),
+ hashTxs: make(map[types.TxKey]*WrappedTx),
+ }
+}
+
+// Size returns the total number of transactions in the store.
+func (txs *TxStore) Size() int {
+ txs.mtx.RLock()
+ defer txs.mtx.RUnlock()
+
+ return len(txs.hashTxs)
+}
+
+// GetAllTxs returns all the transactions currently in the store.
+func (txs *TxStore) GetAllTxs() []*WrappedTx {
+ txs.mtx.RLock()
+ defer txs.mtx.RUnlock()
+
+ wTxs := make([]*WrappedTx, len(txs.hashTxs))
+ i := 0
+ for _, wtx := range txs.hashTxs {
+ wTxs[i] = wtx
+ i++
+ }
+
+ return wTxs
+}
+
+// GetTxBySender returns a *WrappedTx by the transaction's sender property
+// defined by the ABCI application.
+func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
+ txs.mtx.RLock()
+ defer txs.mtx.RUnlock()
+
+ return txs.senderTxs[sender]
+}
+
+// GetTxByHash returns a *WrappedTx by the transaction's hash.
+func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
+ txs.mtx.RLock()
+ defer txs.mtx.RUnlock()
+
+ return txs.hashTxs[hash]
+}
+
+// IsTxRemoved returns true if a transaction by hash is marked as removed and
+// false otherwise.
+func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
+ txs.mtx.RLock()
+ defer txs.mtx.RUnlock()
+
+ wtx, ok := txs.hashTxs[hash]
+ if ok {
+ return wtx.removed
+ }
+
+ return false
+}
+
+// SetTx stores a *WrappedTx by its hash. If the transaction also contains a
+// non-empty sender, we additionally store the transaction by the sender as
+// defined by the ABCI application.
+func (txs *TxStore) SetTx(wtx *WrappedTx) {
+ txs.mtx.Lock()
+ defer txs.mtx.Unlock()
+
+ if len(wtx.sender) > 0 {
+ txs.senderTxs[wtx.sender] = wtx
+ }
+
+ txs.hashTxs[wtx.tx.Key()] = wtx
+}
+
+// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
+// indexes of the transaction.
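+// It also sets the removed flag on the transaction, which IsTxRemoved reports
+// to the recheck and eviction paths.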
+func (txs *TxStore) RemoveTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + delete(txs.senderTxs, wtx.sender) + } + + delete(txs.hashTxs, wtx.tx.Key()) + wtx.removed = true +} + +// TxHasPeer returns true if a transaction by hash has a given peer ID and false +// otherwise. If the transaction does not exist, false is returned. +func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return false + } + + _, ok := wtx.peers[peerID] + return ok +} + +// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the +// given peerID to the WrappedTx's set of peers that sent us this transaction. +// We return true if we've already recorded the given peer for this transaction +// and false otherwise. If the transaction does not exist by hash, we return +// (nil, false). +func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return nil, false + } + + if wtx.peers == nil { + wtx.peers = make(map[uint16]struct{}) + } + + if _, ok := wtx.peers[peerID]; ok { + return wtx, true + } + + wtx.peers[peerID] = struct{}{} + return wtx, false +} + +// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be +// used to build generic transaction indexes in the mempool. It accepts a +// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx +// references which is used during Insert in order to determine sorted order. If +// less returns true, a <= b. +type WrappedTxList struct { + mtx tmsync.RWMutex + txs []*WrappedTx + less func(*WrappedTx, *WrappedTx) bool +} + +func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { + return &WrappedTxList{ + txs: make([]*WrappedTx, 0), + less: less, + } +} + +// Size returns the number of WrappedTx objects in the list. +func (wtl *WrappedTxList) Size() int { + wtl.mtx.RLock() + defer wtl.mtx.RUnlock() + + return len(wtl.txs) +} + +// Reset resets the list of transactions to an empty list. +func (wtl *WrappedTxList) Reset() { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + wtl.txs = make([]*WrappedTx, 0) +} + +// Insert inserts a WrappedTx reference into the sorted list based on the list's +// comparator function. +func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + if i == len(wtl.txs) { + // insert at the end + wtl.txs = append(wtl.txs, wtx) + return + } + + // Make space for the inserted element by shifting values at the insertion + // index up one index. + // + // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). + wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) + wtl.txs[i] = wtx +} + +// Remove attempts to remove a WrappedTx from the sorted list. +func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + // Since the list is sorted, we evaluate all elements starting at i. Note, if + // the element does not exist, we may potentially evaluate the entire remainder + // of the list. However, a caller should not be expected to call Remove with a + // non-existing element. 
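+ // We compare *WrappedTx references directly rather than relying on ordering
+ // alone, since distinct transactions can compare equal under less.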
+ for i < len(wtl.txs) {
+ if wtl.txs[i] == wtx {
+ wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
+ return
+ }
+
+ i++
+ }
+}
diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/tx_test.go
similarity index 99%
rename from internal/mempool/v1/tx_test.go
rename to internal/mempool/tx_test.go
index fb4beafab..b68246076 100644
--- a/internal/mempool/v1/tx_test.go
+++ b/internal/mempool/tx_test.go
@@ -1,4 +1,4 @@
-package v1
+package mempool

 import (
 "fmt"
diff --git a/internal/mempool/types.go b/internal/mempool/types.go
new file mode 100644
index 000000000..6e3955dc3
--- /dev/null
+++ b/internal/mempool/types.go
@@ -0,0 +1,143 @@
+package mempool
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/internal/p2p"
+ "github.com/tendermint/tendermint/types"
+)
+
+const (
+ MempoolChannel = p2p.ChannelID(0x30)
+
+ // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind
+ PeerCatchupSleepIntervalMS = 100
+
+ // UnknownPeerID is the peer ID to use when running CheckTx when there is
+ // no peer (e.g. RPC)
+ UnknownPeerID uint16 = 0
+
+ MaxActiveIDs = math.MaxUint16
+)
+
+// Mempool defines the mempool interface.
+//
+// Updates to the mempool need to be synchronized with committing a block so
+// applications can reset their transient state on Commit.
+type Mempool interface {
+ // CheckTx executes a new transaction against the application to determine
+ // its validity and whether it should be added to the mempool.
+ CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error
+
+ // RemoveTxByKey removes a transaction, identified by its key,
+ // from the mempool.
+ RemoveTxByKey(txKey types.TxKey) error
+
+ // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
+ // bytes total with the condition that the total gasWanted must be less than
+ // maxGas.
+ //
+ // If both maxes are negative, there is no cap on the size of all returned
+ // transactions (~ all available transactions).
+ ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
+
+ // ReapMaxTxs reaps up to max transactions from the mempool. If max is
+ // negative, there is no cap on the size of all returned transactions
+ // (~ all available transactions).
+ ReapMaxTxs(max int) types.Txs
+
+ // Lock locks the mempool. The consensus engine must be able to hold the
+ // lock to safely update.
+ Lock()
+
+ // Unlock unlocks the mempool.
+ Unlock()
+
+ // Update informs the mempool that the given txs were committed and can be
+ // discarded.
+ //
+ // NOTE:
+ // 1. This should be called *after* the block is committed by consensus.
+ // 2. Lock/Unlock must be managed by the caller.
+ Update(
+ blockHeight int64,
+ blockTxs types.Txs,
+ deliverTxResponses []*abci.ResponseDeliverTx,
+ newPreFn PreCheckFunc,
+ newPostFn PostCheckFunc,
+ ) error
+
+ // FlushAppConn flushes the mempool connection to ensure async callback calls
+ // are done, e.g. from CheckTx.
+ //
+ // NOTE:
+ // 1. Lock/Unlock must be managed by caller.
+ FlushAppConn() error
+
+ // Flush removes all transactions from the mempool and caches.
+ Flush()
+
+ // TxsAvailable returns a channel which fires once for every height, and only
+ // when transactions are available in the mempool.
+ //
+ // NOTE:
+ // 1. The returned channel may be nil if EnableTxsAvailable was not called.
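+ // 2. The channel has a capacity of 1 and fires at most once per height.
+ //
+ // A minimal (hypothetical) consumer sketch, where maxBytes and maxGas are
+ // caller-supplied limits:
+ //
+ //	txmp.EnableTxsAvailable()
+ //	for range txmp.TxsAvailable() {
+ //		txs := txmp.ReapMaxBytesMaxGas(maxBytes, maxGas)
+ //		// ... propose a block containing txs ...
+ //	}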
+ TxsAvailable() <-chan struct{}
+
+ // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
+ // trigger once every height when transactions are available.
+ EnableTxsAvailable()
+
+ // Size returns the number of transactions in the mempool.
+ Size() int
+
+ // SizeBytes returns the total size of all txs in the mempool.
+ SizeBytes() int64
+}
+
+// PreCheckFunc is an optional filter executed before CheckTx that rejects the
+// transaction if an error is returned. An example would be to ensure that a
+// transaction doesn't exceed the block size.
+type PreCheckFunc func(types.Tx) error
+
+// PostCheckFunc is an optional filter executed after CheckTx that rejects the
+// transaction if an error is returned. An example would be to ensure a
+// transaction doesn't require more gas than available for the block.
+type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
+
+// PreCheckMaxBytes checks that the size of the transaction is smaller or equal
+// to the expected maxBytes.
+func PreCheckMaxBytes(maxBytes int64) PreCheckFunc {
+ return func(tx types.Tx) error {
+ txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx})
+
+ if txSize > maxBytes {
+ return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes)
+ }
+
+ return nil
+ }
+}
+
+// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed
+// maxGas. Returns nil if maxGas is -1.
+func PostCheckMaxGas(maxGas int64) PostCheckFunc {
+ return func(tx types.Tx, res *abci.ResponseCheckTx) error {
+ if maxGas == -1 {
+ return nil
+ }
+ if res.GasWanted < 0 {
+ return fmt.Errorf("gas wanted %d is negative",
+ res.GasWanted)
+ }
+ if res.GasWanted > maxGas {
+ return fmt.Errorf("gas wanted %d is greater than max gas %d",
+ res.GasWanted, maxGas)
+ }
+
+ return nil
+ }
+}
diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go
deleted file mode 100644
index 35558f857..000000000
--- a/internal/mempool/v0/bench_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package v0
-
-import (
- "context"
- "encoding/binary"
- "sync/atomic"
- "testing"
-
- "github.com/stretchr/testify/require"
- abciclient "github.com/tendermint/tendermint/abci/client"
- "github.com/tendermint/tendermint/abci/example/kvstore"
- "github.com/tendermint/tendermint/internal/mempool"
-)
-
-func BenchmarkReap(b *testing.B) {
- app := kvstore.NewApplication()
- cc := abciclient.NewLocalCreator(app)
- mp, cleanup, err := newMempoolWithApp(cc)
- require.NoError(b, err)
- defer cleanup()
-
- mp.config.Size = 100000
-
- size := 10000
- for i := 0; i < size; i++ {
- tx := make([]byte, 8)
- binary.BigEndian.PutUint64(tx, uint64(i))
- if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil {
- b.Fatal(err)
- }
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mp.ReapMaxBytesMaxGas(100000000, 10000000)
- }
-}
-
-func BenchmarkCheckTx(b *testing.B) {
- app := kvstore.NewApplication()
- cc := abciclient.NewLocalCreator(app)
- mp, cleanup, err := newMempoolWithApp(cc)
- require.NoError(b, err)
- defer cleanup()
-
- mp.config.Size = 1000000
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- tx := make([]byte, 8)
- binary.BigEndian.PutUint64(tx, uint64(i))
- b.StartTimer()
-
- if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkParallelCheckTx(b *testing.B) {
- app := kvstore.NewApplication()
- cc := abciclient.NewLocalCreator(app)
- mp, cleanup, err := newMempoolWithApp(cc)
- require.NoError(b, err)
- defer
cleanup() - - mp.config.Size = 100000000 - - var txcnt uint64 - next := func() uint64 { - return atomic.AddUint64(&txcnt, 1) - 1 - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, next()) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkCheckDuplicateTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(b, err) - defer cleanup() - - mp.config.Size = 1000000 - - for i := 0; i < b.N; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err == nil { - b.Fatal("tx should be duplicate") - } - } -} diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go deleted file mode 100644 index 953e82b8d..000000000 --- a/internal/mempool/v0/cache_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v0 - -import ( - "context" - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -func TestCacheAfterUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // reAddIndices & txsInCache can have elements > numTxsToCreate - // also assumes max index is 255 for convenience - // txs in cache also checks order of elements - tests := []struct { - numTxsToCreate int - updateIndices []int - reAddIndices []int - txsInCache []int - }{ - {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works - {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache - {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache - {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe - } - for tcIndex, tc := range tests { - for i := 0; i < tc.numTxsToCreate; i++ { - tx := types.Tx{byte(i)} - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - updateTxs := []types.Tx{} - for _, v := range tc.updateIndices { - tx := types.Tx{byte(v)} - updateTxs = append(updateTxs, tx) - } - err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - - for _, v := range tc.reAddIndices { - tx := types.Tx{byte(v)} - _ = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - } - - cache := mp.cache.(*mempool.LRUTxCache) - node := cache.GetList().Front() - counter := 0 - for node != nil { - require.NotEqual(t, len(tc.txsInCache), counter, - "cache larger than expected on testcase %d", tcIndex) - - nodeVal := node.Value.(types.TxKey) - expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) - // Reference for reading the errors: - // >>> sha256('\x00').hexdigest() - // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' - // >>> sha256('\x01').hexdigest() - // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' - 
// >>> sha256('\x02').hexdigest() - // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - - require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) - counter++ - node = node.Next() - } - require.Equal(t, len(tc.txsInCache), counter, - "cache smaller than expected on testcase %d", tcIndex) - mp.Flush() - } -} diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go deleted file mode 100644 index 0a12c7000..000000000 --- a/internal/mempool/v0/clist_mempool.go +++ /dev/null @@ -1,698 +0,0 @@ -package v0 - -import ( - "bytes" - "context" - "errors" - "sync" - "sync/atomic" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -// CListMempool is an ordered in-memory pool for transactions before they are -// proposed in a consensus round. Transaction validity is checked using the -// CheckTx abci message before the transaction is added to the pool. The -// mempool uses a concurrent list structure for storing transactions that can -// be efficiently accessed by multiple concurrent readers. -type CListMempool struct { - // Atomic integers - height int64 // the last block Update()'d to - txsBytes int64 // total size of mempool, in bytes - - // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool - txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - config *config.MempoolConfig - - // Exclusive mutex for Update method to prevent concurrent execution of - // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. - updateMtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc - - txs *clist.CList // concurrent linked-list of good txs - proxyAppConn proxy.AppConnMempool - - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated in - // serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // Map for quick access to txs to record sender in CheckTx. - // txsMap: txKey -> CElement - txsMap sync.Map - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache mempool.TxCache - - logger log.Logger - metrics *mempool.Metrics -} - -var _ mempool.Mempool = &CListMempool{} - -// CListMempoolOption sets an optional parameter on the mempool. -type CListMempoolOption func(*CListMempool) - -// NewCListMempool returns a new mempool with the given configuration and -// connection to an application. 
-func NewCListMempool( - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...CListMempoolOption, -) *CListMempool { - - mp := &CListMempool{ - config: cfg, - proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - metrics: mempool.NopMetrics(), - } - - if cfg.CacheSize > 0 { - mp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } else { - mp.cache = mempool.NopTxCache{} - } - - proxyAppConn.SetResponseCallback(mp.globalCb) - - for _, option := range options { - option(mp) - } - - return mp -} - -// NOTE: not thread safe - should only be called once, on startup -func (mem *CListMempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} - -// SetLogger sets the Logger. -func (mem *CListMempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran before CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran after CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.postCheck = f } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *mempool.Metrics) CListMempoolOption { - return func(mem *CListMempool) { mem.metrics = metrics } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Lock() { - mem.updateMtx.Lock() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Unlock() { - mem.updateMtx.Unlock() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Size() int { - return mem.txs.Len() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) SizeBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) -} - -// Lock() must be help by the caller during execution. -func (mem *CListMempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync(context.Background()) -} - -// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state. -func (mem *CListMempool) Flush() { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - _ = atomic.SwapInt64(&mem.txsBytes, 0) - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap.Range(func(key, _ interface{}) bool { - mem.txsMap.Delete(key) - return true - }) -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -// FIXME: leaking implementation details! -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. 
-// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - mem.updateMtx.RLock() - // use defer to unlock mutex because application (*local client*) might panic - defer mem.updateMtx.RUnlock() - - txSize := len(tx) - - if err := mem.isFull(txSize); err != nil { - return err - } - - if txSize > mem.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: mem.config.MaxTxBytes, - Actual: txSize, - } - } - - if mem.preCheck != nil { - if err := mem.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - // NOTE: proxyAppConn may error if tx buffer is full - if err := mem.proxyAppConn.Error(); err != nil { - return err - } - - if !mem.cache.Push(tx) { // if the transaction already exists in the cache - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true) - // TODO: consider punishing peer for dups, - // its non-trivial since invalid txs can become valid, - // but they can spam the same tx with little cost to them atm. - if loaded { - return types.ErrTxInCache - } - } - - mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - mem.cache.Remove(tx) - return err - } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. -// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// -// When rechecking, we don't need the peerID, so the recheck callback happens -// here. -func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTx to record PeerID who sent us the tx. 
-func (mem *CListMempool) reqResCb( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - externalCb func(*abci.Response), -) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, peerID, peerP2PID, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *CListMempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(memTx.tx.Key(), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(tx.Key()) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// RemoveTxByKey removes a transaction from the mempool by its TxKey index. -func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - if e, ok := mem.txsMap.Load(txKey); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement), false) - return nil - } - return errors.New("transaction not found") - } - return errors.New("invalid transaction found") -} - -func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() - ) - - if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: memSize, - MaxTxs: mem.config.Size, - TxsBytes: txsBytes, - MaxTxsBytes: mem.config.MaxTxsBytes, - } - } - - return nil -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *CListMempool) resCbFirstTime( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - res *abci.Response, -) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Check mempool isn't full again to reduce the chance of exceeding the - // limits. 
- if err := mem.isFull(len(tx)); err != nil { - // remove from cache (mempool might have a space later) - mem.cache.Remove(tx) - mem.logger.Error(err.Error()) - return - } - - memTx := &mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(peerID, true) - mem.addTx(memTx) - mem.logger.Debug( - "added good transaction", - "tx", types.Tx(tx).Hash(), - "res", r, - "height", memTx.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Debug( - "rejected bad transaction", - "tx", types.Tx(tx).Hash(), - "peerID", peerP2PID, - "res", r, - "err", postCheckErr, - ) - mem.metrics.FailedTxs.Add(1) - - if !mem.config.KeepInvalidTxsInCache { - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - } - - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, memTx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } - - mem.logger.Error( - "re-CheckTx transaction mismatch", - "got", types.Tx(tx), - "expected", memTx.tx, - ) - - if mem.recheckCursor == mem.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - mem.recheckCursor = nil - return - } - - mem.recheckCursor = mem.recheckCursor.Next() - memTx = mem.recheckCursor.Value.(*mempoolTx) - } - - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - mem.logger.Debug("done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } - } - default: - // ignore other messages - } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsAvailable() <-chan struct{} { - return mem.txsAvailable -} - -func (mem *CListMempool) notifyTxsAvailable() { - if mem.Size() == 0 { - panic("notified txs available but mempool is empty!") - } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { - // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true - select { - case mem.txsAvailable <- struct{}{}: - default: - } - } -} - -// Safe for concurrent use by multiple goroutines. 
-func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - var ( - totalGas int64 - runningSize int64 - ) - - // TODO: we will get a performance boost if we have a good estimate of avg - // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - - txs = append(txs, memTx.tx) - - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) - - // Check total size requirement - if maxBytes > -1 && runningSize+dataSize > maxBytes { - return txs[:len(txs)-1] - } - - runningSize += dataSize - - // Check total gas requirement. - // If maxGas is negative, skip this check. - // Since newTotalGas < masGas, which - // must be non-negative, it follows that this won't overflow. - newTotalGas := totalGas + memTx.gasWanted - if maxGas > -1 && newTotalGas > maxGas { - return txs[:len(txs)-1] - } - totalGas = newTotalGas - } - return txs -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - if max < 0 { - max = mem.txs.Len() - } - - txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) - } - return txs -} - -// Lock() must be help by the caller during execution. -func (mem *CListMempool) Update( - height int64, - txs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - preCheck mempool.PreCheckFunc, - postCheck mempool.PostCheckFunc, -) error { - // Set height - mem.height = height - mem.notifiedTxsAvailable = false - - if preCheck != nil { - mem.preCheck = preCheck - } - if postCheck != nil { - mem.postCheck = postCheck - } - - for i, tx := range txs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // Add valid committed tx to the cache (if missing). - _ = mem.cache.Push(tx) - } else if !mem.config.KeepInvalidTxsInCache { - // Allow invalid transactions to be resubmitted. - mem.cache.Remove(tx) - } - - // Remove committed tx from the mempool. - // - // Note an evil proposer can drop valid txs! - // Mempool before: - // 100 -> 101 -> 102 - // Block, proposed by an evil proposer: - // 101 -> 102 - // Mempool after: - // 100 - // https://github.com/tendermint/tendermint/issues/3322. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - mem.removeTx(tx, e.(*clist.CElement), false) - } - } - - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. - if mem.Size() > 0 { - if mem.config.Recheck { - mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height) - mem.recheckTxs() - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. 
-		} else {
-			mem.notifyTxsAvailable()
-		}
-	}
-
-	// Update metrics
-	mem.metrics.Size.Set(float64(mem.Size()))
-
-	return nil
-}
-
-func (mem *CListMempool) recheckTxs() {
-	if mem.Size() == 0 {
-		panic("recheckTxs is called, but the mempool is empty")
-	}
-
-	mem.recheckCursor = mem.txs.Front()
-	mem.recheckEnd = mem.txs.Back()
-
-	ctx := context.Background()
-
-	// Push txs to proxyAppConn
-	// NOTE: globalCb may be called concurrently.
-	for e := mem.txs.Front(); e != nil; e = e.Next() {
-		memTx := e.Value.(*mempoolTx)
-		_, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
-			Tx:   memTx.tx,
-			Type: abci.CheckTxType_Recheck,
-		})
-		if err != nil {
-			// No need to retry since memTx will be rechecked after the next block.
-			mem.logger.Error("Can't check tx", "err", err)
-		}
-	}
-
-	_, err := mem.proxyAppConn.FlushAsync(ctx)
-	if err != nil {
-		mem.logger.Error("Can't flush txs", "err", err)
-	}
-}
-
-//--------------------------------------------------------------------------------
-
-// mempoolTx is a transaction that successfully ran CheckTx
-type mempoolTx struct {
-	height    int64    // height that this tx had been validated in
-	gasWanted int64    // amount of gas this tx states it will require
-	tx        types.Tx //
-
-	// ids of peers who've sent us this tx (as a map for quick lookups).
-	// senders: PeerID -> bool
-	senders sync.Map
-}
-
-// Height returns the height for this transaction
-func (memTx *mempoolTx) Height() int64 {
-	return atomic.LoadInt64(&memTx.height)
-}
diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go
deleted file mode 100644
index 20d07db83..000000000
--- a/internal/mempool/v0/clist_mempool_test.go
+++ /dev/null
@@ -1,691 +0,0 @@
-package v0
-
-import (
-	"context"
-	"crypto/rand"
-	"encoding/binary"
-	"fmt"
-	mrand "math/rand"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/gogo/protobuf/proto"
-	gogotypes "github.com/gogo/protobuf/types"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-
-	abciclient "github.com/tendermint/tendermint/abci/client"
-	abciclimocks "github.com/tendermint/tendermint/abci/client/mocks"
-	"github.com/tendermint/tendermint/abci/example/kvstore"
-	abciserver "github.com/tendermint/tendermint/abci/server"
-	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/mempool"
-	"github.com/tendermint/tendermint/libs/log"
-	tmrand "github.com/tendermint/tendermint/libs/rand"
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-// A cleanupFunc cleans up any config / test files created for a particular
-// test.
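The test helpers that follow return the constructed mempool together with such a cleanup closure, so callers can simply `defer cleanup()`. A minimal sketch of that constructor-plus-cleanup pattern, using a temporary directory as a stand-in resource (the helper name is illustrative):

    package main

    import (
    	"fmt"
    	"os"
    )

    // newTempWorkspace mirrors the helper pattern below: build a resource and
    // hand back a closure that tears it down, so tests can `defer cleanup()`.
    func newTempWorkspace() (dir string, cleanup func(), err error) {
    	dir, err = os.MkdirTemp("", "mempool_test")
    	if err != nil {
    		// return a no-op cleanup so callers can defer it unconditionally
    		return "", func() {}, err
    	}
    	return dir, func() { os.RemoveAll(dir) }, nil
    }

    func main() {
    	dir, cleanup, err := newTempWorkspace()
    	if err != nil {
    		panic(err)
    	}
    	defer cleanup()
    	fmt.Println("workspace:", dir)
    }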
-type cleanupFunc func() - -func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc, error) { - conf, err := config.ResetTestRoot("mempool_test") - if err != nil { - return nil, func() {}, err - } - - mp, cu := newMempoolWithAppAndConfig(cc, conf) - return mp, cu, nil -} - -func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc() - appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() - if err != nil { - panic(err) - } - - mp := NewCListMempool(cfg.Mempool, appConnMem, 0) - mp.SetLogger(log.TestingLogger()) - - return mp, func() { os.RemoveAll(cfg.RootDir) } -} - -func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - t.Fatal("Expected not to fire") - case <-timer.C: - } -} - -func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - case <-timer.C: - t.Fatal("Expected to fire") - } -} - -func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { - txs := make(types.Txs, count) - txInfo := mempool.TxInfo{SenderID: peerID} - for i := 0; i < count; i++ { - txBytes := make([]byte, 20) - txs[i] = txBytes - _, err := rand.Read(txBytes) - if err != nil { - t.Error(err) - } - if err := mp.CheckTx(context.Background(), txBytes, nil, txInfo); err != nil { - // Skip invalid txs. - // TestMempoolFilters will fail otherwise. It asserts a number of txs - // returned. - if types.IsPreCheckError(err) { - continue - } - t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) - } - } - return txs -} - -func TestReapMaxBytesMaxGas(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // Ensure gas calculation behaves as expected - checkTxs(t, mp, 1, mempool.UnknownPeerID) - tx0 := mp.TxsFront().Value.(*mempoolTx) - // assert that kv store has gas wanted = 1. - require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") - require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") - // ensure each tx is 20 bytes long - require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") - mp.Flush() - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
- // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - maxBytes int64 - maxGas int64 - expectedNumTxs int - }{ - {20, -1, -1, 20}, - {20, -1, 0, 0}, - {20, -1, 10, 10}, - {20, -1, 30, 20}, - {20, 0, -1, 0}, - {20, 0, 10, 0}, - {20, 10, 10, 0}, - {20, 24, 10, 1}, - {20, 240, 5, 5}, - {20, 240, -1, 10}, - {20, 240, 10, 10}, - {20, 240, 15, 10}, - {20, 20000, -1, 20}, - {20, 20000, 5, 5}, - {20, 20000, 30, 20}, - } - for tcIndex, tt := range tests { - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) - assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", - len(got), tt.expectedNumTxs, tcIndex) - mp.Flush() - } -} - -func TestMempoolFilters(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - emptyTxArr := []types.Tx{[]byte{}} - - nopPreFilter := func(tx types.Tx) error { return nil } - nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil } - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - preFilter mempool.PreCheckFunc - postFilter mempool.PostCheckFunc - expectedNumTxs int - }{ - {10, nopPreFilter, nopPostFilter, 10}, - {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, - {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, - {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, - {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, - } - for tcIndex, tt := range tests { - err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) - require.NoError(t, err) - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) - mp.Flush() - } -} - -func TestMempoolUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // 1. Adds valid txs to the cache - { - err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. Removes valid txs from the mempool - { - err := mp.CheckTx(context.Background(), []byte{0x02}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - } - - // 3. 
Removes invalid transactions from the cache and the mempool (if present) - { - err := mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - - err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { - var callback abciclient.Callback - mockClient := new(abciclimocks.Client) - mockClient.On("Start").Return(nil) - mockClient.On("SetLogger", mock.Anything) - - mockClient.On("Error").Return(nil).Times(4) - mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil) - mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) - - cc := func() (abciclient.Client, error) { - return mockClient, nil - } - - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them. - txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}} - for _, tx := range txs { - reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx})) - reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK}) - // SetDone allows the ReqRes to process its callback synchronously. - // This simulates the Response being ready for the client immediately. - reqRes.SetDone() - - mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // Calling update to remove the first transaction from the mempool. - // This call also triggers the mempool to recheck its remaining transactions. - err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.Nil(t, err) - - // The mempool has now sent its requests off to the client to be rechecked - // and is waiting for the corresponding callbacks to be called. - // We now call the mempool-supplied callback on the first and third transaction. - // This simulates the client dropping the second request. - // Previous versions of this code panicked when the ABCI application missed - // a recheck-tx request. - resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK} - req := abci.RequestCheckTx{Tx: txs[1]} - callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) - - req = abci.RequestCheckTx{Tx: txs[3]} - callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) - mockClient.AssertExpectations(t) -} - -func TestMempool_KeepInvalidTxsInCache(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - wcfg := config.DefaultConfig() - wcfg.Mempool.KeepInvalidTxsInCache = true - mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) - defer cleanup() - - // 1. 
An invalid transaction must remain in the cache after Update - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, 1) - - err := mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // simulate new block - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: a}) - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: b}) - err = mp.Update(1, []types.Tx{a, b}, - []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil) - require.NoError(t, err) - - // a must be added to the cache - err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // b must remain in the cache - err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. An invalid transaction must remain in the cache - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - // remove a from the cache to test (2) - mp.cache.Remove(a) - - err := mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestTxsAvailable(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - mp.EnableTxsAvailable() - - timeoutMS := 500 - - // with no txs, it shouldnt fire - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch of txs, it should only fire once - txs := checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // call update with half the txs. - // it should fire once now for the new height - // since there are still txs left - committedTxs, txs := txs[:50], txs[50:] - if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // now call update with all the txs. it should not fire as there are no txs left - committedTxs = append(txs, moreTxs...) //nolint: gocritic - if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs, it should only fire once - checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) -} - -func TestSerialReap(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - - cacheMap := make(map[string]struct{}) - deliverTxsRange := func(start, end int) { - // Deliver some txs. 
- for i := start; i < end; i++ { - - // This will succeed - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - _, cached := cacheMap[string(txBytes)] - if cached { - require.NotNil(t, err, "expected error for cached tx") - } else { - require.Nil(t, err, "expected no err for uncached tx") - } - cacheMap[string(txBytes)] = struct{}{} - - // Duplicates are cached and should return error - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") - } - } - - reapCheck := func(exp int) { - txs := mp.ReapMaxBytesMaxGas(-1, -1) - require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) - } - - updateRange := func(start, end int) { - txs := make([]types.Tx, 0) - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - txs = append(txs, txBytes) - } - if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - } - - commitRange := func(start, end int) { - ctx := context.Background() - // Deliver some txs. - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - if err != nil { - t.Errorf("client error committing tx: %v", err) - } - if res.IsErr() { - t.Errorf("error committing tx. Code:%v result:%X log:%v", - res.Code, res.Data, res.Log) - } - } - res, err := appConnCon.CommitSync(ctx) - if err != nil { - t.Errorf("client error committing: %v", err) - } - if len(res.Data) != 8 { - t.Errorf("error committing. Hash:%X", res.Data) - } - } - - //---------------------------------------- - - // Deliver some txs. - deliverTxsRange(0, 100) - - // Reap the txs. - reapCheck(100) - - // Reap again. We should get the same amount - reapCheck(100) - - // Deliver 0 to 999, we should reap 900 new txs - // because 100 were already counted. - deliverTxsRange(0, 1000) - - // Reap the txs. - reapCheck(1000) - - // Reap again. We should get the same amount - reapCheck(1000) - - // Commit from the conensus AppConn - commitRange(0, 500) - updateRange(0, 500) - - // We should have 500 left. - reapCheck(500) - - // Deliver 100 invalid txs and 100 valid txs - deliverTxsRange(900, 1100) - - // We should have 600 now. - reapCheck(600) -} - -func TestMempool_CheckTxChecksTxSize(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mempl, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - maxTxSize := mempl.config.MaxTxBytes - - testCases := []struct { - len int - err bool - }{ - // check small txs. 
no error - 0: {10, false}, - 1: {1000, false}, - 2: {1000000, false}, - - // check around maxTxSize - 3: {maxTxSize - 1, false}, - 4: {maxTxSize, false}, - 5: {maxTxSize + 1, true}, - } - - for i, testCase := range testCases { - caseString := fmt.Sprintf("case %d, len %d", i, testCase.len) - - tx := tmrand.Bytes(testCase.len) - - err := mempl.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - bv := gogotypes.BytesValue{Value: tx} - bz, err2 := bv.Marshal() - require.NoError(t, err2) - require.Equal(t, len(bz), proto.Size(&bv), caseString) - - if !testCase.err { - require.NoError(t, err, caseString) - } else { - require.Equal(t, err, types.ErrTxTooLarge{ - Max: maxTxSize, - Actual: testCase.len, - }, caseString) - } - } -} - -func TestMempoolTxsBytes(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - cfg, err := config.ResetTestRoot("mempool_test") - require.NoError(t, err) - - cfg.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // 1. zero by default - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 2. len(tx) after CheckTx - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 1, mp.SizeBytes()) - - // 3. zero again after tx is removed by Update - err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 4. zero after Flush - err = mp.CheckTx(context.Background(), []byte{0x02, 0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 2, mp.SizeBytes()) - - mp.Flush() - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. - err = mp.CheckTx( - context.Background(), - []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, - nil, - mempool.TxInfo{}, - ) - require.NoError(t, err) - - err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{}) - if assert.Error(t, err) { - assert.IsType(t, types.ErrMempoolIsFull{}, err) - } - - // 6. zero after tx is rechecked and removed due to not being valid anymore - app2 := kvstore.NewApplication() - cc = abciclient.NewLocalCreator(app2) - mp, cleanup, err = newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(0)) - - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := appConnCon.Stop(); err != nil { - t.Error(err) - } - }) - ctx := context.Background() - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - require.NoError(t, err) - require.EqualValues(t, 0, res.Code) - res2, err := appConnCon.CommitSync(ctx) - require.NoError(t, err) - require.NotEmpty(t, res2.Data) - - // Pretend like we committed nothing so txBytes gets rechecked and removed. - err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - // 7. 
Test RemoveTxByKey function - err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key())) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key())) - assert.EqualValues(t, 8, mp.SizeBytes()) - -} - -// This will non-deterministically catch some concurrency failures like -// https://github.com/tendermint/tendermint/issues/3509 -// TODO: all of the tests should probably also run using the remote proxy app -// since otherwise we're not actually testing the concurrency of the mempool here! -func TestMempoolRemoteAppConcurrency(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - app := kvstore.NewApplication() - cc, server := newRemoteApp(t, sockPath, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - cfg, err := config.ResetTestRoot("mempool_test") - require.NoError(t, err) - - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // generate small number of txs - nTxs := 10 - txLen := 200 - txs := make([]types.Tx, nTxs) - for i := 0; i < nTxs; i++ { - txs[i] = tmrand.Bytes(txLen) - } - - // simulate a group of peers sending them over and over - N := cfg.Mempool.Size - maxPeers := 5 - for i := 0; i < N; i++ { - peerID := mrand.Intn(maxPeers) - txNum := mrand.Intn(nTxs) - tx := txs[txNum] - - // this will err with ErrTxInCache many times ... - mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error - } - err = mp.FlushAppConn() - require.NoError(t, err) -} - -// caller must close server -func newRemoteApp( - t *testing.T, - addr string, - app abci.Application, -) ( - clientCreator abciclient.Creator, - server service.Service, -) { - clientCreator = abciclient.NewRemoteCreator(addr, "socket", true) - - // Start server - server = abciserver.NewSocketServer(addr, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - return clientCreator, server -} - -func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { - responses := make([]*abci.ResponseDeliverTx, 0, n) - for i := 0; i < n; i++ { - responses = append(responses, &abci.ResponseDeliverTx{Code: code}) - } - return responses -} diff --git a/internal/mempool/v0/doc.go b/internal/mempool/v0/doc.go deleted file mode 100644 index 3b5d0d20d..000000000 --- a/internal/mempool/v0/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// The mempool pushes new txs onto the proxyAppConn. -// It gets a stream of (req, res) tuples from the proxy. -// The mempool stores good txs in a concurrent linked-list. - -// Multiple concurrent go-routines can traverse this linked-list -// safely by calling .NextWait() on each element. - -// So we have several go-routines: -// 1. Consensus calling Update() and ReapMaxBytesMaxGas() synchronously -// 2. Many mempool reactor's peer routines calling CheckTx() -// 3. Many mempool reactor's peer routines traversing the txs linked list - -// To manage these goroutines, there are three methods of locking. -// 1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) -// 2. Mutations to the linked-list elements are atomic -// 3. 
CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx
-
-// Garbage collection of old elements from mempool.txs is handled via the
-// DetachPrev() call, which makes old elements not reachable by peer
-// broadcastTxRoutine().
-
-// TODO: Better handle abci client errors. (make it automatically handle connection errors)
-package v0
diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go
deleted file mode 100644
index 118321645..000000000
--- a/internal/mempool/v0/reactor.go
+++ /dev/null
@@ -1,392 +0,0 @@
-package v0
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"runtime/debug"
-	"sync"
-	"time"
-
-	"github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/libs/clist"
-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	"github.com/tendermint/tendermint/internal/mempool"
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/libs/service"
-	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
-	"github.com/tendermint/tendermint/types"
-)
-
-var (
-	_ service.Service = (*Reactor)(nil)
-	_ p2p.Wrapper     = (*protomem.Message)(nil)
-)
-
-// PeerManager defines the interface contract required for getting necessary
-// peer information. This should eventually be replaced with a message-oriented
-// approach utilizing the p2p stack.
-type PeerManager interface {
-	GetHeight(types.NodeID) int64
-}
-
-// Reactor implements a service that contains a mempool of txs that are
-// broadcast among peers. It maintains a map from peer ID to counter, to
-// prevent gossiping txs to the peers from which they were received.
-type Reactor struct {
-	service.BaseService
-
-	cfg     *config.MempoolConfig
-	mempool *CListMempool
-	ids     *mempool.IDs
-
-	// XXX: Currently, this is the only way to get information about a peer. Ideally,
-	// we rely on message-oriented communication to get necessary peer data.
-	// ref: https://github.com/tendermint/tendermint/issues/5670
-	peerMgr PeerManager
-
-	mempoolCh   *p2p.Channel
-	peerUpdates *p2p.PeerUpdates
-	closeCh     chan struct{}
-
-	// peerWG is used to coordinate graceful termination of all peer broadcasting
-	// goroutines.
-	peerWG sync.WaitGroup
-
-	mtx          tmsync.Mutex
-	peerRoutines map[types.NodeID]*tmsync.Closer
-}
-
-// NewReactor returns a reference to a new reactor.
-func NewReactor(
-	logger log.Logger,
-	cfg *config.MempoolConfig,
-	peerMgr PeerManager,
-	mp *CListMempool,
-	mempoolCh *p2p.Channel,
-	peerUpdates *p2p.PeerUpdates,
-) *Reactor {
-
-	r := &Reactor{
-		cfg:          cfg,
-		peerMgr:      peerMgr,
-		mempool:      mp,
-		ids:          mempool.NewMempoolIDs(),
-		mempoolCh:    mempoolCh,
-		peerUpdates:  peerUpdates,
-		closeCh:      make(chan struct{}),
-		peerRoutines: make(map[types.NodeID]*tmsync.Closer),
-	}
-
-	r.BaseService = *service.NewBaseService(logger, "Mempool", r)
-	return r
-}
-
-// GetChannelDescriptor produces an instance of a descriptor for this
-// package's required channels.
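The function below derives RecvMessageCapacity by building the largest message the channel may legally carry (a batch holding one maximally sized tx) and asking the generated proto message for its exact Size(). A toy version of that sizing idea; the constant framing overhead here is an assumption for illustration, whereas the real code computes it exactly:

    package main

    import "fmt"

    // recvCapacity mirrors the idea in GetChannelDescriptor: size the receive
    // buffer from the largest legal message. The proto framing overhead is
    // approximated with a small constant instead of a generated Size() call.
    func recvCapacity(maxTxBytes int) int {
    	const protoOverhead = 16 // assumed worst-case field/length-prefix overhead
    	return maxTxBytes + protoOverhead
    }

    func main() {
    	fmt.Println(recvCapacity(1024)) // capacity for one max-size tx plus framing
    }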
-func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { - largestTx := make([]byte, cfg.MaxTxBytes) - batchMsg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, - }, - } - - return &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - } -} - -// OnStart starts separate go routines for each p2p Channel and listens for -// envelopes on each. In addition, it also listens for peer updates and handles -// messages on that p2p channel accordingly. The caller must be sure to execute -// OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { - if !r.cfg.Broadcast { - r.Logger.Info("tx broadcasting is disabled") - } - - go r.processMempoolCh() - go r.processPeerUpdates() - - return nil -} - -// OnStop stops the reactor by signaling to all spawned goroutines to exit and -// blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // wait for all spawned peer tx broadcasting goroutines to gracefully exit - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() -} - -// handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. -// For every tx in the message, we execute CheckTx. It returns an error if an -// empty set of txs are sent in an envelope or if we receive an unexpected -// message type. -func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) - - switch msg := envelope.Message.(type) { - case *protomem.Txs: - protoTxs := msg.GetTxs() - if len(protoTxs) == 0 { - return errors.New("empty txs received from peer") - } - - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} - if len(envelope.From) != 0 { - txInfo.SenderNodeID = envelope.From - } - - for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) - } - } - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. -// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel. 
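handleMessage below converts a panic raised while processing an envelope into an ordinary error via a deferred recover, so one malformed message cannot take down the reactor. The pattern in isolation, with a hypothetical fn argument standing in for the per-channel handler:

    package main

    import "fmt"

    // handle converts any panic raised by fn into an error, mirroring the
    // deferred-recover shape used by handleMessage below.
    func handle(fn func()) (err error) {
    	defer func() {
    		if e := recover(); e != nil {
    			err = fmt.Errorf("panic in processing message: %v", e)
    		}
    	}()
    	fn()
    	return nil
    }

    func main() {
    	err := handle(func() { panic("bad envelope") })
    	fmt.Println(err) // panic in processing message: bad envelope
    }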
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) - } - }() - - r.Logger.Debug("received message", "peer", envelope.From) - - switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) - - default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) - } - - return err -} - -// processMempoolCh implements a blocking event loop where we listen for p2p -// Envelope messages from the mempoolCh. -func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return - } - } -} - -// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we -// check if the reactor is running and if we've already started a tx broadcasting -// goroutine or not. If not, we start one for the newly added peer. For down or -// removed peers, we remove the peer from the mempool peer ID set and signal to -// stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) - - r.mtx.Lock() - defer r.mtx.Unlock() - - switch peerUpdate.Status { - case p2p.PeerStatusUp: - // Do not allow starting new tx broadcast loops after reactor shutdown - // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. - if !r.IsRunning() { - return - } - - if r.cfg.Broadcast { - // Check if we've already started a goroutine for this peer, if not we create - // a new done channel so we can explicitly close the goroutine if the peer - // is later removed, we increment the waitgroup so the reactor can stop - // safely, and finally start the goroutine to broadcast txs to that peer. - _, ok := r.peerRoutines[peerUpdate.NodeID] - if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) - - r.ids.ReserveForPeer(peerUpdate.NodeID) - - // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) - } - } - - case p2p.PeerStatusDown: - r.ids.Reclaim(peerUpdate.NodeID) - - // Check if we've started a tx broadcasting goroutine for this peer. - // If we have, we signal to terminate the goroutine via the channel's closure. - // This will internally decrement the peer waitgroup and remove the peer - // from the map of peer tx broadcasting goroutines. - closer, ok := r.peerRoutines[peerUpdate.NodeID] - if ok { - closer.Close() - } - } -} - -// processPeerUpdates initiates a blocking process where we listen for and handle -// PeerUpdate messages. When the reactor is stopped, we will catch the signal and -// close the p2p PeerUpdatesCh gracefully. 
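processPeerUpdates below is the standard two-case event loop: service a stream of updates until a close channel is signalled. A self-contained sketch of that loop; the channel names are illustrative:

    package main

    import "fmt"

    // consume services updates until closeCh is closed -- the same two-case
    // select used by processPeerUpdates below. When both cases are ready the
    // scheduler picks one at random, so output order may vary.
    func consume(updates <-chan string, closeCh <-chan struct{}) {
    	for {
    		select {
    		case u := <-updates:
    			fmt.Println("update:", u)
    		case <-closeCh:
    			fmt.Println("stopped listening; closing")
    			return
    		}
    	}
    }

    func main() {
    	updates := make(chan string, 1)
    	closeCh := make(chan struct{})
    	updates <- "peer up"
    	close(closeCh)
    	consume(updates, closeCh)
    }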
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - - for { - select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return - } - } -} - -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { - peerMempoolID := r.ids.GetForPeer(peerID) - var next *clist.CElement - - // remove the peer ID from the map of routines and mark the waitgroup as done - defer func() { - r.mtx.Lock() - delete(r.peerRoutines, peerID) - r.mtx.Unlock() - - r.peerWG.Done() - - if e := recover(); e != nil { - r.Logger.Error( - "recovering from broadcasting mempool loop", - "err", e, - "stack", string(debug.Stack()), - ) - } - }() - - for { - if !r.IsRunning() { - return - } - - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. - if next == nil { - select { - case <-r.mempool.TxsWaitChan(): // wait until a tx is available - if next = r.mempool.TxsFront(); next == nil { - continue - } - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } - - memTx := next.Value.(*mempoolTx) - - if r.peerMgr != nil { - height := r.peerMgr.GetHeight(peerID) - if height > 0 && height < memTx.Height()-1 { - // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - - // NOTE: Transaction batching was disabled due to: - // https://github.com/tendermint/tendermint/issues/5796 - - if _, ok := memTx.senders.Load(peerMempoolID); !ok { - // Send the mempool tx to the corresponding peer. Note, the peer may be - // behind and thus would not be able to process the mempool tx correctly. - r.mempoolCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{memTx.tx}, - }, - } - r.Logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", memTx.tx.Hash()), - "peer", peerID, - ) - } - - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
- return - } - } -} diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go deleted file mode 100644 index 14ff58adf..000000000 --- a/internal/mempool/v0/reactor_test.go +++ /dev/null @@ -1,393 +0,0 @@ -package v0 - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChnnels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*CListMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*CListMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := GetChannelDescriptor(config) - chDesc.RecvBufferCapacity = int(chBuf) - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc) - - for nodeID := range rts.network.Nodes { - rts.kvstores[nodeID] = kvstore.NewApplication() - cc := abciclient.NewLocalCreator(rts.kvstores[nodeID]) - - mempool, memCleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - t.Cleanup(memCleanup) - mempool.SetLogger(rts.logger) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - config, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChnnels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) - - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} - -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func (rts *reactorTestSuite) 
assertMempoolChannelsDrained(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor %s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor %s did not stop", id) - } - - for _, mch := range rts.mempoolChnnels { - require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) - } -} - -func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) { - t.Helper() - - fn := func(pool *CListMempool) { - for pool.Size() < len(txs) { - time.Sleep(50 * time.Millisecond) - } - - reapedTxs := pool.ReapMaxTxs(len(txs)) - require.Equal(t, len(txs), len(reapedTxs)) - for i, tx := range txs { - require.Equalf(t, - tx, - reapedTxs[i], - "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], - ) - } - } - - if len(ids) == 1 { - fn(rts.reactors[ids[0]].mempool) - return - } - - wg := &sync.WaitGroup{} - for id := range rts.mempools { - if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) { - continue - } - - wg.Add(1) - func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) - } - - wg.Wait() -} - -func TestReactorBroadcastTxs(t *testing.T) { - numTxs := 1000 - numNodes := 10 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondaries := rts.nodes[1:] - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - - // run the router - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, txs, secondaries...) - - for _, pool := range rts.mempools { - require.Equal(t, len(txs), pool.Size()) - } - - rts.assertMempoolChannelsDrained(t) -} - -// regression test for https://github.com/tendermint/tendermint/issues/5408 -func TestReactorConcurrency(t *testing.T) { - numTxs := 5 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - var wg sync.WaitGroup - - for i := 0; i < 1000; i++ { - wg.Add(2) - - // 1. submit a bunch of txs - // 2. update the whole mempool - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[primary] - - mempool.Lock() - defer mempool.Unlock() - - deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) - for i := range txs { - deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} - } - - require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil)) - }() - - // 1. submit a bunch of txs - // 2. 
update none - _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[secondary] - - mempool.Lock() - defer mempool.Unlock() - - err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) - require.NoError(t, err) - }() - - // flush the mempool - rts.mempools[secondary].Flush() - } - - wg.Wait() -} - -func TestReactorNoBroadcastToSender(t *testing.T) { - numTxs := 1000 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, uint(numTxs)) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - peerID := uint16(1) - _ = checkTxs(t, rts.mempools[primary], numTxs, peerID) - - rts.start(t) - - time.Sleep(100 * time.Millisecond) - - require.Eventually(t, func() bool { - return rts.mempools[secondary].Size() == 0 - }, time.Minute, 100*time.Millisecond) - - rts.assertMempoolChannelsDrained(t) -} - -func TestReactor_MaxTxBytes(t *testing.T) { - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - // Broadcast a tx, which has the max size and ensure it's received by the - // second reactor. - tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) - err := rts.reactors[primary].mempool.CheckTx( - context.Background(), - tx1, - nil, - mempool.TxInfo{ - SenderID: mempool.UnknownPeerID, - }, - ) - require.NoError(t, err) - - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, []types.Tx{tx1}, secondary) - - rts.reactors[primary].mempool.Flush() - rts.reactors[secondary].mempool.Flush() - - // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) - require.Error(t, err) - - rts.assertMempoolChannelsDrained(t) -} - -func TestDontExhaustMaxActiveIDs(t *testing.T) { - cfg := config.TestConfig() - - // we're creating a single node network, but not starting the - // network. - rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1) - - nodeID := rts.nodes[0] - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - // ensure the reactor does not panic (i.e. 
exhaust active IDs) - for i := 0; i < mempool.MaxActiveIDs+1; i++ { - rts.peerChans[nodeID] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: peerID, - } - - rts.mempoolChnnels[nodeID].Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{}, - }, - } - } - - require.Eventually( - t, - func() bool { - for _, mch := range rts.mempoolChnnels { - if len(mch.Out) > 0 { - return false - } - } - - return true - }, - time.Minute, - 10*time.Millisecond, - ) - - rts.assertMempoolChannelsDrained(t) -} - -func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - // 0 is already reserved for UnknownPeerID - ids := mempool.NewMempoolIDs() - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - for i := 0; i < mempool.MaxActiveIDs-1; i++ { - ids.ReserveForPeer(peerID) - } - - require.Panics(t, func() { - ids.ReserveForPeer(peerID) - }) -} - -func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, 2, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - // disconnect peer - rts.peerChans[primary] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary, - } -} diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go deleted file mode 100644 index d78d9d287..000000000 --- a/internal/mempool/v1/mempool.go +++ /dev/null @@ -1,887 +0,0 @@ -package v1 - -import ( - "bytes" - "context" - "errors" - "fmt" - "reflect" - "sync/atomic" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -var _ mempool.Mempool = (*TxMempool)(nil) - -// TxMempoolOption sets an optional parameter on the TxMempool. -type TxMempoolOption func(*TxMempool) - -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. -type TxMempool struct { - logger log.Logger - metrics *mempool.Metrics - config *config.MempoolConfig - proxyAppConn proxy.AppConnMempool - - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache mempool.TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. 
- gossipIndex *clist.CList - - // recheckCursor and recheckEnd are used as cursors based on the gossip index - // to recheck transactions that are already in the mempool. Iteration is not - // thread-safe and transaction may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safe guard updates, insertions and deletions - // from the mempool. A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc -} - -func NewTxMempool( - logger log.Logger, - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...TxMempoolOption, -) *TxMempool { - - txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: height, - cache: mempool.NopTxCache{}, - metrics: mempool.NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), - } - - if cfg.CacheSize > 0 { - txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } - - proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) - - for _, opt := range options { - opt(txmp) - } - - return txmp -} - -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. It only applies -// to the first created block. After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.postCheck = f } -} - -// WithMetrics sets the mempool's metrics collector. -func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { - return func(txmp *TxMempool) { txmp.metrics = metrics } -} - -// Lock obtains a write-lock on the mempool. 
A caller must be sure to explicitly
-// release the lock when finished.
-func (txmp *TxMempool) Lock() {
-	txmp.mtx.Lock()
-}
-
-// Unlock releases a write-lock on the mempool.
-func (txmp *TxMempool) Unlock() {
-	txmp.mtx.Unlock()
-}
-
-// Size returns the number of valid transactions in the mempool. It is
-// thread-safe.
-func (txmp *TxMempool) Size() int {
-	return txmp.txStore.Size()
-}
-
-// SizeBytes returns the total sum in bytes of all the valid transactions in the
-// mempool. It is thread-safe.
-func (txmp *TxMempool) SizeBytes() int64 {
-	return atomic.LoadInt64(&txmp.sizeBytes)
-}
-
-// FlushAppConn executes FlushSync on the mempool's proxyAppConn.
-//
-// NOTE: The caller must obtain a write-lock via Lock() prior to execution.
-func (txmp *TxMempool) FlushAppConn() error {
-	return txmp.proxyAppConn.FlushSync(context.Background())
-}
-
-// WaitForNextTx returns a blocking channel that will be closed when the next
-// valid transaction is available to gossip. It is thread-safe.
-func (txmp *TxMempool) WaitForNextTx() <-chan struct{} {
-	return txmp.gossipIndex.WaitChan()
-}
-
-// NextGossipTx returns the next valid transaction to gossip. A caller must wait
-// for WaitForNextTx to signal a transaction is available to gossip first. It is
-// thread-safe.
-func (txmp *TxMempool) NextGossipTx() *clist.CElement {
-	return txmp.gossipIndex.Front()
-}
-
-// EnableTxsAvailable enables the mempool to trigger events when transactions
-// are available on a block by block basis.
-func (txmp *TxMempool) EnableTxsAvailable() {
-	txmp.mtx.Lock()
-	defer txmp.mtx.Unlock()
-
-	txmp.txsAvailable = make(chan struct{}, 1)
-}
-
-// TxsAvailable returns a channel which fires once for every height, and only
-// when transactions are available in the mempool. It is thread-safe.
-func (txmp *TxMempool) TxsAvailable() <-chan struct{} {
-	return txmp.txsAvailable
-}
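CheckTx below pushes the transaction into the cache first and treats a failed push as "seen before", erroring only when the same sender re-delivers the same tx. A minimal sketch of that push-first dedup, using a plain map in place of the LRU cache (names are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errTxInCache = errors.New("tx already exists in cache")

    // seenFilter is a toy stand-in for the mempool's LRU tx cache.
    type seenFilter struct {
    	seen map[string]map[uint16]bool // tx -> senders that delivered it
    }

    // admit returns errTxInCache only when the same sender re-delivers a tx;
    // a duplicate from a new sender is recorded and silently accepted (nil),
    // matching the behavior described above.
    func (f *seenFilter) admit(tx string, sender uint16) error {
    	senders, dup := f.seen[tx]
    	if !dup {
    		f.seen[tx] = map[uint16]bool{sender: true}
    		return nil // first sighting: would proceed to CheckTxAsync
    	}
    	if senders[sender] {
    		return errTxInCache
    	}
    	senders[sender] = true
    	return nil
    }

    func main() {
    	f := &seenFilter{seen: map[string]map[uint16]bool{}}
    	fmt.Println(f.admit("tx1", 1)) // <nil>
    	fmt.Println(f.admit("tx1", 2)) // <nil>  (new sender, cached tx)
    	fmt.Println(f.admit("tx1", 1)) // tx already exists in cache
    }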
-
-// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires
-// a read-lock and attempts to execute the application's CheckTx ABCI method via
-// CheckTxAsync. We return an error if any of the following happen:
-//
-// - The CheckTxAsync execution fails.
-// - The transaction already exists in the cache and we've already received the
-//   transaction from the peer. Otherwise, if it solely exists in the cache, we
-//   return nil.
-// - The transaction size exceeds the maximum transaction size as defined by the
-//   configuration provided to the mempool.
-// - The transaction fails Pre-Check (if it is defined).
-// - The proxyAppConn fails, e.g. the buffer is full.
-//
-// If the mempool is full, we still execute CheckTx and attempt to find a lower
-// priority transaction to evict. If such a transaction exists, we remove the
-// lower priority transaction and add the new one with higher priority.
-//
-// NOTE:
-// - The applications' CheckTx implementation may panic.
-// - The caller is not required to hold any locks when executing CheckTx.
-func (txmp *TxMempool) CheckTx(
-	ctx context.Context,
-	tx types.Tx,
-	cb func(*abci.Response),
-	txInfo mempool.TxInfo,
-) error {
-
-	txmp.mtx.RLock()
-	defer txmp.mtx.RUnlock()
-
-	txSize := len(tx)
-	if txSize > txmp.config.MaxTxBytes {
-		return types.ErrTxTooLarge{
-			Max:    txmp.config.MaxTxBytes,
-			Actual: txSize,
-		}
-	}
-
-	if txmp.preCheck != nil {
-		if err := txmp.preCheck(tx); err != nil {
-			return types.ErrPreCheck{
-				Reason: err,
-			}
-		}
-	}
-
-	if err := txmp.proxyAppConn.Error(); err != nil {
-		return err
-	}
-
-	txHash := tx.Key()
-
-	// We add the transaction to the mempool's cache and if the transaction already
-	// exists, i.e. false is returned, then we check if we've seen this transaction
-	// from the same sender and error if we have. Otherwise, we return nil.
-	if !txmp.cache.Push(tx) {
-		wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID)
-		if wtx != nil && ok {
-			// We already have the transaction stored and we've already seen this
-			// transaction from txInfo.SenderID.
-			return types.ErrTxInCache
-		}
-
-		txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash())
-		return nil
-	}
-
-	if ctx == nil {
-		ctx = context.Background()
-	}
-
-	reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx})
-	if err != nil {
-		txmp.cache.Remove(tx)
-		return err
-	}
-
-	reqRes.SetCallback(func(res *abci.Response) {
-		if txmp.recheckCursor != nil {
-			panic("recheck cursor is non-nil in CheckTx callback")
-		}
-
-		wtx := &WrappedTx{
-			tx:        tx,
-			hash:      txHash,
-			timestamp: time.Now().UTC(),
-			height:    txmp.height,
-		}
-		txmp.initTxCallback(wtx, res, txInfo)
-
-		if cb != nil {
-			cb(res)
-		}
-	})
-
-	return nil
-}
-
-func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error {
-	txmp.Lock()
-	defer txmp.Unlock()
-
-	// remove the committed transaction from the transaction store and indexes
-	if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil {
-		txmp.removeTx(wtx, false)
-		return nil
-	}
-
-	return errors.New("transaction not found")
-}
-
-// Flush flushes out the mempool. It acquires a read-lock, fetches all the
-// transactions currently in the transaction store and removes each transaction
-// from the store and all indexes and finally resets the cache.
-//
-// NOTE:
-// - Flushing the mempool may leave the mempool in an inconsistent state.
-func (txmp *TxMempool) Flush() {
-	txmp.mtx.RLock()
-	defer txmp.mtx.RUnlock()
-
-	txmp.heightIndex.Reset()
-	txmp.timestampIndex.Reset()
-
-	for _, wtx := range txmp.txStore.GetAllTxs() {
-		txmp.removeTx(wtx, false)
-	}
-
-	atomic.SwapInt64(&txmp.sizeBytes, 0)
-	txmp.cache.Reset()
-}
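ReapMaxBytesMaxGas below drains the priority queue to obtain transactions in priority order, then re-enqueues everything it popped via a deferred loop so reaping does not mutate the mempool. The same pop-and-restore shape, sketched with the standard library's container/heap (item names and priorities are illustrative):

    package main

    import (
    	"container/heap"
    	"fmt"
    )

    type item struct {
    	name     string
    	priority int64
    }

    // pq is a max-heap over item priorities.
    type pq []item

    func (p pq) Len() int            { return len(p) }
    func (p pq) Less(i, j int) bool  { return p[i].priority > p[j].priority }
    func (p pq) Swap(i, j int)       { p[i], p[j] = p[j], p[i] }
    func (p *pq) Push(x interface{}) { *p = append(*p, x.(item)) }
    func (p *pq) Pop() interface{} {
    	old := *p
    	it := old[len(old)-1]
    	*p = old[:len(old)-1]
    	return it
    }

    // reapTop pops up to max items in priority order, then restores them in a
    // deferred loop so the queue is unchanged afterwards -- the pop-and-restore
    // pattern used by the reap methods below.
    func reapTop(p *pq, max int) []string {
    	popped := make([]item, 0, max)
    	defer func() {
    		for _, it := range popped {
    			heap.Push(p, it)
    		}
    	}()

    	var out []string
    	for p.Len() > 0 && len(out) < max {
    		it := heap.Pop(p).(item)
    		popped = append(popped, it)
    		out = append(out, it.name)
    	}
    	return out
    }

    func main() {
    	p := &pq{{"a", 1}, {"b", 3}, {"c", 2}}
    	heap.Init(p)
    	fmt.Println(reapTop(p, 2)) // [b c]
    	fmt.Println(p.Len())       // 3: queue restored
    }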
-	wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs())
-	defer func() {
-		for _, wtx := range wTxs {
-			txmp.priorityIndex.PushTx(wtx)
-		}
-	}()
-
-	txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs())
-	for txmp.priorityIndex.NumTxs() > 0 {
-		wtx := txmp.priorityIndex.PopTx()
-		txs = append(txs, wtx.tx)
-		wTxs = append(wTxs, wtx)
-		size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx})
-
-		// Ensure we have capacity for the transaction with respect to the
-		// transaction size.
-		if maxBytes > -1 && totalSize+size > maxBytes {
-			return txs[:len(txs)-1]
-		}
-
-		totalSize += size
-
-		// ensure we have capacity for the transaction with respect to total gas
-		gas := totalGas + wtx.gasWanted
-		if maxGas > -1 && gas > maxGas {
-			return txs[:len(txs)-1]
-		}
-
-		totalGas = gas
-	}
-
-	return txs
-}
-
-// ReapMaxTxs returns a list of transactions within the provided bound on the
-// number of transactions. Transactions are retrieved in priority order.
-//
-// NOTE:
-// - A read-lock is acquired.
-// - Transactions returned are not actually removed from the mempool transaction
-//   store or indexes.
-func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs {
-	txmp.mtx.RLock()
-	defer txmp.mtx.RUnlock()
-
-	numTxs := txmp.priorityIndex.NumTxs()
-	if max < 0 {
-		max = numTxs
-	}
-
-	cap := tmmath.MinInt(numTxs, max)
-
-	// wTxs contains a list of *WrappedTx retrieved from the priority queue that
-	// need to be re-enqueued prior to returning.
-	wTxs := make([]*WrappedTx, 0, cap)
-	defer func() {
-		for _, wtx := range wTxs {
-			txmp.priorityIndex.PushTx(wtx)
-		}
-	}()
-
-	txs := make([]types.Tx, 0, cap)
-	for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max {
-		wtx := txmp.priorityIndex.PopTx()
-		txs = append(txs, wtx.tx)
-		wTxs = append(wTxs, wtx)
-	}
-
-	return txs
-}
-
-// Update iterates over all the transactions provided by the caller, i.e. the
-// block producer, and removes them from the cache (if applicable) and removes
-// the transactions from the main transaction store and associated indexes.
-// Finally, if there are transactions remaining in the mempool, we initiate a
-// re-CheckTx for them (if applicable); otherwise, we notify the caller that
-// more transactions are available.
-//
-// NOTE:
-// - The caller must explicitly acquire a write-lock via Lock().
-func (txmp *TxMempool) Update(
-	blockHeight int64,
-	blockTxs types.Txs,
-	deliverTxResponses []*abci.ResponseDeliverTx,
-	newPreFn mempool.PreCheckFunc,
-	newPostFn mempool.PostCheckFunc,
-) error {
-
-	txmp.height = blockHeight
-	txmp.notifiedTxsAvailable = false
-
-	if newPreFn != nil {
-		txmp.preCheck = newPreFn
-	}
-	if newPostFn != nil {
-		txmp.postCheck = newPostFn
-	}
-
-	for i, tx := range blockTxs {
-		if deliverTxResponses[i].Code == abci.CodeTypeOK {
-			// add the valid committed transaction to the cache (if missing)
-			_ = txmp.cache.Push(tx)
-		} else if !txmp.config.KeepInvalidTxsInCache {
-			// allow invalid transactions to be re-submitted
-			txmp.cache.Remove(tx)
-		}
-
-		// remove the committed transaction from the transaction store and indexes
-		if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil {
-			txmp.removeTx(wtx, false)
-		}
-	}
-
-	txmp.purgeExpiredTxs(blockHeight)
-
-	// If there are any uncommitted transactions left in the mempool, we either
-	// initiate re-CheckTx per remaining transaction or notify that remaining
-	// transactions are left.
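-	// Rechecking walks the gossip index front to back, so CheckTx responses
-	// arrive in the FIFO order that defaultTxCallback expects; see
-	// updateReCheckTxs.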
- if txmp.Size() > 0 { - if txmp.config.Recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs() - } else { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - return nil -} - -// initTxCallback performs the initial, i.e. the first, callback after CheckTx -// has been executed by the ABCI application. In other words, initTxCallback is -// called after executing CheckTx when we see a unique transaction for the first -// time. CheckTx can be called again for the same transaction at a later point -// in time when re-checking, however, this callback will not be called. -// -// After the ABCI application executes CheckTx, initTxCallback is called with -// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool, -// we execute that first. If there is no error from postCheck (if defined) and -// the ABCI CheckTx response code is OK, we attempt to insert the transaction. -// -// When attempting to insert the transaction, we first check if there is -// sufficient capacity. If there is sufficient capacity, the transaction is -// inserted into the txStore and indexed across all indexes. Otherwise, if the -// mempool is full, we attempt to find a lower priority transaction to evict in -// place of the new incoming transaction. If no such transaction exists, the -// new incoming transaction is rejected. -// -// If the new incoming transaction fails CheckTx or postCheck fails, we reject -// the new incoming transaction. -// -// NOTE: -// - An explicit lock is NOT required. -func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) { - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if !ok { - return - } - - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) - } - - if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { - // ignore bad transactions - txmp.logger.Info( - "rejected bad transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer_id", txInfo.SenderNodeID, - "code", checkTxRes.CheckTx.Code, - "post_check_err", err, - ) - - txmp.metrics.FailedTxs.Add(1) - - if !txmp.config.KeepInvalidTxsInCache { - txmp.cache.Remove(wtx.tx) - } - if err != nil { - checkTxRes.CheckTx.MempoolError = err.Error() - } - return - } - - sender := checkTxRes.CheckTx.Sender - priority := checkTxRes.CheckTx.Priority - - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "sender", sender, - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - } - - if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache. - txmp.cache.Remove(wtx.tx) - txmp.logger.Error( - "rejected incoming good transaction; mempool full", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err.Error(), - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. 
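-		//   removeTx tolerates this: it first consults txStore.IsTxRemoved
-		//   and returns early for entries that are already removed.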
-		for _, toEvict := range evictTxs {
-			txmp.removeTx(toEvict, true)
-			txmp.logger.Debug(
-				"evicted existing good transaction; mempool full",
-				"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
-				"old_priority", toEvict.priority,
-				"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
-				"new_priority", wtx.priority,
-			)
-			txmp.metrics.EvictedTxs.Add(1)
-		}
-	}
-
-	wtx.gasWanted = checkTxRes.CheckTx.GasWanted
-	wtx.priority = priority
-	wtx.sender = sender
-	wtx.peers = map[uint16]struct{}{
-		txInfo.SenderID: {},
-	}
-
-	txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
-	txmp.metrics.Size.Set(float64(txmp.Size()))
-
-	txmp.insertTx(wtx)
-	txmp.logger.Debug(
-		"inserted good transaction",
-		"priority", wtx.priority,
-		"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
-		"height", txmp.height,
-		"num_txs", txmp.Size(),
-	)
-	txmp.notifyTxsAvailable()
-}
-
-// defaultTxCallback performs the default CheckTx application callback. This is
-// NOT executed when a transaction is first seen/received. Instead, this callback
-// is executed during re-checking transactions (if enabled). A caller, i.e. a
-// block proposer, acquires a mempool write-lock via Lock() and when executing
-// Update(), if the mempool is non-empty and Recheck is enabled, then all
-// remaining transactions will be rechecked via CheckTxAsync. The order in which
-// they are rechecked must be the same order in which this callback is called
-// per transaction.
-func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) {
-	if txmp.recheckCursor == nil {
-		return
-	}
-
-	txmp.metrics.RecheckTimes.Add(1)
-
-	checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
-	if !ok {
-		txmp.logger.Error("received incorrect type in mempool callback",
-			"expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(),
-			"got", reflect.TypeOf(res.Value).Name(),
-		)
-		return
-	}
-	tx := req.GetCheckTx().Tx
-	wtx := txmp.recheckCursor.Value.(*WrappedTx)
-
-	// Search through the remaining list of txs to recheck for a transaction that
-	// matches the one we received from the ABCI application.
-	for {
-		if bytes.Equal(tx, wtx.tx) {
-			// We've found a tx in the recheck list that matches the tx that we
-			// received from the ABCI application.
-			// Break, and use this transaction for further checks.
-			break
-		}
-
-		txmp.logger.Error(
-			"re-CheckTx transaction mismatch",
-			"got", wtx.tx.Hash(),
-			"expected", types.Tx(tx).Key(),
-		)
-
-		if txmp.recheckCursor == txmp.recheckEnd {
-			// we reached the end of the recheckTx list without finding a tx
-			// matching the one we received from the ABCI application.
-			// Return without processing any tx.
-			txmp.recheckCursor = nil
-			return
-		}
-
-		txmp.recheckCursor = txmp.recheckCursor.Next()
-		wtx = txmp.recheckCursor.Value.(*WrappedTx)
-	}
-
-	// Only evaluate transactions that have not been removed. This can happen
-	// if an existing transaction is evicted during CheckTx and while this
-	// callback is being executed for the same evicted transaction.
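-	// The removed flag consulted here is set by TxStore.RemoveTx, so a racing
-	// eviction is observed and the stale response is skipped.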
-	if !txmp.txStore.IsTxRemoved(wtx.hash) {
-		var err error
-		if txmp.postCheck != nil {
-			err = txmp.postCheck(tx, checkTxRes.CheckTx)
-		}
-
-		if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
-			wtx.priority = checkTxRes.CheckTx.Priority
-		} else {
-			txmp.logger.Debug(
-				"existing transaction no longer valid; failed re-CheckTx callback",
-				"priority", wtx.priority,
-				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
-				"err", err,
-				"code", checkTxRes.CheckTx.Code,
-			)
-
-			if wtx.gossipEl != txmp.recheckCursor {
-				panic("corrupted reCheckTx cursor")
-			}
-
-			txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache)
-		}
-	}
-
-	// move reCheckTx cursor to next element
-	if txmp.recheckCursor == txmp.recheckEnd {
-		txmp.recheckCursor = nil
-	} else {
-		txmp.recheckCursor = txmp.recheckCursor.Next()
-	}
-
-	if txmp.recheckCursor == nil {
-		txmp.logger.Debug("finished rechecking transactions")
-
-		if txmp.Size() > 0 {
-			txmp.notifyTxsAvailable()
-		}
-	}
-
-	txmp.metrics.Size.Set(float64(txmp.Size()))
-}
-
-// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For
-// each transaction, it executes CheckTxAsync. The global callback defined on
-// the proxyAppConn will be executed for each transaction after CheckTx is
-// executed.
-//
-// NOTE:
-// - The caller must have a write-lock when executing updateReCheckTxs.
-func (txmp *TxMempool) updateReCheckTxs() {
-	if txmp.Size() == 0 {
-		panic("attempted to update re-CheckTx txs when mempool is empty")
-	}
-
-	txmp.recheckCursor = txmp.gossipIndex.Front()
-	txmp.recheckEnd = txmp.gossipIndex.Back()
-	ctx := context.Background()
-
-	for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() {
-		wtx := e.Value.(*WrappedTx)
-
-		// Only execute CheckTx if the transaction is not marked as removed, which
-		// could happen if the transaction was evicted.
-		if !txmp.txStore.IsTxRemoved(wtx.hash) {
-			_, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{
-				Tx:   wtx.tx,
-				Type: abci.CheckTxType_Recheck,
-			})
-			if err != nil {
-				// no need to retry since the tx will be rechecked after the next block
-				txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err)
-			}
-		}
-	}
-
-	if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil {
-		txmp.logger.Error("failed to flush transactions during rechecking", "err", err)
-	}
-}
-
-// canAddTx returns an error if we cannot insert the provided *WrappedTx into
-// the mempool due to the mempool's configured constraints. Otherwise, nil is
-// returned and the transaction can be inserted into the mempool.
-func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
-	var (
-		numTxs    = txmp.Size()
-		sizeBytes = txmp.SizeBytes()
-	)
-
-	if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes {
-		return types.ErrMempoolIsFull{
-			NumTxs:      numTxs,
-			MaxTxs:      txmp.config.Size,
-			TxsBytes:    sizeBytes,
-			MaxTxsBytes: txmp.config.MaxTxsBytes,
-		}
-	}
-
-	return nil
-}
-
-func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
-	txmp.txStore.SetTx(wtx)
-	txmp.priorityIndex.PushTx(wtx)
-	txmp.heightIndex.Insert(wtx)
-	txmp.timestampIndex.Insert(wtx)
-
-	// Insert the transaction into the gossip index and mark the reference to the
-	// linked-list element, which will be needed at a later point when the
-	// transaction is removed.
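-	// Keeping the element reference lets removeTx unlink the transaction from
-	// the gossip list directly instead of scanning for it.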
-	gossipEl := txmp.gossipIndex.PushBack(wtx)
-	wtx.gossipEl = gossipEl
-
-	atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size()))
-}
-
-func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
-	if txmp.txStore.IsTxRemoved(wtx.hash) {
-		return
-	}
-
-	txmp.txStore.RemoveTx(wtx)
-	txmp.priorityIndex.RemoveTx(wtx)
-	txmp.heightIndex.Remove(wtx)
-	txmp.timestampIndex.Remove(wtx)
-
-	// Remove the transaction from the gossip index and cleanup the linked-list
-	// element so it can be garbage collected.
-	txmp.gossipIndex.Remove(wtx.gossipEl)
-	wtx.gossipEl.DetachPrev()
-
-	atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))
-
-	if removeFromCache {
-		txmp.cache.Remove(wtx.tx)
-	}
-}
-
-// purgeExpiredTxs removes all transactions that have exceeded their respective
-// height- and/or time-based TTLs from their respective indexes. Every expired
-// transaction will be removed from the mempool entirely, except for the cache.
-//
-// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
-// the caller has a write-lock on the mempool and so we can safely iterate over
-// the height and time based indexes.
-func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
-	now := time.Now()
-	expiredTxs := make(map[types.TxKey]*WrappedTx)
-
-	if txmp.config.TTLNumBlocks > 0 {
-		purgeIdx := -1
-		for i, wtx := range txmp.heightIndex.txs {
-			if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
-				expiredTxs[wtx.tx.Key()] = wtx
-				purgeIdx = i
-			} else {
-				// since the index is sorted, we know no other txs can be purged
-				break
-			}
-		}
-
-		if purgeIdx >= 0 {
-			txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
-		}
-	}
-
-	if txmp.config.TTLDuration > 0 {
-		purgeIdx := -1
-		for i, wtx := range txmp.timestampIndex.txs {
-			if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
-				expiredTxs[wtx.tx.Key()] = wtx
-				purgeIdx = i
-			} else {
-				// since the index is sorted, we know no other txs can be purged
-				break
-			}
-		}
-
-		if purgeIdx >= 0 {
-			txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
-		}
-	}
-
-	for _, wtx := range expiredTxs {
-		txmp.removeTx(wtx, false)
-	}
-}
-
-func (txmp *TxMempool) notifyTxsAvailable() {
-	if txmp.Size() == 0 {
-		panic("attempt to notify txs available but mempool is empty!")
-	}
-
-	if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable {
-		// channel cap is 1, so this will send once
-		txmp.notifiedTxsAvailable = true
-
-		select {
-		case txmp.txsAvailable <- struct{}{}:
-		default:
-		}
-	}
-}
diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go
deleted file mode 100644
index c5b7ca82f..000000000
--- a/internal/mempool/v1/tx.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package v1
-
-import (
-	"sort"
-	"time"
-
-	"github.com/tendermint/tendermint/internal/libs/clist"
-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	"github.com/tendermint/tendermint/types"
-)
-
-// WrappedTx defines a wrapper around a raw transaction with additional metadata
-// that is used for indexing.
-type WrappedTx struct {
-	// tx represents the raw binary transaction data
-	tx types.Tx
-
-	// hash defines the transaction hash and the primary key used in the mempool
-	hash types.TxKey
-
-	// height defines the height at which the transaction was validated
-	height int64
-
-	// gasWanted defines the amount of gas the transaction sender requires
-	gasWanted int64
-
-	// priority defines the transaction's priority as specified by the application
-	// in the ResponseCheckTx response.
-	priority int64
-
-	// sender defines the transaction's sender as specified by the application in
-	// the ResponseCheckTx response.
-	sender string
-
-	// timestamp is the time at which the node first received the transaction from
-	// a peer. It is used as a second dimension in prioritizing transactions when
-	// two transactions have the same priority.
-	timestamp time.Time
-
-	// peers records a mapping of all peers that sent a given transaction
-	peers map[uint16]struct{}
-
-	// heapIndex defines the index of the item in the heap
-	heapIndex int
-
-	// gossipEl references the linked-list element in the gossip index
-	gossipEl *clist.CElement
-
-	// removed marks the transaction as removed from the mempool. This is set
-	// during RemoveTx and is needed due to the fact that a given existing
-	// transaction in the mempool can be evicted when it is simultaneously having
-	// a reCheckTx callback executed.
-	removed bool
-}
-
-func (wtx *WrappedTx) Size() int {
-	return len(wtx.tx)
-}
-
-// TxStore implements a thread-safe mapping of valid transaction(s).
-//
-// NOTE:
-// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
-//   access is not allowed. Regardless, it is not expected for the mempool to
-//   need mutative access.
-type TxStore struct {
-	mtx       tmsync.RWMutex
-	hashTxs   map[types.TxKey]*WrappedTx // primary index
-	senderTxs map[string]*WrappedTx      // sender is defined by the ABCI application
-}
-
-func NewTxStore() *TxStore {
-	return &TxStore{
-		senderTxs: make(map[string]*WrappedTx),
-		hashTxs:   make(map[types.TxKey]*WrappedTx),
-	}
-}
-
-// Size returns the total number of transactions in the store.
-func (txs *TxStore) Size() int {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	return len(txs.hashTxs)
-}
-
-// GetAllTxs returns all the transactions currently in the store.
-func (txs *TxStore) GetAllTxs() []*WrappedTx {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	wTxs := make([]*WrappedTx, len(txs.hashTxs))
-	i := 0
-	for _, wtx := range txs.hashTxs {
-		wTxs[i] = wtx
-		i++
-	}
-
-	return wTxs
-}
-
-// GetTxBySender returns a *WrappedTx by the transaction's sender property
-// defined by the ABCI application.
-func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	return txs.senderTxs[sender]
-}
-
-// GetTxByHash returns a *WrappedTx by the transaction's hash.
-func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	return txs.hashTxs[hash]
-}
-
-// IsTxRemoved returns true if a transaction by hash is marked as removed and
-// false otherwise.
-func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	wtx, ok := txs.hashTxs[hash]
-	if ok {
-		return wtx.removed
-	}
-
-	return false
-}
-
-// SetTx stores a *WrappedTx by its hash. If the transaction also contains a
-// non-empty sender, we additionally store the transaction by the sender as
-// defined by the ABCI application.
-func (txs *TxStore) SetTx(wtx *WrappedTx) {
-	txs.mtx.Lock()
-	defer txs.mtx.Unlock()
-
-	if len(wtx.sender) > 0 {
-		txs.senderTxs[wtx.sender] = wtx
-	}
-
-	txs.hashTxs[wtx.tx.Key()] = wtx
-}
-
-// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
-// indexes of the transaction.
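-// It also sets the removed flag on the WrappedTx so that concurrent recheck
-// callbacks can observe the removal via IsTxRemoved.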
-func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
-	txs.mtx.Lock()
-	defer txs.mtx.Unlock()
-
-	if len(wtx.sender) > 0 {
-		delete(txs.senderTxs, wtx.sender)
-	}
-
-	delete(txs.hashTxs, wtx.tx.Key())
-	wtx.removed = true
-}
-
-// TxHasPeer returns true if a transaction by hash has a given peer ID and false
-// otherwise. If the transaction does not exist, false is returned.
-func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
-	txs.mtx.RLock()
-	defer txs.mtx.RUnlock()
-
-	wtx := txs.hashTxs[hash]
-	if wtx == nil {
-		return false
-	}
-
-	_, ok := wtx.peers[peerID]
-	return ok
-}
-
-// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
-// given peerID to the WrappedTx's set of peers that sent us this transaction.
-// We return true if we've already recorded the given peer for this transaction
-// and false otherwise. If the transaction does not exist by hash, we return
-// (nil, false).
-func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
-	txs.mtx.Lock()
-	defer txs.mtx.Unlock()
-
-	wtx := txs.hashTxs[hash]
-	if wtx == nil {
-		return nil, false
-	}
-
-	if wtx.peers == nil {
-		wtx.peers = make(map[uint16]struct{})
-	}
-
-	if _, ok := wtx.peers[peerID]; ok {
-		return wtx, true
-	}
-
-	wtx.peers[peerID] = struct{}{}
-	return wtx, false
-}
-
-// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
-// used to build generic transaction indexes in the mempool. It accepts a
-// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
-// references and is used during Insert to determine sorted order. If less
-// returns true, a <= b.
-type WrappedTxList struct {
-	mtx  tmsync.RWMutex
-	txs  []*WrappedTx
-	less func(*WrappedTx, *WrappedTx) bool
-}
-
-func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
-	return &WrappedTxList{
-		txs:  make([]*WrappedTx, 0),
-		less: less,
-	}
-}
-
-// Size returns the number of WrappedTx objects in the list.
-func (wtl *WrappedTxList) Size() int {
-	wtl.mtx.RLock()
-	defer wtl.mtx.RUnlock()
-
-	return len(wtl.txs)
-}
-
-// Reset resets the list of transactions to an empty list.
-func (wtl *WrappedTxList) Reset() {
-	wtl.mtx.Lock()
-	defer wtl.mtx.Unlock()
-
-	wtl.txs = make([]*WrappedTx, 0)
-}
-
-// Insert inserts a WrappedTx reference into the sorted list based on the list's
-// comparator function.
-func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
-	wtl.mtx.Lock()
-	defer wtl.mtx.Unlock()
-
-	i := sort.Search(len(wtl.txs), func(i int) bool {
-		return wtl.less(wtl.txs[i], wtx)
-	})
-
-	if i == len(wtl.txs) {
-		// insert at the end
-		wtl.txs = append(wtl.txs, wtx)
-		return
-	}
-
-	// Make space for the inserted element by shifting values at the insertion
-	// index up one index.
-	//
-	// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
-	wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
-	wtl.txs[i] = wtx
-}
-
-// Remove attempts to remove a WrappedTx from the sorted list.
-func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
-	wtl.mtx.Lock()
-	defer wtl.mtx.Unlock()
-
-	i := sort.Search(len(wtl.txs), func(i int) bool {
-		return wtl.less(wtl.txs[i], wtx)
-	})
-
-	// Since the list is sorted, we evaluate all elements starting at i. Note that
-	// if the element does not exist, we may potentially evaluate the entire
-	// remainder of the list. However, a caller should not be expected to call
-	// Remove with a non-existing element.
- for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) - return - } - - i++ - } -} diff --git a/node/node_test.go b/node/node_test.go index a4af0a9cc..0a1ebaaaf 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -23,7 +23,6 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" @@ -265,15 +264,12 @@ func TestCreateProposalBlock(t *testing.T) { state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // Make EvidencePool evidenceDB := dbm.NewMemDB() @@ -362,15 +358,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) @@ -426,15 +420,12 @@ func TestMaxProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) diff --git a/node/setup.go b/node/setup.go index f7f6230d1..ffedbec33 100644 --- a/node/setup.go +++ b/node/setup.go @@ -16,8 +16,6 @@ import ( "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" @@ -197,76 +195,37 @@ func createMempoolReactor( logger log.Logger, ) (service.Service, mempool.Mempool, error) { - logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) - peerUpdates := peerManager.Subscribe() + logger = logger.With("module", "mempool") - switch cfg.Mempool.Version { - case config.MempoolV0: - ch, err := router.OpenChannel(mempoolv0.GetChannelDescriptor(cfg.Mempool)) - if err != nil { - return nil, nil, err - } - - mp := mempoolv0.NewCListMempool( - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - 
mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) - - reactor := mempoolv0.NewReactor( - logger, - cfg.Mempool, - peerManager, - mp, - ch, - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactor, mp, nil - - case config.MempoolV1: - ch, err := router.OpenChannel(mempoolv1.GetChannelDescriptor(cfg.Mempool)) - if err != nil { - return nil, nil, err - } - - mp := mempoolv1.NewTxMempool( - logger, - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - logger, - cfg.Mempool, - peerManager, - mp, - ch, - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactor, mp, nil - - default: - return nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) + ch, err := router.OpenChannel(mempool.GetChannelDescriptor(cfg.Mempool)) + if err != nil { + return nil, nil, err } + + mp := mempool.NewTxMempool( + logger, + cfg.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempool.WithMetrics(memplMetrics), + mempool.WithPreCheck(sm.TxPreCheck(state)), + mempool.WithPostCheck(sm.TxPostCheck(state)), + ) + + reactor := mempool.NewReactor( + logger, + cfg.Mempool, + peerManager, + mp, + ch, + peerManager.Subscribe(), + ) + + if cfg.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } + + return reactor, mp, nil } func createEvidenceReactor( diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 4ee9849b2..d4b581928 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -44,10 +44,6 @@ var ( "tcp": 20, "unix": 10, } - nodeMempools = weightedChoice{ - "v0": 20, - "v1": 80, - } nodeStateSyncs = weightedChoice{ e2e.StateSyncDisabled: 10, e2e.StateSyncP2P: 45, @@ -277,7 +273,6 @@ func generateNode( StartAt: startAt, Database: nodeDatabases.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), - Mempool: nodeMempools.Choose(r), StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -285,9 +280,6 @@ func generateNode( Perturb: nodePerturbations.Choose(r), } - if node.Mempool == "" { - node.Mempool = "v1" - } if node.PrivvalProtocol == "" { node.PrivvalProtocol = "file" } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index 3bda6d035..0e0e66baa 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -37,9 +37,6 @@ func TestGenerator(t *testing.T) { } }) if e2e.Mode(node.Mode) != e2e.ModeLight { - t.Run("Mempool", func(t *testing.T) { - require.NotZero(t, node.Mempool) - }) t.Run("PrivvalProtocol", func(t *testing.T) { require.NotZero(t, node.PrivvalProtocol) }) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 39967e529..cb9ddbc6d 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -291,10 +291,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { return nil, fmt.Errorf("unexpected mode %q", node.Mode) } - if node.Mempool != "" { - cfg.Mempool.Version = node.Mempool - } - switch node.StateSync { case e2e.StateSyncP2P: cfg.StateSync.Enable = true diff --git a/test/fuzz/mempool/v0/checktx.go 
b/test/fuzz/mempool/checktx.go similarity index 58% rename from test/fuzz/mempool/v0/checktx.go rename to test/fuzz/mempool/checktx.go index 62eda9729..708f2ebd0 100644 --- a/test/fuzz/mempool/v0/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -1,4 +1,4 @@ -package v0 +package mempool import ( "context" @@ -7,10 +7,11 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/libs/log" ) -var mp mempool.Mempool +var mp *mempool.TxMempool +var getMp func() mempool.Mempool func init() { app := kvstore.NewApplication() @@ -24,11 +25,22 @@ func init() { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mp = mempoolv0.NewCListMempool(cfg, appConnMem, 0) + getMp = func() mempool.Mempool { + if mp == nil { + mp = mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + cfg, + appConnMem, + 0, + ) + + } + return mp + } } func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) + err := getMp().CheckTx(context.Background(), data, nil, mempool.TxInfo{}) if err != nil { return 0 } diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/fuzz_test.go similarity index 84% rename from test/fuzz/mempool/v1/fuzz_test.go rename to test/fuzz/mempool/fuzz_test.go index 26b2c3609..8af0326dd 100644 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ b/test/fuzz/mempool/fuzz_test.go @@ -1,4 +1,4 @@ -package v1_test +package mempool_test import ( "io" @@ -7,7 +7,7 @@ import ( "testing" "github.com/stretchr/testify/require" - mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1" + mempool "github.com/tendermint/tendermint/test/fuzz/mempool" ) const testdataCasesDir = "testdata/cases" @@ -27,7 +27,7 @@ func TestMempoolTestdataCases(t *testing.T) { require.NoError(t, err) input, err := io.ReadAll(f) require.NoError(t, err) - mempoolv1.Fuzz(input) + mempool.Fuzz(input) }) } } diff --git a/test/fuzz/mempool/v0/testdata/cases/empty b/test/fuzz/mempool/testdata/cases/empty similarity index 100% rename from test/fuzz/mempool/v0/testdata/cases/empty rename to test/fuzz/mempool/testdata/cases/empty diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/v0/fuzz_test.go deleted file mode 100644 index 210de40d1..000000000 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package v0_test - -import ( - "io" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" -) - -const testdataCasesDir = "testdata/cases" - -func TestMempoolTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := io.ReadAll(f) - require.NoError(t, err) - mempoolv0.Fuzz(input) - }) - } -} diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go deleted file mode 100644 index 2ed0b97ff..000000000 --- a/test/fuzz/mempool/v1/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v1 - -import ( - "context" - - abciclient "github.com/tendermint/tendermint/abci/client" - 
"github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v1/testdata/cases/empty b/test/fuzz/mempool/v1/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 From 5599ec37bf64e9cc06772bf989ecafd50437e6e6 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Mon, 1 Nov 2021 15:46:35 +0100 Subject: [PATCH 134/174] fuzz: remove fuzz cases for deleted code (#7187) fuzz: remove fuzz cases for deleted code --- .github/workflows/fuzz-nightly.yml | 19 ++---------------- test/fuzz/Makefile | 31 ++++-------------------------- 2 files changed, 6 insertions(+), 44 deletions(-) diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 9e67aed53..bc668ecd0 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -23,24 +23,9 @@ jobs: working-directory: test/fuzz run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - - name: Fuzz mempool-v1 + - name: Fuzz mempool working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1 - continue-on-error: true - - - name: Fuzz mempool-v0 - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0 - continue-on-error: true - - - name: Fuzz p2p-addrbook - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook - continue-on-error: true - - - name: Fuzz p2p-pex - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool continue-on-error: true - name: Fuzz p2p-sc diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile index 3d34e0a43..3bf4486b8 100644 --- a/test/fuzz/Makefile +++ b/test/fuzz/Makefile @@ -1,38 +1,15 @@ #!/usr/bin/make -f -.PHONY: fuzz-mempool-v1 -fuzz-mempool-v1: - cd mempool/v1 && \ +.PHONY: fuzz-mempool +fuzz-mempool: + cd mempool && \ rm -f *-fuzz.zip && \ go-fuzz-build && \ go-fuzz -.PHONY: fuzz-mempool-v0 -fuzz-mempool-v0: - cd mempool/v0 && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-addrbook -fuzz-p2p-addrbook: - cd p2p/addrbook && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-pex -fuzz-p2p-pex: - cd p2p/pex && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - .PHONY: fuzz-p2p-sc fuzz-p2p-sc: - cd p2p/secret_connection && \ + cd p2p/secretconnection && \ rm -f *-fuzz.zip && \ go run ./init-corpus/main.go && \ go-fuzz-build && \ From d32913c889b3e7a94c6fc73ad2669523d4ba23e4 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 1 Nov 2021 10:38:27 -0700 Subject: [PATCH 135/174] pubsub: Use a dynamic queue for buffered subscriptions (#7177) Updates #7156, and a follow-up to #7070. Event subscriptions in Tendermint currently use a fixed-length Go channel as a queue. When the channel fills up, the publisher immediately terminates the subscription. This prevents slow subscribers from creating memory pressure on the node by not servicing their queue fast enough. Replace the buffered channel used to deliver events to buffered subscribers with an explicit queue. The queue provides a soft quota and burst credit mechanism: Clients that usually keep up can survive occasional bursts, without allowing truly slow clients to hog resources indefinitely. --- internal/libs/queue/queue.go | 232 ++++++++++++++++++++++++++++++ internal/libs/queue/queue_test.go | 188 ++++++++++++++++++++++++ libs/pubsub/pubsub.go | 16 +-- libs/pubsub/pubsub_test.go | 5 + libs/pubsub/subscription.go | 61 +++++++- 5 files changed, 485 insertions(+), 17 deletions(-) create mode 100644 internal/libs/queue/queue.go create mode 100644 internal/libs/queue/queue_test.go diff --git a/internal/libs/queue/queue.go b/internal/libs/queue/queue.go new file mode 100644 index 000000000..7b4199504 --- /dev/null +++ b/internal/libs/queue/queue.go @@ -0,0 +1,232 @@ +// Package queue implements a dynamic FIFO queue with a fixed upper bound +// and a flexible quota mechanism to handle bursty load. +package queue + +import ( + "context" + "errors" + "sync" +) + +var ( + // ErrQueueFull is returned by the Add method of a queue when the queue has + // reached its hard capacity limit. + ErrQueueFull = errors.New("queue is full") + + // ErrNoCredit is returned by the Add method of a queue when the queue has + // exceeded its soft quota and there is insufficient burst credit. + ErrNoCredit = errors.New("insufficient burst credit") + + // ErrQueueClosed is returned by the Add method of a closed queue, and by + // the Wait method of a closed empty queue. + ErrQueueClosed = errors.New("queue is closed") + + // Sentinel errors reported by the New constructor. + errHardLimit = errors.New("hard limit must be > 0 and ≥ soft quota") + errBurstCredit = errors.New("burst credit must be non-negative") +) + +// A Queue is a limited-capacity FIFO queue of arbitrary data items. +// +// A queue has a soft quota and a hard limit on the number of items that may be +// contained in the queue. Adding items in excess of the hard limit will fail +// unconditionally. +// +// For items in excess of the soft quota, a credit system applies: Each queue +// maintains a burst credit score. Adding an item in excess of the soft quota +// costs 1 unit of burst credit. If there is not enough burst credit, the add +// will fail. +// +// The initial burst credit is assigned when the queue is constructed. Removing +// items from the queue adds additional credit if the resulting queue length is +// less than the current soft quota. Burst credit is capped by the hard limit. +// +// A Queue is safe for concurrent use by multiple goroutines. +type Queue struct { + mu sync.Mutex // protects the fields below + + softQuota int // adjusted dynamically (see Add, Remove) + hardLimit int // fixed for the lifespan of the queue + queueLen int // number of entries in the queue list + credit float64 // current burst credit + + closed bool + nempty *sync.Cond + back *entry + front *entry + + // The queue is singly-linked. Front points to the sentinel and back points + // to the newest entry. 
The oldest entry is front.link if it exists. +} + +// New constructs a new empty queue with the specified options. It reports an +// error if any of the option values are invalid. +func New(opts Options) (*Queue, error) { + if opts.HardLimit <= 0 || opts.HardLimit < opts.SoftQuota { + return nil, errHardLimit + } + if opts.BurstCredit < 0 { + return nil, errBurstCredit + } + if opts.SoftQuota <= 0 { + opts.SoftQuota = opts.HardLimit + } + if opts.BurstCredit == 0 { + opts.BurstCredit = float64(opts.SoftQuota) + } + sentinel := new(entry) + q := &Queue{ + softQuota: opts.SoftQuota, + hardLimit: opts.HardLimit, + credit: opts.BurstCredit, + back: sentinel, + front: sentinel, + } + q.nempty = sync.NewCond(&q.mu) + return q, nil +} + +// Add adds item to the back of the queue. It reports an error and does not +// enqueue the item if the queue is full or closed, or if it exceeds its soft +// quota and there is not enough burst credit. +func (q *Queue) Add(item interface{}) error { + q.mu.Lock() + defer q.mu.Unlock() + + if q.closed { + return ErrQueueClosed + } + + if q.queueLen >= q.softQuota { + if q.queueLen == q.hardLimit { + return ErrQueueFull + } else if q.credit < 1 { + return ErrNoCredit + } + + // Successfully exceeding the soft quota deducts burst credit and raises + // the soft quota. This has the effect of reducing the credit cap and the + // amount of credit given for removing items to better approximate the + // rate at which the consumer is servicing the queue. + q.credit-- + q.softQuota = q.queueLen + 1 + } + e := &entry{item: item} + q.back.link = e + q.back = e + q.queueLen++ + if q.queueLen == 1 { // was empty + q.nempty.Signal() + } + return nil +} + +// Remove removes and returns the frontmost (oldest) item in the queue and +// reports whether an item was available. If the queue is empty, Remove +// returns nil, false. +func (q *Queue) Remove() (interface{}, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if q.queueLen == 0 { + return nil, false + } + return q.popFront(), true +} + +// Wait blocks until q is non-empty or closed, and then returns the frontmost +// (oldest) item from the queue. If ctx ends before an item is available, Wait +// returns a nil value and a context error. If the queue is closed while it is +// still empty, Wait returns nil, ErrQueueClosed. +func (q *Queue) Wait(ctx context.Context) (interface{}, error) { + // If the context terminates, wake the waiter. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { <-ctx.Done(); q.nempty.Broadcast() }() + + q.mu.Lock() + defer q.mu.Unlock() + + for q.queueLen == 0 { + if q.closed { + return nil, ErrQueueClosed + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + q.nempty.Wait() + } + } + return q.popFront(), nil +} + +// Close closes the queue. After closing, any further Add calls will report an +// error, but items that were added to the queue prior to closing will still be +// available for Remove and Wait. Wait will report an error without blocking if +// it is called on a closed, empty queue. +func (q *Queue) Close() error { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.nempty.Broadcast() + return nil +} + +// popFront removes the frontmost item of q and returns its value after +// updating quota and credit settings. +// +// Preconditions: The caller holds q.mu and q is not empty. 
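+//
+// Illustrative arithmetic (not normative): with softQuota=4 and the queue
+// length dropping to 3, the quota is unchanged (3 >= 4/2) and the removal
+// grants (4-3)/4 = 0.25 burst credit, capped at hardLimit-softQuota.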
+func (q *Queue) popFront() interface{} { + e := q.front.link + q.front.link = e.link + if e == q.back { + q.back = q.front + } + q.queueLen-- + + if q.queueLen < q.softQuota { + // Successfully removing items from the queue below half the soft quota + // lowers the soft quota. This has the effect of increasing the credit cap + // and the amount of credit given for removing items to better approximate + // the rate at which the consumer is servicing the queue. + if q.softQuota > 1 && q.queueLen < q.softQuota/2 { + q.softQuota-- + } + + // Give credit for being below the soft quota. Note we do this after + // adjusting the quota so the credit reflects the item we just removed. + q.credit += float64(q.softQuota-q.queueLen) / float64(q.softQuota) + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + q.credit = cap + } + } + + return e.item +} + +// Options are the initial settings for a Queue. +type Options struct { + // The maximum number of items the queue will ever be permitted to hold. + // This value must be positive, and greater than or equal to SoftQuota. The + // hard limit is fixed and does not change as the queue is used. + // + // The hard limit should be chosen to exceed the largest burst size expected + // under normal operating conditions. + HardLimit int + + // The initial expected maximum number of items the queue should contain on + // an average workload. If this value is zero, it is initialized to the hard + // limit. The soft quota is adjusted from the initial value dynamically as + // the queue is used. + SoftQuota int + + // The initial burst credit score. This value must be greater than or equal + // to zero. If it is zero, the soft quota is used. + BurstCredit float64 +} + +type entry struct { + item interface{} + link *entry +} diff --git a/internal/libs/queue/queue_test.go b/internal/libs/queue/queue_test.go new file mode 100644 index 000000000..f339e07fa --- /dev/null +++ b/internal/libs/queue/queue_test.go @@ -0,0 +1,188 @@ +package queue + +import ( + "context" + "testing" + "time" +) + +func TestNew(t *testing.T) { + tests := []struct { + desc string + opts Options + want error + }{ + {"empty options", Options{}, errHardLimit}, + {"zero limit negative quota", Options{SoftQuota: -1}, errHardLimit}, + {"zero limit and quota", Options{SoftQuota: 0}, errHardLimit}, + {"zero limit", Options{SoftQuota: 1, HardLimit: 0}, errHardLimit}, + {"limit less than quota", Options{SoftQuota: 5, HardLimit: 3}, errHardLimit}, + {"negative credit", Options{SoftQuota: 1, HardLimit: 1, BurstCredit: -6}, errBurstCredit}, + {"valid default credit", Options{SoftQuota: 1, HardLimit: 2, BurstCredit: 0}, nil}, + {"valid explicit credit", Options{SoftQuota: 1, HardLimit: 5, BurstCredit: 10}, nil}, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + got, err := New(test.opts) + if err != test.want { + t.Errorf("New(%+v): got (%+v, %v), want err=%v", test.opts, got, err, test.want) + } + }) + } +} + +type testQueue struct { + t *testing.T + *Queue +} + +func (q testQueue) mustAdd(item string) { + q.t.Helper() + if err := q.Add(item); err != nil { + q.t.Errorf("Add(%q): unexpected error: %v", item, err) + } +} + +func (q testQueue) mustRemove(want string) { + q.t.Helper() + got, ok := q.Remove() + if !ok { + q.t.Error("Remove: queue is empty") + } else if got.(string) != want { + q.t.Errorf("Remove: got %q, want %q", got, want) + } +} + +func mustQueue(t *testing.T, opts Options) testQueue { + t.Helper() + + q, err := New(opts) + if err != nil { + 
t.Fatalf("New(%+v): unexpected error: %v", opts, err) + } + return testQueue{t: t, Queue: q} +} + +func TestHardLimit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 1}) + q.mustAdd("foo") + if err := q.Add("bar"); err != ErrQueueFull { + t.Errorf("Add: got err=%v, want %v", err, ErrQueueFull) + } +} + +func TestSoftQuota(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 4}) + q.mustAdd("foo") + q.mustAdd("bar") + if err := q.Add("baz"); err != ErrNoCredit { + t.Errorf("Add: got err=%v, want %v", err, ErrNoCredit) + } +} + +func TestBurstCredit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 5}) + q.mustAdd("foo") + q.mustAdd("bar") + + // We should still have all our initial credit. + if q.credit < 2 { + t.Errorf("Wrong credit: got %f, want ≥ 2", q.credit) + } + + // Removing an item below soft quota should increase our credit. + q.mustRemove("foo") + if q.credit <= 2 { + t.Errorf("wrong credit: got %f, want > 2", q.credit) + } + + // Credit should be capped by the hard limit. + q.mustRemove("bar") + q.mustAdd("baz") + q.mustRemove("baz") + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + t.Errorf("Wrong credit: got %f, want ≤ %f", q.credit, cap) + } +} + +func TestClose(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 10}) + q.mustAdd("alpha") + q.mustAdd("bravo") + q.mustAdd("charlie") + q.Close() + + // After closing the queue, subsequent writes should fail. + if err := q.Add("foxtrot"); err == nil { + t.Error("Add should have failed after Close") + } + + // However, the remaining contents of the queue should still work. + q.mustRemove("alpha") + q.mustRemove("bravo") + q.mustRemove("charlie") +} + +func TestWait(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 2}) + + // A wait on an empty queue should time out. + t.Run("WaitTimeout", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + got, err := q.Wait(ctx) + if err == nil { + t.Errorf("Wait: got %v, want error", got) + } else { + t.Logf("Wait correctly failed: %v", err) + } + }) + + // A wait on a non-empty queue should report an item. + t.Run("WaitNonEmpty", func(t *testing.T) { + const input = "figgy pudding" + q.mustAdd(input) + + got, err := q.Wait(context.Background()) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }) + + // Wait should block until an item arrives. + t.Run("WaitOnEmpty", func(t *testing.T) { + const input = "fleet footed kittens" + + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(context.Background()) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }() + + q.mustAdd(input) + <-done + }) + + // Closing the queue unblocks a wait. 
+ t.Run("UnblockOnClose", func(t *testing.T) { + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(context.Background()) + if err != ErrQueueClosed { + t.Errorf("Wait: got (%v, %v), want %v", got, err, ErrQueueClosed) + } + }() + + q.Close() + <-done + }) +} diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 1ae111f63..fdd728961 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -190,7 +190,10 @@ func (s *Server) subscribe(ctx context.Context, clientID string, query Query, ou return nil, ErrAlreadySubscribed } - sub := NewSubscription(outCapacity) + sub, err := newSubscription(outCapacity) + if err != nil { + return nil, err + } s.subs.index.add(&subInfo{ clientID: clientID, query: query, @@ -388,15 +391,8 @@ func (s *Server) send(data interface{}, events []types.Event) error { // use case doesn't require this affordance, and then remove unbuffered // subscriptions. msg := NewMessage(si.sub.id, data, events) - if cap(si.sub.out) == 0 { - si.sub.out <- msg - continue - } - select { - case si.sub.out <- msg: - // ok, delivered - default: - // slow subscriber, cancel them + if err := si.sub.putMessage(msg); err != nil { + // The subscriber was too slow, cancel them. evict.add(si) } } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index d3855be7a..e0f1f9eb5 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -55,6 +55,9 @@ func TestSubscribe(t *testing.T) { err = s.Publish(ctx, "Ivan") require.NoError(t, err) + + err = s.Publish(ctx, "Natasha") + require.NoError(t, err) }() select { @@ -146,6 +149,8 @@ func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { require.NoError(t, err) err = s.Publish(ctx, "Viper") require.NoError(t, err) + err = s.Publish(ctx, "Black Panther") + require.NoError(t, err) assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) } diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 4210416b6..827e5e6a6 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -1,11 +1,13 @@ package pubsub import ( + "context" "errors" "fmt" "github.com/google/uuid" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/libs/queue" tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) @@ -24,21 +26,66 @@ var ( // 2) channel which is closed if a client is too slow or choose to unsubscribe // 3) err indicating the reason for (2) type Subscription struct { - id string - out chan Message + id string + out chan Message + queue *queue.Queue canceled chan struct{} + stop func() mtx tmsync.RWMutex err error } -// NewSubscription returns a new subscription with the given outCapacity. -func NewSubscription(outCapacity int) *Subscription { - return &Subscription{ +// newSubscription returns a new subscription with the given outCapacity. +func newSubscription(outCapacity int) (*Subscription, error) { + sub := &Subscription{ id: uuid.NewString(), - out: make(chan Message, outCapacity), + out: make(chan Message), canceled: make(chan struct{}), + + // N.B. The output channel is always unbuffered. For an unbuffered + // subscription that was already the case, and for a buffered one the + // queue now serves as the buffer. 
} + + if outCapacity == 0 { + sub.stop = func() { close(sub.canceled) } + return sub, nil + } + q, err := queue.New(queue.Options{ + SoftQuota: outCapacity, + HardLimit: outCapacity, + }) + if err != nil { + return nil, fmt.Errorf("creating queue: %w", err) + } + sub.queue = q + sub.stop = func() { q.Close(); close(sub.canceled) } + + // Start a goroutine to bridge messages from the queue to the channel. + // TODO(creachadair): This is a temporary hack until we can change the + // interface not to expose the channel directly. + go func() { + for { + next, err := q.Wait(context.Background()) + if err != nil { + return // the subscription was terminated + } + sub.out <- next.(Message) + } + }() + return sub, nil +} + +// putMessage transmits msg to the subscriber. If s is unbuffered, this blocks +// until msg is delivered and returns nil; otherwise it reports an error if the +// queue cannot accept any further messages. +func (s *Subscription) putMessage(msg Message) error { + if s.queue != nil { + return s.queue.Add(msg) + } + s.out <- msg + return nil } // Out returns a channel onto which messages and events are published. @@ -81,7 +128,7 @@ func (s *Subscription) cancel(err error) { s.err = err } - close(s.canceled) + s.stop() } // Message glues data and events together. From 5bc9cb62604b3f920213f4ee2436fcce98340ef7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Nov 2021 07:18:12 -0700 Subject: [PATCH 136/174] build(deps): Bump github.com/rs/zerolog from 1.25.0 to 1.26.0 (#7192) Bumps [github.com/rs/zerolog](https://github.com/rs/zerolog) from 1.25.0 to 1.26.0. - [Release notes](https://github.com/rs/zerolog/releases) - [Commits](https://github.com/rs/zerolog/compare/v1.25.0...v1.26.0) --- updated-dependencies: - dependency-name: github.com/rs/zerolog dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index cfd8e7fe7..3dc17fa30 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/prometheus/client_golang v1.11.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.8.0 - github.com/rs/zerolog v1.25.0 + github.com/rs/zerolog v1.26.0 github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.2.1 diff --git a/go.sum b/go.sum index e767033e5..b2918b67c 100644 --- a/go.sum +++ b/go.sum @@ -824,8 +824,8 @@ github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= -github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= -github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= +github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE= +github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= @@ -972,6 +972,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1125,6 +1126,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1241,6 +1243,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= @@ -1363,8 +1366,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 63192ac30099a0fcc432e2177baedba9ff6fa5cc Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 3 Nov 2021 12:01:46 +0100 Subject: [PATCH 137/174] consensus: remove stale WAL benchmark (#7194) --- internal/consensus/wal_test.go | 68 ---------------------------------- 1 file changed, 68 deletions(-) diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index 180af5f34..114bb21b1 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -2,10 +2,8 @@ package consensus import ( "bytes" - "crypto/rand" "path/filepath" - // "sync" "testing" "time" @@ -208,69 +206,3 @@ func TestWALPeriodicSync(t *testing.T) { gr.Close() } } - -/* -var initOnce sync.Once - -func registerInterfacesOnce() { - initOnce.Do(func() { - var _ = wire.RegisterInterface( - struct{ WALMessage }{}, - wire.ConcreteType{[]byte{}, 0x10}, - ) - }) -} -*/ - -func nBytes(n int) []byte { - buf := make([]byte, n) - n, _ = rand.Read(buf) - return buf[:n] -} - -func benchmarkWalDecode(b *testing.B, n int) { - // registerInterfacesOnce() - buf := new(bytes.Buffer) - enc := NewWALEncoder(buf) - - data := nBytes(n) - if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { - b.Error(err) - } - - encoded := buf.Bytes() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(encoded) - dec := NewWALDecoder(buf) - if _, err := dec.Decode(); err != nil { - b.Fatal(err) - } - } - b.ReportAllocs() -} - -func BenchmarkWalDecode512B(b *testing.B) { - benchmarkWalDecode(b, 512) -} - -func BenchmarkWalDecode10KB(b *testing.B) { - benchmarkWalDecode(b, 10*1024) -} -func BenchmarkWalDecode100KB(b *testing.B) { - benchmarkWalDecode(b, 100*1024) -} -func BenchmarkWalDecode1MB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024) -} -func BenchmarkWalDecode10MB(b *testing.B) { - benchmarkWalDecode(b, 10*1024*1024) -} -func BenchmarkWalDecode100MB(b *testing.B) { - benchmarkWalDecode(b, 
100*1024*1024) -} -func BenchmarkWalDecode1GB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024*1024) -} From e2b626fc926fb2af6f31628631863e38a236ec33 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 3 Nov 2021 12:16:35 +0100 Subject: [PATCH 138/174] node: cleanup construction (#7191) --- node/node.go | 62 +++++++++++++-------------------------------------- node/setup.go | 46 +++++++++++++++++++++----------------- 2 files changed, 42 insertions(+), 66 deletions(-) diff --git a/node/node.go b/node/node.go index bfccff6ef..eec58ef8b 100644 --- a/node/node.go +++ b/node/node.go @@ -151,7 +151,6 @@ func makeNode(cfg *config.Config, state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) @@ -160,7 +159,6 @@ func makeNode(cfg *config.Config, proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } // EventBus and IndexerService must be started before the handshake because @@ -170,7 +168,6 @@ func makeNode(cfg *config.Config, eventBus, err := createAndStartEventBus(logger) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID) @@ -224,11 +221,9 @@ func makeNode(cfg *config.Config, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, logger); err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } // Reload the state. It will have the Version.Consensus.App set by the @@ -246,7 +241,7 @@ func makeNode(cfg *config.Config, // app may modify the validator set, specifying ourself as the only validator. blockSync := !onlyValidatorIsUs(state, pubKey) - logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode) + logNodeStartupInfo(state, pubKey, logger, cfg.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. // TODO: Use a persistent peer database. @@ -277,7 +272,6 @@ func makeNode(cfg *config.Config, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } evReactor, evPool, err := createEvidenceReactor( @@ -285,7 +279,6 @@ func makeNode(cfg *config.Config, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } // make block executor for consensus and blockchain reactors to execute blocks @@ -302,7 +295,7 @@ func makeNode(cfg *config.Config, csReactor, csState, err := createConsensusReactor( cfg, state, blockExec, blockStore, mp, evPool, privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, - peerManager, router, consensusLogger, + peerManager, router, logger, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -332,7 +325,6 @@ func makeNode(cfg *config.Config, // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. 
See: // https://github.com/tendermint/tendermint/issues/4644 - ssLogger := logger.With("module", "statesync") ssChDesc := statesync.GetChannelDescriptors() channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc)) for idx := range ssChDesc { @@ -345,50 +337,29 @@ func makeNode(cfg *config.Config, channels[ch.ID] = ch } - peerUpdates := peerManager.Subscribe() stateSyncReactor := statesync.NewReactor( genDoc.ChainID, genDoc.InitialHeight, *cfg.StateSync, - ssLogger, + logger.With("module", "statesync"), proxyApp.Snapshot(), proxyApp.Query(), channels[statesync.SnapshotChannel], channels[statesync.ChunkChannel], channels[statesync.LightBlockChannel], channels[statesync.ParamsChannel], - peerUpdates, + peerManager.Subscribe(), stateStore, blockStore, cfg.StateSync.TempDir, nodeMetrics.statesync, ) - // Optionally, start the pex reactor - // - // TODO: - // - // We need to set Seeds and PersistentPeers on the switch, - // since it needs to be able to use these (and their DNS names) - // even if the PEX is off. We can include the DNS name in the NetAddress, - // but it would still be nice to have a clear list of the current "PersistentPeers" - // somewhere that we can return with net_info. - // - // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. - // Note we currently use the addrBook regardless at least for AddOurAddress - pexReactor, err := createPEXReactor(logger, peerManager, router) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } - if cfg.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) - }() - } - node := &nodeImpl{ config: cfg, genesisDoc: genDoc, @@ -461,7 +432,6 @@ func makeSeedNode(cfg *config.Config, state, err := sm.MakeGenesisState(genDoc) if err != nil { return nil, err - } nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) @@ -487,19 +457,9 @@ func makeSeedNode(cfg *config.Config, closer) } - var pexReactor service.Service - - pexReactor, err = createPEXReactor(logger, peerManager, router) + pexReactor, err := createPEXReactor(logger, peerManager, router) if err != nil { return nil, combineCloseError(err, closer) - - } - - if cfg.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) - }() } node := &nodeImpl{ @@ -522,6 +482,16 @@ func makeSeedNode(cfg *config.Config, // OnStart starts the Node. It implements service.Service. func (n *nodeImpl) OnStart() error { + if n.config.RPC.PprofListenAddress != "" { + // this service is not cleaned up (I believe that we'd + // need to have another thread and potentially a + // context to get this functionality.)
+ go func() { + n.Logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) + n.Logger.Error("pprof server error", "err", http.ListenAndServe(n.config.RPC.PprofListenAddress, nil)) + }() + } + now := tmtime.Now() genTime := n.genesisDoc.GenesisTime if genTime.After(now) { diff --git a/node/setup.go b/node/setup.go index ffedbec33..a0d22a31c 100644 --- a/node/setup.go +++ b/node/setup.go @@ -135,18 +135,20 @@ func doHandshake( genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, - consensusLogger log.Logger) error { + logger log.Logger, +) error { handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) + handshaker.SetLogger(logger.With("module", "handshaker")) handshaker.SetEventBus(eventBus) + if err := handshaker.Handshake(proxyApp); err != nil { return fmt.Errorf("error during handshake: %v", err) } return nil } -func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) { +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger, mode string) { // Log the version info. logger.Info("Version info", "tmVersion", version.TMVersion, @@ -162,17 +164,23 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL "state", state.Version.Consensus.Block, ) } - switch { - case mode == config.ModeFull: - consensusLogger.Info("This node is a fullnode") - case mode == config.ModeValidator: + + switch mode { + case config.ModeFull: + logger.Info("This node is a fullnode") + case config.ModeValidator: addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { - consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } else { - consensusLogger.Info("This node is a validator (NOT in the active validator set)", - "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator (NOT in the active validator set)", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } } } @@ -312,6 +320,7 @@ func createConsensusReactor( router *p2p.Router, logger log.Logger, ) (*consensus.Reactor, *consensus.State, error) { + logger = logger.With("module", "consensus") consensusState := consensus.NewState( cfg.Consensus, @@ -339,8 +348,6 @@ func createConsensusReactor( channels[ch.ID] = ch } - peerUpdates := peerManager.Subscribe() - reactor := consensus.NewReactor( logger, consensusState, @@ -348,7 +355,7 @@ func createConsensusReactor( channels[consensus.DataChannel], channels[consensus.VoteChannel], channels[consensus.VoteSetBitsChannel], - peerUpdates, + peerManager.Subscribe(), waitSync, consensus.ReactorMetrics(csMetrics), ) @@ -381,6 +388,11 @@ func createPeerManager( nodeID types.NodeID, ) (*p2p.PeerManager, closer, error) { + privatePeerIDs := make(map[types.NodeID]struct{}) + for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { + privatePeerIDs[types.NodeID(id)] = struct{}{} + } + var maxConns uint16 switch { @@ -390,11 +402,6 @@ func createPeerManager( maxConns = 64 } - privatePeerIDs := make(map[types.NodeID]struct{}) - for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { - privatePeerIDs[types.NodeID(id)] = struct{}{} - } - options := p2p.PeerManagerOptions{ MaxConnected: maxConns, MaxConnectedUpgrade: 4, @@ -485,8 +492,7 @@ func createPEXReactor( 
return nil, err } - peerUpdates := peerManager.Subscribe() - return pex.NewReactor(logger, peerManager, channel, peerUpdates), nil + return pex.NewReactor(logger, peerManager, channel, peerManager.Subscribe()), nil } func makeNodeInfo( From 279e8027d3d8bac50714f72e676c7bac5b60fa4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Nov 2021 11:21:45 +0000 Subject: [PATCH 139/174] build(deps): Bump actions/checkout from 2.3.5 to 2.4.0 (#7199) Bumps [actions/checkout](https://github.com/actions/checkout) from 2.3.5 to 2.4.0.
Release notes (actions/checkout v2.4.0): convert SSH URLs like `org-<ORG_ID>@github.com:` to `https://github.com/`.
--- .github/workflows/coverage.yml | 8 ++++---- .github/workflows/docker.yml | 2 +- .github/workflows/e2e-nightly-34x.yml | 2 +- .github/workflows/e2e-nightly-35x.yml | 2 +- .github/workflows/e2e-nightly-master.yml | 2 +- .github/workflows/e2e.yml | 2 +- .github/workflows/fuzz-nightly.yml | 2 +- .github/workflows/jepsen.yml | 2 +- .github/workflows/linkchecker.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/linter.yml | 2 +- .github/workflows/proto-docker.yml | 2 +- .github/workflows/proto.yml | 4 ++-- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 6 +++--- 15 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index e8f5a600b..1e2749b3c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -12,7 +12,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Create a file with all the pkgs run: go list ./... > pkgs.txt - name: Split pkgs into 4 files @@ -47,7 +47,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -70,7 +70,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -99,7 +99,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 606d66cdb..033a7c46f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Prepare id: prep run: | diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 4b6fc8c93..910d9bc2c 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 with: ref: 'v0.34.x' diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index 14662bbea..df4dfb039 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 with: ref: 'v0.35.x' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 0d5dbb830..8d55eb6ad 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index e061afc5e..400741def 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: '1.17' - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml 
b/.github/workflows/fuzz-nightly.yml index bc668ecd0..d38e1f785 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -17,7 +17,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Install go-fuzz working-directory: test/fuzz diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 2266e5ceb..60b49443d 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.5 + uses: actions/checkout@v2.4.0 with: repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 14998c136..1a6380026 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 948a10951..b808d9f77 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index afb04132d..7cfc7c161 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.3.5 + uses: actions/checkout@v2.4.0 - name: Lint Code Base uses: docker://github/super-linter:v3 env: diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml index ddb189b4b..3f98a4566 100644 --- a/.github/workflows/proto-docker.yml +++ b/.github/workflows/proto-docker.yml @@ -16,7 +16,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: Prepare id: prep run: | diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index ed73f07d7..5188b46da 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -11,13 +11,13 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: lint run: make proto-lint proto-breakage: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - name: check-breakage run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dcf4f957a..54a6c010a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.5 + uses: actions/checkout@v2.4.0 with: fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 176a686aa..83f753a5e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -50,7 +50,7 @@ jobs: - uses: actions/setup-go@v2 with: 
go-version: "1.17" - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -81,7 +81,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.5 + - uses: actions/checkout@v2.4.0 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | From ffcd347ef61e7dc1b38da886d1695804beeed2f6 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 3 Nov 2021 12:51:42 +0100 Subject: [PATCH 140/174] pex: allow disabled pex reactor (#7198) This ensures the implementation respects disabling the pex reactor. --- node/node.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/node/node.go b/node/node.go index eec58ef8b..c17a4f409 100644 --- a/node/node.go +++ b/node/node.go @@ -355,9 +355,12 @@ func makeNode(cfg *config.Config, nodeMetrics.statesync, ) - pexReactor, err := createPEXReactor(logger, peerManager, router) - if err != nil { - return nil, combineCloseError(err, makeCloser(closers)) + var pexReactor service.Service + if cfg.P2P.PexReactor { + pexReactor, err = createPEXReactor(logger, peerManager, router) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } } node := &nodeImpl{ @@ -423,6 +426,9 @@ func makeSeedNode(cfg *config.Config, genesisDocProvider genesisDocProvider, logger log.Logger, ) (service.Service, error) { + if !cfg.P2P.PexReactor { + return nil, errors.New("cannot run seed nodes with PEX disabled") + } genDoc, err := genesisDocProvider() if err != nil { @@ -546,8 +552,10 @@ func (n *nodeImpl) OnStart() error { } } - if err := n.pexReactor.Start(); err != nil { - return err + if n.config.P2P.PexReactor { + if err := n.pexReactor.Start(); err != nil { + return err + } } // Run state sync From 6f08c1437568dbaa1f1e200d4b94344c76debd93 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 3 Nov 2021 14:30:53 +0100 Subject: [PATCH 141/174] ci: update dependabot configuration (#7204) I increased the chill of the github actions and NPM/docs dependencies, and added default automerging of the go dependencies. --- .github/dependabot.yml | 43 ++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b6729552d..3db35d523 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,25 +3,48 @@ updates: - package-ecosystem: github-actions directory: "/" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + - package-ecosystem: npm directory: "/docs" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 - reviewers: - - fadeev + + ################################### + ## + ## Update All Go Dependencies + - package-ecosystem: gomod directory: "/" schedule: interval: daily - time: "11:00" + target-branch: "master" open-pull-requests-limit: 10 - reviewers: - - melekes - - tessr labels: - T:dependencies + - S:automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "v0.34.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "v0.35.x" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge From dc28734dadfc78dd3784c68cbaec4ec7d7017ed3 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Wed, 3 Nov 2021 07:39:09 -0700 Subject: [PATCH 142/174] pubsub: Remove uninformative publisher benchmarks. (#7195) Prior to #7177, these benchmarks did not provide any useful data about the performance of the pubsub system (in fact, prior to #7178, half of them did not work at all). Specifically, they create a bunch of subscribers with 1 buffer slot on a default publisher and copy messages to them. But because the publisher is single-threaded, and doesn't block for delivery, all this tested is how long it takes to receive a single message from a channel and deliver it to another channel. The resulting stat does not even vary meaningfully with batch size, since it's testing a serial workload. Since #7177 the benchmarks do correctly reflect delivery time (good), but still do not tell us anything useful: The latencies that matter for pubsub are not internal queuing, but the effects of backpressure on the publisher via the subscribers. That's an integration problem, and simulating a fake workload does not provide meaningful results. On that basis, remove these benchmarks. --- libs/pubsub/pubsub_test.go | 108 ------------------------------------- 1 file changed, 108 deletions(-) diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index e0f1f9eb5..397a88e3d 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -451,114 +451,6 @@ func TestBufferCapacity(t *testing.T) { } } -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } - -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - for i := 0; i < n; i++ { - subscription, err := s.Subscribe( - ctx, - clientID, - query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), - ) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: string(rune(i))}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - id := fmt.Sprintf("clientID-%d", i+1) - subscription, err := s.Subscribe(ctx, id, q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < 
b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: "1"}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - // HELPERS func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { From 0de4aa176530664ce3e9dce5682b2c57f99ae1a8 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 3 Nov 2021 08:36:02 -0700 Subject: [PATCH 143/174] Fix spurious crasher in mempool fuzz test. (#7190) We were creating a TestLogger without testing being initialized. Switch to a no-op logger instead. --- test/fuzz/mempool/checktx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index 708f2ebd0..c0f207546 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -28,7 +28,7 @@ func init() { getMp = func() mempool.Mempool { if mp == nil { mp = mempool.NewTxMempool( - log.TestingLogger().With("module", "mempool"), + log.NewNopLogger(), cfg, appConnMem, 0, From 01c43c153b65eec1c4ba2a3177756d681fe2d8b4 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 3 Nov 2021 09:28:46 -0700 Subject: [PATCH 144/174] doc: Set up Dependabot on new backport branches. (#7227) --- RELEASES.md | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 8d9bc2b8e..b2c5ef6d5 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -29,8 +29,8 @@ merging the pull request. ### Creating a backport branch -If this is the first release candidate for a major release, you get to have the honor of creating -the backport branch! +If this is the first release candidate for a major release, you get to have the +honor of creating the backport branch! Note that, after creating the backport branch, you'll also need to update the tags on `master` so that `go mod` is able to order the branches correctly. You @@ -43,16 +43,33 @@ the 0.35.x line. 1. Start on `master` 2. Create and push the backport branch: - `git checkout -b v0.35.x; git push origin v0.35.x` -3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: - `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev` -4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch. - (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml - for an example.) -5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the + ```sh + git checkout -b v0.35.x + git push origin v0.35.x + ``` + +After doing these steps, go back to `master` and do the following: + +1. Tag `master` as the dev branch for the _next_ major release and push it back up. + For example: + ```sh + git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36." + git push origin v0.36.0-dev + ``` + +2. Create a new workflow to run e2e nightlies for the new backport branch. + (See [e2e-nightly-master.yml][e2e] for an example.) + +3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x` [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered. +4. 
Add a new section to the Dependabot config (`.github/dependabot.yml`) to + enable automatic update of Go dependencies on this branch. Copy and edit one + of the existing branch configurations to set the correct `target-branch`. + +[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml + ## Release candidates Before creating an official release, especially a major release, we may want to create a From a2f4f3fb4841607c60d8de13114e3e1f311a4314 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Nov 2021 16:40:55 +0000 Subject: [PATCH 145/174] build(deps): Bump github.com/golangci/golangci-lint from 1.42.1 to 1.43.0 (#7219) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.42.1 to 1.43.0.
Release notes (golangci-lint v1.43.0): adds the bidichk, nilnil, tenv, contextcheck, ireturn, and varnamelen linters; bumps gochecknoglobals to v0.1.0 and updates go-critic, gosec, revive, and many other linter dependencies.
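The new linters in v1.43.0 are opt-in rather than enabled by default. As an illustrative sketch only (not part of this patch; the linter names are taken from the release notes above), a minimal `.golangci.yml` fragment enabling two of them could look like:

```yaml
linters:
  enable:
    - bidichk # flag dangerous Unicode bidirectional control characters
    - nilnil  # flag functions that return a nil value alongside a nil error
```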
--- go.mod | 4 +- go.sum | 162 ++++++++++++++++++++++++++++++++++++--------------------- 2 files changed, 105 insertions(+), 61 deletions(-) diff --git a/go.mod b/go.mod index 3dc17fa30..3d4d459a3 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.42.1 + github.com/golangci/golangci-lint v1.43.0 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 @@ -33,7 +33,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 github.com/vektra/mockery/v2 v2.9.4 - golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.41.0 diff --git a/go.sum b/go.sum index b2918b67c..3f3420892 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -51,8 +51,10 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY= -github.com/Antonboom/errname v0.1.4/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= +github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg= +github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= +github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74= +github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -98,7 +100,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.2/go.mod 
h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= @@ -130,8 +133,12 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.3.0 h1:80mYO7Y5ppeEefg1Jzu+NBg16iwToOQVnDnNIoWSShs= +github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec= github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/breml/bidichk v0.1.1 h1:Qpy8Rmgos9qdJxhka0K7ADEE5bQZX9PQUthkgggHpFM= +github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= @@ -146,6 +153,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= +github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -156,8 +165,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0= -github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= +github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= @@ -235,8 +244,8 @@ 
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/esimonov/ifshort v1.0.3 h1:JD6x035opqGec5fZ0TLjXeROD2p5H7oLGn8MKfy9HTM= +github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= @@ -248,8 +257,9 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+ne github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -268,8 +278,8 @@ github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASx github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/go-critic/go-critic v0.5.6 h1:siUR1+322iVikWXoV75I1YRfNaC/yaLzhdF9Zwd8Tus= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= +github.com/go-critic/go-critic v0.6.1 h1:lS8B9LH/VVsvQQP7Ao5TJyQqteVKVs3E4dXiHMyubtI= +github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -284,6 +294,7 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6/go.mod 
h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= @@ -295,8 +306,9 @@ github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc= +github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= @@ -372,16 +384,16 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.42.1 h1:nC4WyrbdnNdohDVUoNKjy/4N4FTM1gCFaVeXecy6vzM= -github.com/golangci/golangci-lint v1.42.1/go.mod h1:MuInrVlgg2jq4do6XI1jbkErbVHVbwdrLLtGv6p2wPI= +github.com/golangci/golangci-lint v1.43.0 h1:SLwZFEmDgopqZpfP495zCtV9REUf551JJlJ51Ql7NZA= +github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= 
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -430,7 +442,6 @@ github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4Mgqvf github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -453,15 +464,20 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8= github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 h1:rx8127mFPqXXsfPSo8BwnIU97MKFZc89WHAHt8PwDVY= github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -501,6 +517,8 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -530,8 +548,8 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v1.1.0 h1:u6h4eiNuCLqk73Ic5TXQq9yZS+uEXTdusn7c3w1Mr6A= -github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -542,6 +560,7 @@ github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -568,9 +587,8 @@ github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -586,8 +604,8 @@ github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM= github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= +github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= @@ -600,12 +618,12 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg= github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= @@ -614,11 +632,15 @@ github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKo github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -639,10 +661,10 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.1.1 h1:mkXNHP14Y6tfq+ocnQaiKEtgJDM41yaoyQq4qn6TD/4= -github.com/mgechev/revive v1.1.1/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.2 h1:MiYA/o9M7REjvOF20QN43U8OtXDDHQFKLCtJnxLGLog= +github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= @@ -686,8 +708,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= @@ -727,8 +749,9 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod 
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -743,6 +766,12 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -803,12 +832,12 @@ github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4 h1:F6l5p6+7WBcTKS7foNQ4wqA39zjn2+RbdbyzGxIq1B0= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= +github.com/quasilyte/go-ruleguard v0.3.13 h1:O1G41cq1jUr3cJmqp7vOUT0SokqjzmS9aESWJuIDRaY= +github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -840,12 +869,12 @@ github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxr github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod 
h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8= -github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q= +github.com/securego/gosec/v2 v2.9.1 h1:anHKLS/ApTYU6NZkKa/5cQqqcbKZURjvc+MtR++S4EQ= +github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -855,6 +884,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= @@ -890,12 +921,11 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= -github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -913,6 +943,8 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -922,17 +954,21 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ= github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= -github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0= -github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4= -github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= -github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= +github.com/tomarrell/wrapcheck/v2 v2.4.0 h1:mU4H9KsqqPZUALOUbVOpjy8qNQbWLoLI9fV68/1tq30= +github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod 
h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= @@ -951,9 +987,9 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g= github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= @@ -1029,8 +1065,9 @@ golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1070,8 +1107,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1108,7 +1146,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1124,6 +1161,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1212,7 +1250,6 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1244,10 +1281,14 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1362,11 +1403,14 @@ golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From ae92b135d8830652cb171ea05a3edab61f34ec93 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 3 Nov 2021 22:28:59 +0100 Subject: [PATCH 146/174] docs: update bounty links (#7203) --- CHANGELOG.md | 4 ++-- README.md | 2 +- SECURITY.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1ef2201a..5be350148 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1994,7 +1994,7 @@ For more, see issues marked This release also includes a fix to prevent Tendermint from including the same piece of evidence in more than one block. This issue was reported by @chengwenxi in our -[bug bounty program](https://hackerone.com/tendermint). +[bug bounty program](https://hackerone.com/cosmos). ### BREAKING CHANGES: @@ -2487,7 +2487,7 @@ Special thanks to external contributors on this release: @james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu. Special thanks to @Slamper for a series of bug reports in our [bug bounty -program](https://hackerone.com/tendermint) which are fixed in this release. +program](https://hackerone.com/cosmos) which are fixed in this release. 
This release is primarily about adding Version fields to various data
structures, optimizing consensus messages for signing and verification in
diff --git a/README.md b/README.md
index f0da8f484..7823d45c1 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ More on how releases are conducted can be found [here](./RELEASES.md).
 ## Security

 To report a security vulnerability, see our [bug bounty
-program](https://hackerone.com/tendermint).
+program](https://hackerone.com/cosmos).
 For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
 We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
diff --git a/SECURITY.md b/SECURITY.md
index 57d13e565..133e993c4 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -4,7 +4,7 @@
 As part of our [Coordinated Vulnerability Disclosure
 Policy](https://tendermint.com/security), we operate a [bug
-bounty](https://hackerone.com/tendermint).
+bounty](https://hackerone.com/cosmos).
 See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.

 ### Guidelines
@@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrad

 ## Scope

-The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
+The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

 * Any third-party services
 * Findings from physical testing, such as office access

From 797541e7424cc5b45b53591994b276be07da6717 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Thu, 4 Nov 2021 14:20:26 +0100
Subject: [PATCH 147/174] docs: add upgrading info about node service (#7241)

* docs: add info about RPC local to upgrading docs

* Apply suggestions from code review

Co-authored-by: Callum Waters

* cleanup codeblock

Co-authored-by: Callum Waters
---
 UPGRADING.md | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/UPGRADING.md b/UPGRADING.md
index 99efdf225..bd6411c6a 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -98,7 +98,7 @@ are:
 - `blockchain`
 - `evidence`

-Accordingly, the `node` package was changed to reduce access to
+Accordingly, the `node` package changed to reduce access to
 tendermint internals: applications that use tendermint as a library
 will need to change to accommodate these changes. Most notably:

@@ -109,6 +109,20 @@ will need to change to accommodate these changes. Most notably:
   longer exported and have been replaced with `node.New` and
   `node.NewDefault` which provide more functional interfaces.

+To access any of the functionality previously available via the
+`node.Node` type, use the `*local.Local` "RPC" client, which exposes
+the full RPC interface provided as direct function calls.
Import the +`github.com/tendermint/tendermint/rpc/client/local` package and pass +the node service as in the following: + +```go + node := node.NewDefault() //construct the node object + // start and set up the node service + + client := local.New(node.(local.NodeService)) + // use client object to interact with the node +``` + ### gRPC Support Mark gRPC in the RPC layer as deprecated and to be removed in 0.36. From b3b1279d1f4ce2a5e47bf175d30e2800b2ef07af Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 4 Nov 2021 08:29:23 -0700 Subject: [PATCH 148/174] Add v0.35 to the configs for building the docs website. (#7055) --- docs/.vuepress/config.js | 4 ++++ docs/versions | 1 + 2 files changed, 5 insertions(+) diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 4653e2c5b..b1077de20 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -34,6 +34,10 @@ module.exports = { "label": "v0.34", "key": "v0.34" }, + { + "label": "v0.35", + "key": "v0.35" + }, { "label": "master", "key": "master" diff --git a/docs/versions b/docs/versions index c88a614bd..7c63b5056 100644 --- a/docs/versions +++ b/docs/versions @@ -2,3 +2,4 @@ master master v0.32.x v0.32 v0.33.x v0.33 v0.34.x v0.34 +v0.35.x v0.35 From 641a5ec998b9fe4bc30ec8a0fdca28cde8fcb7e8 Mon Sep 17 00:00:00 2001 From: JayT106 Date: Thu, 4 Nov 2021 12:38:08 -0400 Subject: [PATCH 149/174] rpc: fix inappropriate http request log (#7244) --- rpc/jsonrpc/server/http_uri_handler.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 07b3616b4..9fb5c1cde 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "net/http/httputil" "reflect" "regexp" "strings" @@ -36,7 +37,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit // All other endpoints return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) + logger.Debug("HTTP HANDLER", "req", dumpHTTPRequest(r)) ctx := &rpctypes.Context{HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} @@ -232,3 +233,12 @@ func getParam(r *http.Request, param string) string { } return s } + +func dumpHTTPRequest(r *http.Request) string { + d, e := httputil.DumpRequest(r, true) + if e != nil { + return e.Error() + } + + return string(d) +} From 3335cbe8b75699730f3e31d2d6d8295e6010152f Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 4 Nov 2021 09:40:43 -0700 Subject: [PATCH 150/174] Update master changelog after v0.35.0 tag. (#7243) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- CHANGELOG.md | 158 +++++++++++++++++++++++++-------------------------- 1 file changed, 77 insertions(+), 81 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5be350148..d6d780cd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,49 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). 
-## v0.35.0-rc2
+## v0.35.0

-September 27, 2021
+November 4, 2021
+
+Special thanks to external contributors on this release: @JayT106,
+@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman,
+@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
+
+### FEATURES
+
+- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters)
+- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish)
+- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish).
+- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters).
+- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
+- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
+- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to
+  accommodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
+  exchange reactors behave the same. (@cmwaters)
+- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
+- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
+  - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
+    the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
+  - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
+    `mempool.version` configuration, where `v1` is the default configuration.
+  - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
+  - Transactions are gossiped in FIFO order as they are in `v0`.
+- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
+- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
+- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
+- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
+
+### BUG FIXES
+
+- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
+- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
+- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) + wait until peerUpdates channel is closed to close remaining peers (@williambanfield) +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) +- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) +- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) +- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) ### BREAKING CHANGES @@ -16,53 +56,6 @@ September 27, 2021 - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to `internal` to prevent consumption of these internal APIs by external users. (@tychoish) - -### FEATURES - -- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters). - - -## v0.35.0-rc1 - -September 8, 2021 - -Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis, -@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua - -### BREAKING CHANGES - -- CLI/RPC/Config - - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) - - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) - - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) - - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) - - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) - - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) - - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) - - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) - - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) - - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) - - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. 
(@tychoish) - - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) - - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. - - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 - - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) - - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. - - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) - -- Apps - - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface - - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. - - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. - - It is not required any longer to set ldflags to set version strings - - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app - -- Go API - [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) - [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) - [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. 
(@tychoish) @@ -98,35 +91,46 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad, - [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) +- CLI/RPC/Config + + - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) + - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) + - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) + - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) + - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) + - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) + - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) + - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) + - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) + - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) + - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) + - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) + - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. + - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 + - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. + - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) + - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. 
(@tychoish) + - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106) + - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106) + +- Apps + + - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. + - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. + - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. + - It is not required any longer to set ldflags to set version strings + - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app + - Data Storage - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters) - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106) -- Tooling - - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106) - - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106) - -### FEATURES - -- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam -- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) -- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to - accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer - exchange reactors behave the same. (@cmwaters) -- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type -- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez) - - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of - the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. - - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the - `mempool.version` configuration, where `v1` is the default configuration. - - Applications that do not specify a priority, i.e. 
zero, will have transactions reaped by the order in which they are received by the node. - - Transactions are gossiped in FIFO order as they are in `v0`. -- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) -- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) -- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) -- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) - ### IMPROVEMENTS - [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish) @@ -170,14 +174,6 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad, - [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) - [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters) -### BUG FIXES - -- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) -- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) -- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) -- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) -- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) - ## v0.34.14 This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash. From 6c7d6f761bd656cb1978e62bc48df8f9a4763a23 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 5 Nov 2021 03:31:06 -0700 Subject: [PATCH 151/174] Remove unused libs/cmap. (#7245) A follow-up to #7197. --- libs/cmap/cmap.go | 91 --------------------------------- libs/cmap/cmap_test.go | 113 ----------------------------------------- 2 files changed, 204 deletions(-) delete mode 100644 libs/cmap/cmap.go delete mode 100644 libs/cmap/cmap_test.go diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go deleted file mode 100644 index 5aa82e807..000000000 --- a/libs/cmap/cmap.go +++ /dev/null @@ -1,91 +0,0 @@ -package cmap - -import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// CMap is a goroutine-safe map -type CMap struct { - m map[string]interface{} - l tmsync.Mutex -} - -func NewCMap() *CMap { - return &CMap{ - m: make(map[string]interface{}), - } -} - -func (cm *CMap) Set(key string, value interface{}) { - cm.l.Lock() - cm.m[key] = value - cm.l.Unlock() -} - -// GetOrSet returns the existing value if present. Othewise, it stores `newValue` and returns it. 
-func (cm *CMap) GetOrSet(key string, newValue interface{}) (value interface{}, alreadyExists bool) { - - cm.l.Lock() - defer cm.l.Unlock() - - if v, ok := cm.m[key]; ok { - return v, true - } - - cm.m[key] = newValue - return newValue, false -} - -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() - val := cm.m[key] - cm.l.Unlock() - return val -} - -func (cm *CMap) Has(key string) bool { - cm.l.Lock() - _, ok := cm.m[key] - cm.l.Unlock() - return ok -} - -func (cm *CMap) Delete(key string) { - cm.l.Lock() - delete(cm.m, key) - cm.l.Unlock() -} - -func (cm *CMap) Size() int { - cm.l.Lock() - size := len(cm.m) - cm.l.Unlock() - return size -} - -func (cm *CMap) Clear() { - cm.l.Lock() - cm.m = make(map[string]interface{}) - cm.l.Unlock() -} - -func (cm *CMap) Keys() []string { - cm.l.Lock() - - keys := make([]string, 0, len(cm.m)) - for k := range cm.m { - keys = append(keys, k) - } - cm.l.Unlock() - return keys -} - -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - items := make([]interface{}, 0, len(cm.m)) - for _, v := range cm.m { - items = append(items, v) - } - cm.l.Unlock() - return items -} diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go deleted file mode 100644 index 68a052bdb..000000000 --- a/libs/cmap/cmap_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package cmap - -import ( - "fmt" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIterateKeysWithValues(t *testing.T) { - cmap := NewCMap() - - for i := 1; i <= 10; i++ { - cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) - } - - // Testing size - assert.Equal(t, 10, cmap.Size()) - assert.Equal(t, 10, len(cmap.Keys())) - assert.Equal(t, 10, len(cmap.Values())) - - // Iterating Keys, checking for matching Value - for _, key := range cmap.Keys() { - val := strings.ReplaceAll(key, "key", "value") - assert.Equal(t, val, cmap.Get(key)) - } - - // Test if all keys are within []Keys() - keys := cmap.Keys() - for i := 1; i <= 10; i++ { - assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") - } - - // Delete 1 Key - cmap.Delete("key1") - - assert.NotEqual( - t, - len(keys), - len(cmap.Keys()), - "[]keys and []Keys() should not be equal, they are copies, one item was removed", - ) -} - -func TestContains(t *testing.T) { - cmap := NewCMap() - - cmap.Set("key1", "value1") - - // Test for known values - assert.True(t, cmap.Has("key1")) - assert.Equal(t, "value1", cmap.Get("key1")) - - // Test for unknown values - assert.False(t, cmap.Has("key2")) - assert.Nil(t, cmap.Get("key2")) -} - -func BenchmarkCMapHas(b *testing.B) { - m := NewCMap() - for i := 0; i < 1000; i++ { - m.Set(string(rune(i)), i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Has(string(rune(i))) - } -} - -func TestCMap_GetOrSet_Parallel(t *testing.T) { - - tests := []struct { - name string - newValue interface{} - parallelism int - }{ - {"test1", "a", 4}, - {"test2", "a", 40}, - {"test3", "a", 1}, - } - - //nolint:scopelint - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := NewCMap() - - wg := sync.WaitGroup{} - wg.Add(tt.parallelism) - for i := 0; i < tt.parallelism; i++ { - go func() { - defer wg.Done() - gotValue, _ := cm.GetOrSet(tt.name, tt.newValue) - assert.EqualValues(t, tt.newValue, gotValue) - }() - } - wg.Wait() - }) - } -} - -func TestCMap_GetOrSet_Exists(t *testing.T) { - cm := NewCMap() - - gotValue, exists := cm.GetOrSet("key", 1000) - assert.False(t, exists) - assert.EqualValues(t, 1000, gotValue) - - gotValue, exists = 
cm.GetOrSet("key", 2000)
-	assert.True(t, exists)
-	assert.EqualValues(t, 1000, gotValue)
-}
From 432067cc0e4e9734b6d17f4ce171ec381a2501c3 Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Fri, 5 Nov 2021 08:23:17 -0700
Subject: [PATCH 152/174] RFC 006: Event Subscription (#7184)

This is intended to document some ergonomic and reliability issues with
the existing implementation of the event subscription service on the
Tendermint node, and to discuss possible approaches to improving them.
---
 docs/rfc/README.md | 1 +
 docs/rfc/rfc-006-event-subscription.md | 204 +++++++++++++++++++++++++
 2 files changed, 205 insertions(+)
 create mode 100644 docs/rfc/rfc-006-event-subscription.md

diff --git a/docs/rfc/README.md b/docs/rfc/README.md
index c3adfa08a..a1c8c2c60 100644
--- a/docs/rfc/README.md
+++ b/docs/rfc/README.md
@@ -43,5 +43,6 @@ sections.
 - [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
 - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
 - [RFC-005: Event System](./rfc-005-event-system.rst)
+- [RFC-006: Event Subscription](./rfc-005-event-system.rst)

diff --git a/docs/rfc/rfc-006-event-subscription.md b/docs/rfc/rfc-006-event-subscription.md
new file mode 100644
index 000000000..4372f8d28
--- /dev/null
+++ b/docs/rfc/rfc-006-event-subscription.md
@@ -0,0 +1,204 @@
+# RFC 006: Event Subscription
+
+## Changelog
+
+- 30-Oct-2021: Initial draft (@creachadair)
+
+## Abstract
+
+The Tendermint consensus node allows clients to subscribe to its event stream
+via methods on its RPC service. The ability to view the event stream is
+valuable for clients, but the current implementation has some deficiencies that
+make it difficult for some clients to use effectively. This RFC documents these
+issues and discusses possible approaches to solving them.
+
+
+## Background
+
+A running Tendermint consensus node exports a [JSON-RPC service][rpc-service]
+that provides a [large set of methods][rpc-methods] for inspecting and
+interacting with the node. One important cluster of these methods is the
+`subscribe`, `unsubscribe`, and `unsubscribe_all` methods, which permit clients
+to subscribe to a filtered stream of the [events generated by the node][events]
+as it runs.
+
+Unlike the other methods of the service, the methods in the "event
+subscription" cluster are not accessible via [ordinary HTTP GET or POST
+requests][rpc-transport], but require upgrading the HTTP connection to a
+[websocket][ws]. This is necessary because the `subscribe` request needs a
+persistent channel to deliver results back to the client, and an ordinary HTTP
+connection does not reliably persist across multiple requests. Since these
+methods do not work properly without a persistent channel, they are _only_
+exported via a websocket connection, and are not routed for plain HTTP.
+
+
+## Discussion
+
+There are some operational problems with the current implementation of event
+subscription in the RPC service:
+
+- **Event delivery is not valid JSON-RPC.** When a client issues a `subscribe`
+  request, the server replies (correctly) with an initial empty acknowledgement
+  (`{}`). After that, each matching event is delivered "unsolicited" (without
+  another request from the client), as a separate [response object][json-response]
+  with the same ID as the initial request.
+
+  This matters because it means a standard JSON-RPC client library can't
+  interact correctly with the event subscription mechanism.
+
+  Even for clients that can handle unsolicited values pushed by the server,
+  these responses are invalid: They have an ID, so they cannot be treated as
+  [notifications][json-notify]; but the ID corresponds to a request that was
+  already completed. In practice, this means that general-purpose JSON-RPC
+  libraries cannot use this method correctly -- it requires a custom client.
+
+  The Go RPC client from the Tendermint core can support this case, but clients
+  in other languages have no easy solution.
+
+  This is the cause of issue [#2949][issue2949].
+
+- **Subscriptions are terminated by disconnection.** When the connection to the
+  client is interrupted, the subscription is silently dropped.
+
+  This is a reasonable behavior, but it matters because a client whose
+  subscription is dropped gets no useful error feedback, just a closed
+  connection. Should they try again? Is the node overloaded? Was the client
+  too slow? Did the caller forget to respond to pings? Debugging these kinds
+  of failures is unnecessarily painful.
+
+  Websockets compound this, because websocket connections time out if no
+  traffic is seen for a while, and keeping them alive requires active
+  cooperation between the client and server. With a plain TCP socket, liveness
+  is handled transparently by the keepalive mechanism. On a websocket,
+  however, one side has to occasionally send a PING (if the connection is
+  otherwise idle). The other side must return a matching PONG in time, or the
+  connection is dropped. Apart from being tedious, this is highly susceptible
+  to CPU load.
+
+  The Tendermint Go implementation automatically sends and responds to pings.
+  Clients in other languages (or not wanting to use the Tendermint libraries)
+  need to handle it explicitly. This burdens the client for no practical
+  benefit: A subscriber has no information about when matching events may be
+  available, so it shouldn't have to participate in keeping the connection
+  alive.
+
+- **Mismatched load profiles.** Most of the RPC service is mainly important for
+  low-volume local use, either by the application the node serves (e.g., the
+  ABCI methods) or by the node operator (e.g., the info methods). Event
+  subscription is important for remote clients, and may represent a much higher
+  volume of traffic.
+
+  This matters because both are using the same JSON-RPC mechanism. For
+  low-volume local use, the ergonomics of JSON-RPC are a good fit: It's easy to
+  issue queries from the command line (e.g., using `curl`) or to write scripts
+  that call the RPC methods to monitor the running node.
+
+  For high-volume remote use, JSON-RPC is not such a good fit: Even leaving
+  aside the non-standard delivery protocol mentioned above, the time and memory
+  cost of encoding event data matters for the stability of the node when there
+  can be potentially hundreds of subscribers. Moreover, a subscription is
+  long-lived compared to most RPC methods, in that it may persist as long as the
+  node is active.
+
+- **Mismatched security profiles.** The RPC service exports several methods
+  that should not be open to arbitrary remote callers, both for correctness
+  reasons (e.g., `remove_tx` and `broadcast_tx_*`) and for operational
+  stability reasons (e.g., `tx_search`). A node may still need to expose
+  events, however, to support UI tools.
+
+  This matters because all the methods share the same network endpoint.
While
+  it is possible to block the top-level GET and POST handlers with a proxy,
+  exposing the `/websocket` handler exposes not _only_ the event subscription
+  methods, but the rest of the service as well.
+
+### Possible Improvements
+
+There are several things we could do to improve the experience of developers
+who need to subscribe to events from the consensus node. These are not all
+mutually exclusive.
+
+1. **Split event subscription into a separate service.** Instead of exposing
+   event subscription on the same endpoint as the rest of the RPC service,
+   dedicate a separate endpoint on the node for _only_ event subscription. The
+   rest of the RPC services (_sans_ events) would remain as-is.
+
+   This would make it easy to disable or firewall outside access to sensitive
+   RPC methods, without blocking access to event subscription (and vice versa).
+   This is probably worth doing, even if we don't take any of the other steps
+   described here.
+
+2. **Use a different protocol for event subscription.** There are various ways
+   we could approach this, depending on how much we're willing to shake up the
+   current API. Here are sketches of a few options:
+
+   - Keep the websocket, but rework the API to be more JSON-RPC compliant,
+     perhaps by converting event delivery into notifications. This is less
+     of an up-front change for existing clients, but retains all of the existing
+     implementation complexity, and doesn't contribute much toward more serious
+     performance and UX improvements later.
+
+   - Switch from websocket to plain HTTP, and rework the subscription API to
+     use a more conventional request/response pattern instead of streaming.
+     This is a little more up-front work for existing clients, but leverages
+     better library support for clients not written in Go.
+
+     The protocol would become more chatty, but we could mitigate that with
+     batching, and in return we would get more control over what to do about
+     slow clients: Instead of simply silently dropping them, as we do now, we
+     could drop messages and signal the client that they missed some data ("M
+     dropped messages since your last poll").
+
+     This option is probably the best balance between work, API change, and
+     benefit, and has a nice incidental effect that it would be easier to debug
+     subscriptions from the command-line, like the other RPC methods.
+
+   - Switch to gRPC: Preserves a persistent connection and gives us a more
+     efficient binary wire format (protobuf), at the cost of much more work for
+     clients and harder debugging. This may be the best option if performance
+     and server load are our top concerns.
+
+     Given that we are currently using JSON-RPC, however, I'm not convinced the
+     costs of encoding and sending messages on the event subscription channel
+     are the limiting factor on subscription efficiency.
+
+3. **Delegate event subscriptions to a proxy.** Give responsibility for
+   managing event subscription to a proxy that runs separately from the node,
+   and switch the node to push events to the proxy (like a webhook) instead of
+   serving subscribers directly. This is more work for the operator (another
+   process to configure and run) but may scale better for big networks.
+
+   I mention this option for completeness, but making this change would be a
+   fairly substantial project. If we want to consider shifting responsibility
+   for event subscription outside the node anyway, we should probably be more
+   systematic about it. For a more principled approach, see point (4) below.
+
+4.
**Move event subscription downstream of indexing.** We are already planning + to give applications more control over event indexing. By extension, we + might allow the application to also control how events are filtered, + queried, and subscribed. Having the application control these concerns, + rather than the node, might make life easier for developers building UI and + tools for that application. + + This is a much larger change, so I don't think it is likely to be practical + in the near-term, but it's worth considering as a broader option. Some of + the existing code for filtering and selection could be made more reusable, + so applications would not need to reinvent everything. + + +## References + +- [Tendermint RPC service][rpc-service] +- [Tendermint RPC routes][rpc-methods] +- [Discussion of the event system][events] +- [Discussion about RPC transport options][rpc-transport] (from RFC 002) +- [RFC 6455: The websocket protocol][ws] +- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification) + +[rpc-service]: https://docs.tendermint.com/master/rpc/ +[rpc-methods]: https://github.com/tendermint/tendermint/blob/master/internal/rpc/core/routes.go#L12 +[events]: ./rfc-005-event-system.rst +[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport +[ws]: https://datatracker.ietf.org/doc/html/rfc6455 +[json-response]: https://www.jsonrpc.org/specification#response_object +[json-notify]: https://www.jsonrpc.org/specification#notification +[issue2949]: https://github.com/tendermint/tendermint/issues/2949 From b4055a0753f8c6bcdd21dec5c94569fe4366aaca Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 5 Nov 2021 08:55:48 -0700 Subject: [PATCH 153/174] Fix ToC entry for RFC 006. (#7248) Oops. --- docs/rfc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rfc/README.md b/docs/rfc/README.md index a1c8c2c60..a66969042 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -43,6 +43,6 @@ sections. - [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md) - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md) - [RFC-005: Event System](./rfc-005-event-system.rst) -- [RFC-006: Event Subscription](./rfc-005-event-system.rst) +- [RFC-006: Event Subscription](./rfc-006-event-subscription.md) From 54d70305108818be623501832eaa20a78a9d892e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 5 Nov 2021 10:25:25 -0700 Subject: [PATCH 154/174] pubsub: Move indexing out of the primary subscription path (#7231) This is part of the work described by #7156. Remove "unbuffered subscriptions" from the pubsub service. Replace them with a dedicated blocking "observer" mechanism. Use the observer mechanism for indexing. Add a SubscribeWithArgs method and deprecate the old Subscribe method. Remove SubscribeUnbuffered entirely (breaking). Rework the Subscription interface to eliminate exposed channels. Subscriptions now use a context to manage lifecycle notifications. Internalize the eventbus package. 
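For illustration only (not part of this commit), a minimal sketch of
consuming events through the reworked interface, using only calls that
appear in this change. The client ID is a placeholder, and since
eventbus is an internal package this only builds from within the
tendermint module:

    package main

    import (
        "context"

        "github.com/tendermint/tendermint/internal/eventbus"
        tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
        "github.com/tendermint/tendermint/types"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Start a default event bus, as the node does internally.
        bus := eventbus.NewDefault()
        if err := bus.Start(); err != nil {
            panic(err)
        }

        // Subscribe with explicit arguments; the subscription no
        // longer exposes channels.
        sub, err := bus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
            ClientID: "example-client", // placeholder name
            Query:    types.EventQueryNewBlock,
        })
        if err != nil {
            panic(err)
        }

        // Next blocks until an event arrives, the context ends, or
        // the subscription is terminated.
        for {
            msg, err := sub.Next(ctx)
            if err != nil {
                break
            }
            block := msg.Data().(types.EventDataNewBlock).Block
            _ = block // process the new block
        }
    }

Indexing now runs on the blocking observer mechanism rather than an
unbuffered subscription, e.g. bus.Observe(ctx, func(msg
tmpubsub.Message) error { ... }, types.EventQueryVote).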
---
 CHANGELOG_PENDING.md | 2 +
 internal/consensus/byzantine_test.go | 28 +-
 internal/consensus/common_test.go | 30 +-
 internal/consensus/invalid_test.go | 13 +-
 internal/consensus/mempool_test.go | 12 +-
 internal/consensus/reactor.go | 5 +-
 internal/consensus/reactor_test.go | 162 +++--
 internal/consensus/replay.go | 28 +-
 internal/consensus/replay_file.go | 15 +-
 internal/consensus/replay_test.go | 22 +-
 internal/consensus/state.go | 5 +-
 internal/consensus/state_test.go | 230 ++++---
 internal/consensus/wal_generator.go | 3 +-
 internal/eventbus/event_bus.go | 242 +++++++
 .../eventbus}/event_bus_test.go | 305 ++++-----
 internal/inspect/inspect.go | 5 +-
 internal/inspect/inspect_test.go | 13 +
 internal/inspect/rpc/rpc.go | 6 +-
 internal/rpc/core/env.go | 3 +-
 internal/rpc/core/events.go | 58 +-
 internal/state/execution.go | 3 +-
 internal/state/execution_test.go | 34 +-
 internal/state/indexer/indexer.go | 16 +-
 internal/state/indexer/indexer_service.go | 154 ++---
 .../state/indexer/indexer_service_test.go | 3 +-
 libs/pubsub/example_test.go | 25 +-
 libs/pubsub/pubsub.go | 173 +++--
 libs/pubsub/pubsub_test.go | 613 ++++++++----------
 libs/pubsub/subscription.go | 149 ++---
 node/node.go | 5 +-
 node/node_test.go | 16 +-
 node/setup.go | 9 +-
 rpc/client/local/local.go | 93 ++-
 types/event_bus.go | 326 ----------
 34 files changed, 1406 insertions(+), 1400 deletions(-)
 create mode 100644 internal/eventbus/event_bus.go
 rename {types => internal/eventbus}/event_bus_test.go (56%)
 delete mode 100644 types/event_bus.go

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 4735dbcb9..70b361d26 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -26,6 +26,8 @@ Special thanks to external contributors on this release:
 - Go API
+  - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair)
+  - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair)
  - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish)
  - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
  - [config] \#7169 `WriteConfigFile` now returns an error.
(@tychoish) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 717934ad1..59ab56bbb 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -14,6 +14,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -92,7 +93,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { pv := privVals[i] cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err = eventBus.Start() require.NoError(t, err) @@ -238,24 +239,25 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // we will check the first six just in case evidenceFromEachValidator := make([]types.Evidence, nValidators) - wg := new(sync.WaitGroup) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup i := 0 for _, sub := range rts.subs { wg.Add(1) - go func(j int, s types.Subscription) { + go func(j int, s eventbus.Subscription) { defer wg.Done() for { - select { - case msg := <-s.Out(): - require.NotNil(t, msg) - block := msg.Data().(types.EventDataNewBlock).Block - if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] - return - } - case <-s.Canceled(): - require.Fail(t, "subscription failed for %d", j) + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + require.NotNil(t, msg) + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence.Evidence[0] return } } diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 5a50aad21..8b25a2af7 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -20,6 +20,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" @@ -368,21 +369,20 @@ func validatePrevoteAndPrecommit( cs.mtx.Unlock() } -func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) - } - ch := make(chan tmpubsub.Message) - go func() { - for msg := range votesSub.Out() { - vote := msg.Data().(types.EventDataVote) - // we only fire for our own votes - if bytes.Equal(addr, vote.Vote.ValidatorAddress) { - ch <- msg - } +func subscribeToVoter(t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { + t.Helper() + + ch := make(chan tmpubsub.Message, 1) + if err := cs.eventBus.Observe(context.Background(), func(msg tmpubsub.Message) error { + vote := msg.Data().(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(addr, vote.Vote.ValidatorAddress) { + ch <- msg } - }() + return nil + }, types.EventQueryVote); err 
!= nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } return ch } @@ -446,7 +446,7 @@ func newStateWithConfigAndBlockStore( cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err := eventBus.Start() if err != nil { diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 6f858ee11..f06692b77 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -5,7 +5,9 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -55,13 +57,18 @@ func TestReactorInvalidPrecommit(t *testing.T) { // // TODO: Make this tighter by ensuring the halt happens by block 2. var wg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for i := 0; i < 10; i++ { for _, sub := range rts.subs { wg.Add(1) - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() // cancel other subscribers on failure + } }(sub) } } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 558dbd4b3..b31e1d901 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -37,7 +37,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) ensureNewEventOnChannel(newBlockCh) // first block gets committed @@ -61,7 +61,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { assertMempool(cs.txNotifier).EnableTxsAvailable() - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, cs.Height, cs.Round) ensureNewEventOnChannel(newBlockCh) // first block gets committed @@ -81,9 +81,9 @@ func TestMempoolProgressInHigherRound(t *testing.T) { cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) + newRoundCh := subscribe(t, cs.eventBus, types.EventQueryNewRound) + timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose) cs.setProposal = func(proposal *types.Proposal) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and @@ -131,7 +131,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) err := stateStore.Save(state) require.NoError(t, err) 
- newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 go deliverTxsRange(cs, 0, int(numTxs)) diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 62517fd4f..a28f54bf9 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -6,6 +6,7 @@ import ( "time" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" @@ -110,7 +111,7 @@ type Reactor struct { service.BaseService state *State - eventBus *types.EventBus + eventBus *eventbus.EventBus Metrics *Metrics mtx tmsync.RWMutex @@ -243,7 +244,7 @@ func (r *Reactor) OnStop() { } // SetEventBus sets the reactor's event bus. -func (r *Reactor) SetEventBus(b *types.EventBus) { +func (r *Reactor) SetEventBus(b *eventbus.EventBus) { r.eventBus = b r.state.SetEventBus(b) } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 13141e170..3d5168da7 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -19,6 +20,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" @@ -41,8 +43,8 @@ type reactorTestSuite struct { network *p2ptest.Network states map[types.NodeID]*State reactors map[types.NodeID]*Reactor - subs map[types.NodeID]types.Subscription - blocksyncSubs map[types.NodeID]types.Subscription + subs map[types.NodeID]eventbus.Subscription + blocksyncSubs map[types.NodeID]eventbus.Subscription stateChannels map[types.NodeID]*p2p.Channel dataChannels map[types.NodeID]*p2p.Channel voteChannels map[types.NodeID]*p2p.Channel @@ -64,8 +66,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + subs: make(map[types.NodeID]eventbus.Subscription, numNodes), + blocksyncSubs: make(map[types.NodeID]eventbus.Subscription, numNodes), } rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel, size)) @@ -73,7 +75,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size)) rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size)) - _, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + // Canceled during cleanup (see below). 
i := 0 for nodeID, node := range rts.network.Nodes { @@ -92,10 +95,18 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size) + blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + Limit: size, + }) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size) + fsSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryBlockSyncStatus, + Limit: size, + }) require.NoError(t, err) rts.states[nodeID] = state @@ -154,15 +165,21 @@ func waitForAndValidateBlock( t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() fn := func(j int) { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) for _, tx := range txs { @@ -171,12 +188,11 @@ func waitForAndValidateBlock( } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -187,18 +203,23 @@ func waitForAndValidateBlockWithTx( t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() fn := func(j int) { ntxs := 0 - BLOCK_TX_LOOP: for { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) // check that txs match the txs we're waiting for. 
@@ -210,18 +231,17 @@ func waitForAndValidateBlockWithTx( } if ntxs == len(txs) { - break BLOCK_TX_LOOP + break } } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -232,19 +252,25 @@ func waitForBlockWithUpdatedValsAndValidateIt( t *testing.T, n int, updatedVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, css []*State, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() fn := func(j int) { var newBlock *types.Block - LOOP: for { - msg := <-blocksSubs[j].Out() + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock = msg.Data().(types.EventDataNewBlock).Block if newBlock.LastCommit.Size() == len(updatedVals) { - break LOOP + break } } @@ -252,12 +278,11 @@ func waitForBlockWithUpdatedValsAndValidateIt( } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -289,14 +314,19 @@ func TestReactorBasic(t *testing.T) { reactor.SwitchToConsensus(state, false) } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - <-s.Out() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -306,9 +336,13 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the consensus switch - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - msg := <-s.Out() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } ensureBlockSyncStatus(t, msg, true, 0) }(sub) } @@ -381,7 +415,7 @@ func TestReactorWithEvidence(t *testing.T) { cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err = eventBus.Start() require.NoError(t, err) @@ -400,18 +434,24 @@ func TestReactorWithEvidence(t *testing.T) { reactor.SwitchToConsensus(state, false) } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // We expect for each validator that is the proposer to propose one piece of // evidence. 
- go func(s types.Subscription) { - msg := <-s.Out() - block := msg.Data().(types.EventDataNewBlock).Block + go func(s eventbus.Subscription) { + defer wg.Done() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + block := msg.Data().(types.EventDataNewBlock).Block require.Len(t, block.Evidence.Evidence, 1) - wg.Done() }(sub) } @@ -454,14 +494,19 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { ), ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -484,14 +529,19 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { reactor.SwitchToConsensus(state, false) } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -559,20 +609,25 @@ func TestReactorVotingPowerChange(t *testing.T) { activeVals[string(addr)] = struct{}{} } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } wg.Wait() - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } @@ -659,14 +714,19 @@ func TestReactorValidatorSetChanges(t *testing.T) { activeVals[string(pubKey.Address())] = struct{}{} } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -680,7 +740,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 60ad0c041..315fe0d9a 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "hash/crc32" "io" @@ -11,6 +12,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -36,7 +38,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // Unmarshal and apply a single message to the consensus state as if it were // received in 
receiveRoutine. Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running. -func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { +func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub eventbus.Subscription) error { // Skip meta messages which exist for demarcating boundaries. if _, ok := msg.Msg.(EndHeightMessage); ok { return nil @@ -47,18 +49,18 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr case types.EventDataRoundState: cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) // these are playback checks - ticker := time.After(time.Second * 2) if newStepSub != nil { - select { - case stepMsg := <-newStepSub.Out(): - m2 := stepMsg.Data().(types.EventDataRoundState) - if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { - return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) - } - case <-newStepSub.Canceled(): - return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled") - case <-ticker: - return fmt.Errorf("failed to read off newStepSub.Out()") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + stepMsg, err := newStepSub.Next(ctx) + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("subscription timed out: %w", err) + } else if err != nil { + return fmt.Errorf("subscription canceled: %w", err) + } + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } } case msgInfo: @@ -216,7 +218,7 @@ func NewHandshaker(stateStore sm.Store, state sm.State, stateStore: stateStore, initialState: state, store: store, - eventBus: types.NopEventBus{}, + eventBus: eventbus.NopEventBus{}, genDoc: genDoc, logger: log.NewNopLogger(), nBlocks: 0, diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index f60dff531..c03b234aa 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -13,6 +13,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -54,7 +55,10 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected ctx := context.Background() - newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } @@ -125,7 +129,7 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl } // go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { +func (pb *playback) replayReset(count int, newStepSub eventbus.Subscription) error { if err := pb.cs.Stop(); err != nil { return err } @@ -222,7 +226,10 @@ func (pb *playback) replayConsoleLoop() int { ctx := context.Background() // ensure all new step events are regenerated as expected - 
newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) } @@ -318,7 +325,7 @@ func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.Consensu tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) } - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() if err := eventBus.Start(); err != nil { tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 0b7468b79..49b1b9092 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" "math/rand" @@ -31,6 +32,7 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -84,14 +86,18 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. - newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + newBlockSub, err := cs.eventBus.SubscribeWithArgs(context.Background(), pubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-newBlockSub.Out(): - case <-newBlockSub.Canceled(): - t.Fatal("newBlockSub was canceled") - case <-time.After(120 * time.Second): + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + _, err = newBlockSub.Next(ctx) + if errors.Is(err, context.DeadlineExceeded) { t.Fatal("Timed out waiting for new block (see trace above)") + } else if err != nil { + t.Fatal("newBlockSub was canceled") } } @@ -334,8 +340,8 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(t, css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(t, css[0].eventBus, types.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 65f4f865b..6ebc54086 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -15,6 +15,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/libs/fail" tmsync "github.com/tendermint/tendermint/internal/libs/sync" sm "github.com/tendermint/tendermint/internal/state" @@ -117,7 +118,7 @@ type State struct { // we use eventBus to trigger msg broadcasts 
in the reactor, // and to notify external subscribers, eg. through a websocket - eventBus *types.EventBus + eventBus *eventbus.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes @@ -207,7 +208,7 @@ func (cs *State) SetLogger(l log.Logger) { } // SetEventBus sets event bus. -func (cs *State) SetEventBus(b *types.EventBus) { +func (cs *State) SetEventBus(b *eventbus.EventBus) { cs.eventBus = b cs.blockExec.SetEventBus(b) } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index c6ef8d3e6..c97acbe43 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -13,6 +13,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -62,8 +63,8 @@ func TestStateProposerSelection0(t *testing.T) { height, round := cs1.Height, cs1.Round - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) startTestRound(cs1, height, round) @@ -105,7 +106,7 @@ func TestStateProposerSelection2(t *testing.T) { require.NoError(t, err) height := cs1.Height - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) 
@@ -148,7 +149,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose) startTestRound(cs, height, round) @@ -170,8 +171,8 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(t, cs.eventBus, types.EventQueryCompleteProposal) cs.enterNewRound(height, round) cs.startRoutines(3) @@ -204,8 +205,8 @@ func TestStateBadProposal(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) @@ -266,8 +267,8 @@ func TestStateOversizedBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} @@ -332,16 +333,16 @@ func TestStateFullRound1(t *testing.T) { if err := cs.eventBus.Stop(); err != nil { t.Error(err) } - eventBus := types.NewEventBusWithBufferCapacity(0) + eventBus := eventbus.NewDefault() eventBus.SetLogger(log.TestingLogger().With("module", "events")) cs.SetEventBus(eventBus) if err := eventBus.Start(); err != nil { t.Error(err) } - voteCh := subscribe(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(t, cs.eventBus, types.EventQueryVote) + propCh := subscribe(t, cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(t, cs.eventBus, types.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) startTestRound(cs, height, round) @@ -370,7 +371,7 @@ func TestStateFullRoundNil(t *testing.T) { require.NoError(t, err) height, round := cs.Height, cs.Round - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(t, cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -392,8 +393,8 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -437,11 +438,11 @@ func TestStateLockNoPOL(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := 
subscribe(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 @@ -629,14 +630,14 @@ func TestStateLockPOLRelock(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + voteCh := subscribeToVoter(t, cs1, addr) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader) // everything done from perspective of cs1 @@ -731,14 +732,14 @@ func TestStateLockPOLUnlock(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // everything done from perspective of cs1 @@ -826,13 +827,13 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(t, cs1, addr) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) // everything done from perspective of cs1 /* @@ -957,14 +958,14 @@ func TestStateLockPOLSafety1(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, 
types.EventQueryNewRound) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, round) @@ -1053,7 +1054,7 @@ func TestStateLockPOLSafety1(t *testing.T) { // we should prevote what we're locked on validatePrevote(t, cs1, round, vss[0], propBlockHash) - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + newStepCh := subscribe(t, cs1.eventBus, types.EventQueryNewRoundStep) // before prevotes from the previous round are added // add prevotes from the earlier round @@ -1081,14 +1082,14 @@ func TestStateLockPOLSafety2(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) @@ -1181,15 +1182,15 @@ func TestProposeValidBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, round) @@ -1274,14 +1275,14 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, 
cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, round) @@ -1339,15 +1340,15 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribeToVoter(t, cs1, addr) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) @@ -1396,8 +1397,8 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) // start round startTestRound(cs1, height, round) @@ -1420,12 +1421,12 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round startTestRound(cs1, height, round) @@ -1459,12 +1460,12 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round startTestRound(cs1, height, round) @@ -1498,12 +1499,12 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { vs2, vs3, 
vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round in which PO is not proposer startTestRound(cs1, height, round) @@ -1532,8 +1533,8 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock) _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) propBlockHash := propBlock.Hash() @@ -1567,9 +1568,9 @@ func TestCommitFromPreviousRound(t *testing.T) { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) propBlockHash := propBlock.Hash() @@ -1624,16 +1625,16 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -1689,14 +1690,14 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader) pv1, err := 
cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -1747,10 +1748,10 @@ func TestStateSlashingPrevotes(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(t, cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, 0) @@ -1782,10 +1783,10 @@ func TestStateSlashingPrecommits(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(t, cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote startTestRound(cs1, cs1.Height, 0) @@ -1831,14 +1832,14 @@ func TestStateHalt1(t *testing.T) { height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(t, cs1, addr) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -2002,10 +2003,25 @@ func TestSignSameVoteTwice(t *testing.T) { } // subscribe subscribes test client to the given query and returns a channel with cap = 1. 
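+// NOTE: the rewritten helper below returns an unbuffered channel fed by a
+// background goroutine, which fails the test if the subscription terminates
+// unexpectedly. Illustrative use, with cs1 a test consensus state:
+//
+//	voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote)
+//	msg := <-voteCh // blocks until the next matching event is published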
-func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) +func subscribe(t *testing.T, eventBus *eventbus.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { + t.Helper() + sub, err := eventBus.SubscribeWithArgs(context.Background(), tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: q, + }) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + t.Fatalf("Failed to subscribe %q to %v: %v", testSubscriber, q, err) } - return sub.Out() + ch := make(chan tmpubsub.Message) + go func() { + for { + next, err := sub.Next(context.Background()) + if err != nil { + t.Errorf("Subscription for %v unexpectedly terminated: %v", q, err) + return + } + ch <- next + } + }() + return ch } diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 8f75d6969..77c6ce574 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -16,6 +16,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -76,7 +77,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } }) - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err != nil { return fmt.Errorf("failed to start event bus: %w", err) diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go new file mode 100644 index 000000000..263267461 --- /dev/null +++ b/internal/eventbus/event_bus.go @@ -0,0 +1,242 @@ +package eventbus + +import ( + "context" + "fmt" + "strings" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +// Subscription is a proxy interface for a pubsub Subscription. +type Subscription interface { + ID() string + Next(context.Context) (tmpubsub.Message, error) +} + +// EventBus is a common bus for all events going through the system. +// It is a type-aware wrapper around an underlying pubsub server. +// All events should be published via the bus. +type EventBus struct { + service.BaseService + pubsub *tmpubsub.Server +} + +// NewDefault returns a new event bus with default options. +func NewDefault() *EventBus { + pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(0)) + b := &EventBus{pubsub: pubsub} + b.BaseService = *service.NewBaseService(nil, "EventBus", b) + return b +} + +func (b *EventBus) SetLogger(l log.Logger) { + b.BaseService.SetLogger(l) + b.pubsub.SetLogger(l.With("module", "pubsub")) +} + +func (b *EventBus) OnStart() error { + return b.pubsub.Start() +} + +func (b *EventBus) OnStop() { + if err := b.pubsub.Stop(); err != nil { + b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) + } +} + +func (b *EventBus) NumClients() int { + return b.pubsub.NumClients() +} + +func (b *EventBus) NumClientSubscriptions(clientID string) int { + return b.pubsub.NumClientSubscriptions(clientID) +} + +// Deprecated: Use SubscribeWithArgs instead. 
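+// Callers migrate by moving the positional arguments into SubscribeArgs
+// fields; an illustrative before/after:
+//
+//	// before: sub, err := b.Subscribe(ctx, clientID, q, 16)
+//	sub, err := b.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
+//		ClientID: clientID,
+//		Query:    q,
+//		Limit:    16, // formerly the optional capacity argument
+//	})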
+func (b *EventBus) Subscribe(ctx context.Context, + clientID string, query tmpubsub.Query, capacities ...int) (Subscription, error) { + + return b.pubsub.Subscribe(ctx, clientID, query, capacities...) +} + +func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) { + return b.pubsub.SubscribeWithArgs(ctx, args) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { + return b.pubsub.Unsubscribe(ctx, args) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Observe(ctx context.Context, observe func(tmpubsub.Message) error, queries ...tmpubsub.Query) error { + return b.pubsub.Observe(ctx, observe, queries...) +} + +func (b *EventBus) Publish(eventValue string, eventData types.TMEventData) error { + // no explicit deadline for publishing events + ctx := context.Background() + + tokens := strings.Split(types.EventTypeKey, ".") + event := abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: eventValue, + }, + }, + } + + return b.pubsub.PublishWithEvents(ctx, eventData, []abci.Event{event}) +} + +func (b *EventBus) PublishEventNewBlock(data types.EventDataNewBlock) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + + // add Tendermint-reserved new block event + events = append(events, types.EventNewBlock) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewBlockHeader(data types.EventDataNewBlockHeader) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + + // add Tendermint-reserved new block header event + events = append(events, types.EventNewBlockHeader) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewEvidence(evidence types.EventDataNewEvidence) error { + return b.Publish(types.EventNewEvidenceValue, evidence) +} + +func (b *EventBus) PublishEventVote(data types.EventDataVote) error { + return b.Publish(types.EventVoteValue, data) +} + +func (b *EventBus) PublishEventValidBlock(data types.EventDataRoundState) error { + return b.Publish(types.EventValidBlockValue, data) +} + +func (b *EventBus) PublishEventBlockSyncStatus(data types.EventDataBlockSyncStatus) error { + return b.Publish(types.EventBlockSyncStatusValue, data) +} + +func (b *EventBus) PublishEventStateSyncStatus(data types.EventDataStateSyncStatus) error { + return b.Publish(types.EventStateSyncStatusValue, data) +} + +// PublishEventTx publishes tx event with events from Result. Note it will add +// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys +// will be overwritten. 
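+// As a result, a transaction at height 1 can be matched by a subscription
+// query such as (illustrative; '<HEX>' stands for the hex-encoded tx hash):
+//
+//	tm.event='Tx' AND tx.height=1 AND tx.hash='<HEX>'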
+func (b *EventBus) PublishEventTx(data types.EventDataTx) error { + // no explicit deadline for publishing events + ctx := context.Background() + events := data.Result.Events + + // add Tendermint-reserved events + events = append(events, types.EventTx) + + tokens := strings.Split(types.TxHashKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%X", types.Tx(data.Tx).Hash()), + }, + }, + }) + + tokens = strings.Split(types.TxHeightKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%d", data.Height), + }, + }, + }) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewRoundStep(data types.EventDataRoundState) error { + return b.Publish(types.EventNewRoundStepValue, data) +} + +func (b *EventBus) PublishEventTimeoutPropose(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutProposeValue, data) +} + +func (b *EventBus) PublishEventTimeoutWait(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutWaitValue, data) +} + +func (b *EventBus) PublishEventNewRound(data types.EventDataNewRound) error { + return b.Publish(types.EventNewRoundValue, data) +} + +func (b *EventBus) PublishEventCompleteProposal(data types.EventDataCompleteProposal) error { + return b.Publish(types.EventCompleteProposalValue, data) +} + +func (b *EventBus) PublishEventPolka(data types.EventDataRoundState) error { + return b.Publish(types.EventPolkaValue, data) +} + +func (b *EventBus) PublishEventUnlock(data types.EventDataRoundState) error { + return b.Publish(types.EventUnlockValue, data) +} + +func (b *EventBus) PublishEventRelock(data types.EventDataRoundState) error { + return b.Publish(types.EventRelockValue, data) +} + +func (b *EventBus) PublishEventLock(data types.EventDataRoundState) error { + return b.Publish(types.EventLockValue, data) +} + +func (b *EventBus) PublishEventValidatorSetUpdates(data types.EventDataValidatorSetUpdates) error { + return b.Publish(types.EventValidatorSetUpdatesValue, data) +} + +//----------------------------------------------------------------------------- + +// NopEventBus implements a types.BlockEventPublisher that discards all events. 
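+// It provides a safe default publisher: for example, NewBlockExecutor starts
+// with eventbus.NopEventBus{} until a real bus is installed via SetEventBus.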
+type NopEventBus struct{} + +func (NopEventBus) PublishEventNewBlock(types.EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(types.EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventNewEvidence(types.EventDataNewEvidence) error { + return nil +} + +func (NopEventBus) PublishEventTx(types.EventDataTx) error { + return nil +} + +func (NopEventBus) PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdates) error { + return nil +} diff --git a/types/event_bus_test.go b/internal/eventbus/event_bus_test.go similarity index 56% rename from types/event_bus_test.go rename to internal/eventbus/event_bus_test.go index 9ca075391..e280d2bc4 100644 --- a/types/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -1,4 +1,4 @@ -package types +package eventbus_test import ( "context" @@ -11,12 +11,14 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" ) func TestEventBusPublishEventTx(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -25,7 +27,7 @@ func TestEventBusPublishEventTx(t *testing.T) { } }) - tx := Tx("foo") + tx := types.Tx("foo") result := abci.ResponseDeliverTx{ Data: []byte("bar"), Events: []abci.Event{ @@ -34,27 +36,35 @@ func TestEventBusPublishEventTx(t *testing.T) { } // PublishEventTx adds 3 composite keys, so the query below should work + ctx := context.Background() query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) - txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) + txsSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) require.NoError(t, err) done := make(chan struct{}) go func() { - msg := <-txsSub.Out() - edt := msg.Data().(EventDataTx) + defer close(done) + msg, err := txsSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataTx) assert.Equal(t, int64(1), edt.Height) assert.Equal(t, uint32(0), edt.Index) assert.EqualValues(t, tx, edt.Tx) assert.Equal(t, result, edt.Result) - close(done) }() - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) + err = eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + }) assert.NoError(t, err) select { @@ -65,7 +75,7 @@ func TestEventBusPublishEventTx(t *testing.T) { } func TestEventBusPublishEventNewBlock(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -74,8 +84,8 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} + block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()} resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ {Type: "testType", Attributes: 
[]abci.EventAttribute{{Key: "baz", Value: "1"}}}, @@ -88,22 +98,28 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } // PublishEventNewBlock adds the tm.event compositeKey, so the query below should work + ctx := context.Background() query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" - blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) + blocksSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) require.NoError(t, err) done := make(chan struct{}) go func() { - msg := <-blocksSub.Out() - edt := msg.Data().(EventDataNewBlock) + defer close(done) + msg, err := blocksSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlock) assert.Equal(t, block, edt.Block) assert.Equal(t, blockID, edt.BlockID) assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) }() - err = eventBus.PublishEventNewBlock(EventDataNewBlock{ + err = eventBus.PublishEventNewBlock(types.EventDataNewBlock{ Block: block, BlockID: blockID, ResultBeginBlock: resultBeginBlock, @@ -119,7 +135,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -128,7 +144,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { } }) - tx := Tx("foo") + tx := types.Tx("foo") result := abci.ResponseDeliverTx{ Data: []byte("bar"), Events: []abci.Event{ @@ -186,48 +202,47 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { } for i, tc := range testCases { - sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustParse(tc.query)) + ctx := context.Background() + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: tmquery.MustParse(tc.query), + }) require.NoError(t, err) - done := make(chan struct{}) - + gotResult := make(chan bool, 1) go func() { - select { - case msg := <-sub.Out(): - data := msg.Data().(EventDataTx) + defer close(gotResult) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + msg, err := sub.Next(ctx) + if err == nil { + data := msg.Data().(types.EventDataTx) assert.Equal(t, int64(1), data.Height) assert.Equal(t, uint32(0), data.Index) assert.EqualValues(t, tx, data.Tx) assert.Equal(t, result, data.Result) - close(done) - case <-time.After(1 * time.Second): - return + gotResult <- true } }() - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) + assert.NoError(t, eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + })) - select { - case <-done: - if !tc.expectResults { - require.Fail(t, "unexpected transaction result(s) from subscription") - } - case <-time.After(1 * time.Second): - if tc.expectResults { - require.Fail(t, "failed to receive a transaction after 1 second") - } + if got := <-gotResult; got != tc.expectResults { + require.Failf(t, "Wrong transaction result", + "got a tx: %v, wanted a tx: %v", got, tc.expectResults) } } } func TestEventBusPublishEventNewBlockHeader(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := 
eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -236,7 +251,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) + block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, @@ -249,21 +264,27 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work + ctx := context.Background() query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" - headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) + headersSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) require.NoError(t, err) done := make(chan struct{}) go func() { - msg := <-headersSub.Out() - edt := msg.Data().(EventDataNewBlockHeader) + defer close(done) + msg, err := headersSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlockHeader) assert.Equal(t, block.Header, edt.Header) assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) }() - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{ + err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ Header: block.Header, ResultBeginBlock: resultBeginBlock, ResultEndBlock: resultEndBlock, @@ -278,7 +299,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } func TestEventBusPublishEventNewEvidence(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -287,22 +308,28 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { } }) - ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") + ev := types.NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") - query := "tm.event='NewEvidence'" - evSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) + ctx := context.Background() + const query = `tm.event='NewEvidence'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustParse(query), + }) require.NoError(t, err) done := make(chan struct{}) go func() { - msg := <-evSub.Out() - edt := msg.Data().(EventDataNewEvidence) + defer close(done) + msg, err := evSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewEvidence) assert.Equal(t, ev, edt.Evidence) assert.Equal(t, int64(4), edt.Height) - close(done) }() - err = eventBus.PublishEventNewEvidence(EventDataNewEvidence{ + err = eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ Evidence: ev, Height: 4, }) @@ -316,7 +343,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { } func TestEventBusPublish(t *testing.T) { - eventBus := NewEventBus() + eventBus := eventbus.NewDefault() err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -327,59 +354,47 @@ func TestEventBusPublish(t *testing.T) { const numEventsExpected = 14 - sub, err := eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, numEventsExpected) + ctx := context.Background() + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.Empty{}, + Limit: numEventsExpected, + 
}) require.NoError(t, err) - done := make(chan struct{}) + count := make(chan int, 1) go func() { - numEvents := 0 - for range sub.Out() { - numEvents++ - if numEvents >= numEventsExpected { - close(done) + defer close(count) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + for n := 0; ; n++ { + if _, err := sub.Next(ctx); err != nil { + count <- n return } } }() - err = eventBus.Publish(EventNewBlockHeaderValue, EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlock(EventDataNewBlock{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventVote(EventDataVote{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRoundStep(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutWait(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRound(EventDataNewRound{}) - require.NoError(t, err) - err = eventBus.PublishEventCompleteProposal(EventDataCompleteProposal{}) - require.NoError(t, err) - err = eventBus.PublishEventPolka(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventUnlock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventRelock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventLock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{}) - require.NoError(t, err) - err = eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{}) - require.NoError(t, err) - err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{}) - require.NoError(t, err) + require.NoError(t, eventBus.Publish(types.EventNewBlockHeaderValue, + types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventNewBlock(types.EventDataNewBlock{})) + require.NoError(t, eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventVote(types.EventDataVote{})) + require.NoError(t, eventBus.PublishEventNewRoundStep(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutPropose(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutWait(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventNewRound(types.EventDataNewRound{})) + require.NoError(t, eventBus.PublishEventCompleteProposal(types.EventDataCompleteProposal{})) + require.NoError(t, eventBus.PublishEventPolka(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventUnlock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventRelock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventLock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdates{})) + require.NoError(t, eventBus.PublishEventBlockSyncStatus(types.EventDataBlockSyncStatus{})) + require.NoError(t, eventBus.PublishEventStateSyncStatus(types.EventDataStateSyncStatus{})) - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatalf("expected to receive %d events after 1 sec.", numEventsExpected) - } + require.GreaterOrEqual(t, <-count, numEventsExpected) } func BenchmarkEventBus(b *testing.B) { @@ -418,7 +433,7 @@ func benchmarkEventBus(numClients int, 
randQueries bool, randEvents bool, b *tes // for random* functions mrand.Seed(time.Now().Unix()) - eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache + eventBus := eventbus.NewDefault() // set buffer capacity to 0 so we are not testing cache err := eventBus.Start() if err != nil { b.Error(err) @@ -430,28 +445,29 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes }) ctx := context.Background() - q := EventQueryNewBlock + q := types.EventQueryNewBlock for i := 0; i < numClients; i++ { if randQueries { q = randQuery() } - sub, err := eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q) + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: q, + }) if err != nil { b.Fatal(err) } go func() { for { - select { - case <-sub.Out(): - case <-sub.Canceled(): + if _, err := sub.Next(ctx); err != nil { return } } }() } - eventValue := EventNewBlockValue + eventValue := types.EventNewBlockValue b.ReportAllocs() b.ResetTimer() @@ -460,7 +476,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes eventValue = randEventValue() } - err := eventBus.Publish(eventValue, EventDataString("Gamora")) + err := eventBus.Publish(eventValue, types.EventDataString("Gamora")) if err != nil { b.Error(err) } @@ -468,42 +484,41 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes } var events = []string{ - EventNewBlockValue, - EventNewBlockHeaderValue, - EventNewRoundValue, - EventNewRoundStepValue, - EventTimeoutProposeValue, - EventCompleteProposalValue, - EventPolkaValue, - EventUnlockValue, - EventLockValue, - EventRelockValue, - EventTimeoutWaitValue, - EventVoteValue, - EventBlockSyncStatusValue, - EventStateSyncStatusValue, + types.EventNewBlockValue, + types.EventNewBlockHeaderValue, + types.EventNewRoundValue, + types.EventNewRoundStepValue, + types.EventTimeoutProposeValue, + types.EventCompleteProposalValue, + types.EventPolkaValue, + types.EventUnlockValue, + types.EventLockValue, + types.EventRelockValue, + types.EventTimeoutWaitValue, + types.EventVoteValue, + types.EventBlockSyncStatusValue, + types.EventStateSyncStatusValue, } func randEventValue() string { - return events[mrand.Intn(len(events))] } var queries = []tmpubsub.Query{ - EventQueryNewBlock, - EventQueryNewBlockHeader, - EventQueryNewRound, - EventQueryNewRoundStep, - EventQueryTimeoutPropose, - EventQueryCompleteProposal, - EventQueryPolka, - EventQueryUnlock, - EventQueryLock, - EventQueryRelock, - EventQueryTimeoutWait, - EventQueryVote, - EventQueryBlockSyncStatus, - EventQueryStateSyncStatus, + types.EventQueryNewBlock, + types.EventQueryNewBlockHeader, + types.EventQueryNewRound, + types.EventQueryNewRoundStep, + types.EventQueryTimeoutPropose, + types.EventQueryCompleteProposal, + types.EventQueryPolka, + types.EventQueryUnlock, + types.EventQueryLock, + types.EventQueryRelock, + types.EventQueryTimeoutWait, + types.EventQueryVote, + types.EventQueryBlockSyncStatus, + types.EventQueryStateSyncStatus, } func randQuery() tmpubsub.Query { diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 90e615341..0a92ef3f2 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -7,6 +7,7 @@ import ( "net" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/inspect/rpc" rpccore 
"github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" @@ -32,7 +33,7 @@ type Inspector struct { config *config.RPCConfig indexerService *indexer.Service - eventBus *types.EventBus + eventBus *eventbus.EventBus logger log.Logger } @@ -44,7 +45,7 @@ type Inspector struct { //nolint:lll func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { routes := rpc.Routes(*cfg, ss, bs, es, logger) - eb := types.NewEventBus() + eb := eventbus.NewDefault() eb.SetLogger(logger.With("module", "events")) is := indexer.NewIndexerService(es, eb) is.SetLogger(logger.With("module", "txindex")) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index e2808ce85..5527bf2ac 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -79,6 +79,7 @@ func TestBlock(t *testing.T) { blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) rpcConfig := config.TestRPCConfig() l := log.TestingLogger() @@ -223,6 +224,8 @@ func TestConsensusParams(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -274,6 +277,8 @@ func TestBlockResults(t *testing.T) { blockStoreMock.On("Height").Return(testHeight) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -320,6 +325,8 @@ func TestCommit(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -372,6 +379,8 @@ func TestBlockByHash(t *testing.T) { blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -423,6 +432,8 @@ func TestBlockchain(t *testing.T) { }) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -474,6 +485,8 @@ func TestValidators(t *testing.T) { blockStoreMock.On("Base").Return(int64(0)) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, 
[]indexer.EventSink{eventSinkMock}, l) diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 3043ba6b3..276bfe082 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -26,6 +26,10 @@ type Server struct { Config *config.RPCConfig } +type eventBusUnsubscriber interface { + UnsubscribeAll(ctx context.Context, subscriber string) error +} + // Routes returns the set of routes used by the Inspector server. // //nolint: lll @@ -59,7 +63,7 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg mux := http.NewServeMux() wmLogger := logger.With("protocol", "websocket") - var eventBus types.EventBusSubscriber + var eventBus eventBusUnsubscriber websocketDisconnectFn := func(remoteAddr string) { err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index f05c34f14..9adeeee71 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" @@ -86,7 +87,7 @@ type Environment struct { PubKey crypto.PubKey GenDoc *types.GenesisDoc // cache the genesis structure EventSinks []indexer.EventSink - EventBus *types.EventBus // thread safe + EventBus *eventbus.EventBus // thread safe Mempool mempool.Mempool BlockSyncReactor consensus.BlockSyncReactor StateSyncMetricer statesync.Metricer diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index 8632e00c1..57dbd4ebe 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -2,6 +2,7 @@ package core import ( "context" + "errors" "fmt" "time" @@ -37,7 +38,11 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() - sub, err := env.EventBus.Subscribe(subCtx, addr, q, subBufferSize) + sub, err := env.EventBus.SubscribeWithArgs(subCtx, tmpubsub.SubscribeArgs{ + ClientID: addr, + Query: q, + Limit: subBufferSize, + }) if err != nil { return nil, err } @@ -46,37 +51,34 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp subscriptionID := ctx.JSONReq.ID go func() { for { - select { - case msg := <-sub.Out(): - var ( - resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} - resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) - ) - writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { - env.Logger.Info("Can't write response (slow client)", + msg, err := sub.Next(context.Background()) + if errors.Is(err, tmpubsub.ErrUnsubscribed) { + // The subscription was removed by the client. + return + } else if errors.Is(err, tmpubsub.ErrTerminated) { + // The subscription was terminated by the publisher. 
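+				// Report the termination to the client before exiting.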
+ resp := rpctypes.RPCServerError(subscriptionID, err) + ok := ctx.WSConn.TryWriteRPCResponse(resp) + if !ok { + env.Logger.Info("Unable to write response (slow client)", "to", addr, "subscriptionID", subscriptionID, "err", err) } - case <-sub.Canceled(): - if sub.Err() != tmpubsub.ErrUnsubscribed { - var reason string - if sub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = sub.Err().Error() - } - var ( - err = fmt.Errorf("subscription was canceled (reason: %s)", reason) - resp = rpctypes.RPCServerError(subscriptionID, err) - ) - if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok { - env.Logger.Info("Can't write response (slow client)", - "to", addr, "subscriptionID", subscriptionID, "err", err) - } - } return } + + // We have a message to deliver to the client. + resp := rpctypes.NewRPCSuccessResponse(subscriptionID, &coretypes.ResultEvent{ + Query: query, + Data: msg.Data(), + Events: msg.Events(), + }) + wctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + err = ctx.WSConn.WriteRPCResponse(wctx, resp) + cancel() + if err != nil { + env.Logger.Info("Unable to write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } } }() diff --git a/internal/state/execution.go b/internal/state/execution.go index e3dc80dca..dc64e6e3d 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -8,6 +8,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/libs/fail" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" @@ -69,7 +70,7 @@ func NewBlockExecutor( res := &BlockExecutor{ store: stateStore, proxyApp: proxyApp, - eventBus: types.NopEventBus{}, + eventBus: eventbus.NopEventBus{}, mempool: pool, evpool: evpool, logger: logger, diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index a66b677f9..fb70668fd 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -16,6 +16,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" mmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -23,6 +24,7 @@ import ( sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" @@ -367,18 +369,17 @@ func TestEndBlockValidatorUpdates(t *testing.T) { blockStore, ) - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() err = eventBus.Start() require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests blockExec.SetEventBus(eventBus) - updatesSub, err := eventBus.Subscribe( - context.Background(), - "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, - ) + updatesSub, err := eventBus.SubscribeWithArgs(context.Background(), pubsub.SubscribeArgs{ + ClientID: "TestEndBlockValidatorUpdates", + Query: types.EventQueryValidatorSetUpdates, + }) 
require.NoError(t, err) block := sf.MakeBlock(state, 1, new(types.Commit)) @@ -402,18 +403,15 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } // test we threw an event - select { - case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdates) - require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) - if assert.NotEmpty(t, event.ValidatorUpdates) { - assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) - assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) - } - case <-updatesSub.Canceled(): - t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) - case <-time.After(1 * time.Second): - t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + msg, err := updatesSub.Next(ctx) + require.NoError(t, err) + event, ok := msg.Data().(types.EventDataValidatorSetUpdates) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) + if assert.NotEmpty(t, event.ValidatorUpdates) { + assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) } } diff --git a/internal/state/indexer/indexer.go b/internal/state/indexer/indexer.go index 24dc62d70..5c238e137 100644 --- a/internal/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -41,26 +41,26 @@ type BlockIndexer interface { // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []*abci.TxResult + Ops []*abci.TxResult + Pending int64 } // NewBatch creates a new Batch. func NewBatch(n int64) *Batch { - return &Batch{ - Ops: make([]*abci.TxResult, n), - } + return &Batch{Ops: make([]*abci.TxResult, n), Pending: n} } // Add or update an entry for the given result.Index. func (b *Batch) Add(result *abci.TxResult) error { - b.Ops[result.Index] = result + if b.Ops[result.Index] == nil { + b.Pending-- + b.Ops[result.Index] = result + } return nil } // Size returns the total number of operations inside the batch. -func (b *Batch) Size() int { - return len(b.Ops) -} +func (b *Batch) Size() int { return len(b.Ops) } // ErrorEmptyHash indicates empty hash var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index 39a1847f8..5952050f2 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -3,110 +3,118 @@ package indexer import ( "context" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) -// XXX/TODO: These types should be moved to the indexer package. - -const ( - subscriber = "IndexerService" -) - // Service connects event bus, transaction and block indexers together in // order to index transactions and blocks coming from the event bus. type Service struct { service.BaseService eventSinks []EventSink - eventBus *types.EventBus + eventBus *eventbus.EventBus + + currentBlock struct { + header types.EventDataNewBlockHeader + height int64 + batch *Batch + } } // NewIndexerService returns a new service instance. 
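+// Typical construction, from a caller's perspective (illustrative, mirroring
+// its use in internal/inspect):
+//
+//	eb := eventbus.NewDefault()
+//	is := indexer.NewIndexerService(sinks, eb)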
-func NewIndexerService(es []EventSink, eventBus *types.EventBus) *Service { - +func NewIndexerService(es []EventSink, eventBus *eventbus.EventBus) *Service { is := &Service{eventSinks: es, eventBus: eventBus} is.BaseService = *service.NewBaseService(nil, "IndexerService", is) return is } -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. -func (is *Service) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // canceled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. - blockHeadersSub, err := is.eventBus.SubscribeUnbuffered( - context.Background(), - subscriber, - types.EventQueryNewBlockHeader) - if err != nil { - return err +// publish publishes a pubsub message to the service. The service blocks until +// the message has been fully processed. +func (is *Service) publish(msg pubsub.Message) error { + // Indexing has three states. Initially, no block is in progress (WAIT) and + // we expect a block header. Upon seeing a header, we are waiting for zero + // or more transactions (GATHER). Once all the expected transactions have + // been delivered (in some order), we are ready to index. After indexing a + // block, we revert to the WAIT state for the next block. + + if is.currentBlock.batch == nil { + // WAIT: Start a new block. + hdr := msg.Data().(types.EventDataNewBlockHeader) + is.currentBlock.header = hdr + is.currentBlock.height = hdr.Header.Height + is.currentBlock.batch = NewBatch(hdr.NumTxs) + + if hdr.NumTxs != 0 { + return nil + } + // If the block does not expect any transactions, fall through and index + // it immediately. This shouldn't happen, but this check ensures we do + // not get stuck if it does. } - txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx) - if err != nil { - return err + curr := is.currentBlock.batch + if curr.Pending != 0 { + // GATHER: Accumulate a transaction into the current block's batch. + txResult := msg.Data().(types.EventDataTx).TxResult + if err := curr.Add(&txResult); err != nil { + is.Logger.Error("failed to add tx to batch", + "height", is.currentBlock.height, "index", txResult.Index, "err", err) + } + + // This may have been the last transaction in the batch, so fall through + // to check whether it is time to index. } - go func() { - for { - select { - case <-blockHeadersSub.Canceled(): - return - case msg := <-blockHeadersSub.Out(): + if curr.Pending == 0 { + // INDEX: We have all the transactions we expect for the current block. 
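+		// Flush the buffered header and transactions to every sink, then
+		// reset to the WAIT state for the next block.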
+ for _, sink := range is.eventSinks { + if err := sink.IndexBlockEvents(is.currentBlock.header); err != nil { + is.Logger.Error("failed to index block header", + "height", is.currentBlock.height, "err", err) + } else { + is.Logger.Debug("indexed block", + "height", is.currentBlock.height, "sink", sink.Type()) + } - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) - - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult - - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } - - if !IndexingEnabled(is.eventSinks) { - continue - } - - for _, sink := range is.eventSinks { - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) - } - - if len(batch.Ops) > 0 { - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) - } else { - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) - } - } + if curr.Size() != 0 { + err := sink.IndexTxEvents(curr.Ops) + if err != nil { + is.Logger.Error("failed to index block txs", + "height", is.currentBlock.height, "err", err) + } else { + is.Logger.Debug("indexed txs", + "height", is.currentBlock.height, "sink", sink.Type()) } } } - }() + is.currentBlock.batch = nil // return to the WAIT state for the next block + } + return nil } -// OnStop implements service.Service by unsubscribing from all transactions and -// close the eventsink. -func (is *Service) OnStop() { - if is.eventBus.IsRunning() { - _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) +// OnStart implements part of service.Service. It registers an observer for the +// indexer if the underlying event sinks support indexing. +// +// TODO(creachadair): Can we get rid of the "enabled" check? +func (is *Service) OnStart() error { + // If the event sinks support indexing, register an observer to capture + // block header data for the indexer. + if IndexingEnabled(is.eventSinks) { + err := is.eventBus.Observe(context.TODO(), is.publish, + types.EventQueryNewBlockHeader, types.EventQueryTx) + if err != nil { + return err + } } + return nil +} +// OnStop implements service.Service by closing the event sinks. 
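+// A sink that fails to close is logged and skipped so the remaining sinks
+// are still closed.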
+func (is *Service) OnStop() { for _, sink := range is.eventSinks { if err := sink.Stop(); err != nil { is.Logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index 3ad5fa509..d62ebffac 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -15,6 +15,7 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" @@ -39,7 +40,7 @@ var ( func TestIndexerServiceIndexesBlocks(t *testing.T) { // event bus - eventBus := types.NewEventBus() + eventBus := eventbus.NewDefault() eventBus.SetLogger(tmlog.TestingLogger()) err := eventBus.Start() require.NoError(t, err) diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index fd4a94382..cae644f7b 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -7,27 +7,18 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" ) func TestExample(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'")) - require.NoError(t, err) + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "example-client", + Query: query.MustParse("abci.account.name='John'"), + })) events := []abci.Event{ { @@ -35,8 +26,6 @@ func TestExample(t *testing.T) { Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, }, } - err = s.PublishWithEvents(ctx, "Tombstone", events) - require.NoError(t, err) - - assertReceive(t, "Tombstone", subscription.Out()) + require.NoError(t, s.PublishWithEvents(ctx, "Tombstone", events)) + sub.mustReceive(ctx, "Tombstone") } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index fdd728961..edf59bac4 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -10,24 +10,26 @@ // // Example: // -// q, err := query.New("account.name='John'") +// q, err := query.New(`account.name='John'`) // if err != nil { // return err // } -// ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) +// sub, err := pubsub.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ +// ClientID: "johns-transactions", +// Query: q, +// }) // if err != nil { // return err // } // // for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Canceled(): -// return subscription.Err() +// next, err := sub.Next(ctx) +// if err == pubsub.ErrTerminated { +// return err // terminated by publisher +// } else if err != nil { +// return err // timed out, client unsubscribed, etc. 
// } +// process(next) // } // package pubsub @@ -67,6 +69,14 @@ type Query interface { String() string } +// SubscribeArgs are the parameters to create a new subscription. +type SubscribeArgs struct { + ClientID string // Client ID + Query Query // filter query for events (required) + Limit int // subscription queue capacity limit (0 means 1) + Quota int // subscription queue soft quota (0 uses Limit) +} + // UnsubscribeArgs are the parameters to remove a subscription. // The subscriber ID must be populated, and at least one of the client ID or // the registered query. @@ -106,6 +116,11 @@ type Server struct { subs struct { sync.RWMutex index *subIndex + + // This function is called synchronously with each message published + // before it is delivered to any other subscriber. This allows an index + // to be persisted before any subscribers see the messages. + observe func(Message) error } // TODO(creachadair): Rework the options so that this does not need to live @@ -149,54 +164,93 @@ func BufferCapacity(cap int) Option { // BufferCapacity returns capacity of the publication queue. func (s *Server) BufferCapacity() int { return cap(s.queue) } -// Subscribe creates a subscription for the given client. +// Subscribe creates a subscription for the given client ID and query. +// If len(capacities) > 0, its first value is used as the queue capacity. // -// An error will be returned to the caller if the context is canceled or if -// subscription already exist for pair clientID and query. -// -// outCapacity can be used to set a capacity for Subscription#Out channel (1 by -// default). Panics if outCapacity is less than or equal to zero. If you want -// an unbuffered channel, use SubscribeUnbuffered. -func (s *Server) Subscribe( - ctx context.Context, - clientID string, - query Query, - outCapacity ...int) (*Subscription, error) { - outCap := 1 - if len(outCapacity) > 0 { - if outCapacity[0] <= 0 { - panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel") +// Deprecated: Use SubscribeWithArgs. This method will be removed in v0.36. +func (s *Server) Subscribe(ctx context.Context, + clientID string, query Query, capacities ...int) (*Subscription, error) { + + args := SubscribeArgs{ + ClientID: clientID, + Query: query, + Limit: 1, + } + if len(capacities) > 0 { + args.Limit = capacities[0] + if len(capacities) > 1 { + args.Quota = capacities[1] } - outCap = outCapacity[0] + // bounds are checked below + } + return s.SubscribeWithArgs(ctx, args) +} + +// Observe registers an observer function that will be called synchronously +// with each published message matching any of the given queries, prior to it +// being forwarded to any subscriber. If no queries are specified, all +// messages will be observed. An error is reported if an observer is already +// registered. +func (s *Server) Observe(ctx context.Context, observe func(Message) error, queries ...Query) error { + s.subs.Lock() + defer s.subs.Unlock() + if observe == nil { + return errors.New("observe callback is nil") + } else if s.subs.observe != nil { + return errors.New("an observer is already registered") } - return s.subscribe(ctx, clientID, query, outCap) + // Compile the message filter. 
+ var matches func(Message) bool + if len(queries) == 0 { + matches = func(Message) bool { return true } + } else { + matches = func(msg Message) bool { + for _, q := range queries { + match, err := q.Matches(msg.events) + if err == nil && match { + return true + } + } + return false + } + } + + s.subs.observe = func(msg Message) error { + if matches(msg) { + return observe(msg) + } + return nil // nothing to do for this message + } + return nil } -// SubscribeUnbuffered does the same as Subscribe, except it returns a -// subscription with unbuffered channel. Use with caution as it can freeze the -// server. -func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) { - return s.subscribe(ctx, clientID, query, 0) -} - -func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { +// SubscribeWithArgs creates a subscription for the given arguments. It is an +// error if the query is nil, a subscription already exists for the specified +// client ID and query, or if the capacity arguments are invalid. +func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) { + if args.Query == nil { + return nil, errors.New("query is nil") + } s.subs.Lock() defer s.subs.Unlock() if s.subs.index == nil { return nil, ErrServerStopped - } else if s.subs.index.contains(clientID, query.String()) { + } else if s.subs.index.contains(args.ClientID, args.Query.String()) { return nil, ErrAlreadySubscribed } - sub, err := newSubscription(outCapacity) + if args.Limit == 0 { + args.Limit = 1 + } + sub, err := newSubscription(args.Quota, args.Limit) if err != nil { return nil, err } s.subs.index.add(&subInfo{ - clientID: clientID, - query: query, + clientID: args.ClientID, + query: args.Query, subID: sub.id, sub: sub, }) @@ -334,11 +388,11 @@ func (s *Server) run() { s.Logger.Error("Error sending event", "err", err) } } - // Terminate all subscribers without error before exit. + // Terminate all subscribers before exit. s.subs.Lock() defer s.subs.Unlock() for si := range s.subs.index.all { - si.sub.cancel(nil) + si.sub.stop(ErrTerminated) } s.subs.index = nil }() @@ -348,7 +402,7 @@ func (s *Server) run() { // error. The caller must hold the s.subs lock. func (s *Server) removeSubs(evict subInfoSet, reason error) { for si := range evict { - si.sub.cancel(reason) + si.sub.stop(reason) } s.subs.index.removeAll(evict) } @@ -362,7 +416,7 @@ func (s *Server) send(data interface{}, events []types.Event) error { if len(evict) != 0 { s.subs.Lock() defer s.subs.Unlock() - s.removeSubs(evict, ErrOutOfCapacity) + s.removeSubs(evict, ErrTerminated) } }() @@ -372,6 +426,19 @@ func (s *Server) send(data interface{}, events []types.Event) error { s.subs.RLock() defer s.subs.RUnlock() + // If an observer is defined, give it control of the message before + // attempting to deliver it to any matching subscribers. If the observer + // fails, the message will not be forwarded. + if s.subs.observe != nil { + err := s.subs.observe(Message{ + data: data, + events: events, + }) + if err != nil { + return fmt.Errorf("observer failed on message: %w", err) + } + } + for si := range s.subs.index.all { match, err := si.query.Matches(events) if err != nil { @@ -381,18 +448,14 @@ func (s *Server) send(data interface{}, events []types.Event) error { continue } - // Subscriptions may be buffered or unbuffered. 
Unbuffered subscriptions - // are intended for internal use such as indexing, where we don't want to - // penalize a slow reader. Buffered subscribers must keep up with their - // queue, or they will be terminated. - // - // TODO(creachadair): Unbuffered subscriptions used by the event indexer - // to avoid losing events if it happens to be slow. Rework this so that - // use case doesn't require this affordance, and then remove unbuffered - // subscriptions. - msg := NewMessage(si.sub.id, data, events) - if err := si.sub.putMessage(msg); err != nil { - // The subscriber was too slow, cancel them. + // Publish the events to the subscriber's queue. If this fails, e.g., + // because the queue is over capacity or out of quota, evict the + // subscription from the index. + if err := si.sub.publish(Message{ + subID: si.sub.id, + data: data, + events: events, + }); err != nil { evict.add(si) } } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 397a88e3d..357264e8a 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -2,17 +2,15 @@ package pubsub_test import ( "context" + "errors" "fmt" - "runtime/debug" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" ) @@ -21,172 +19,144 @@ const ( clientID = "test-client" ) -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - +func TestSubscribeWithArgs(t *testing.T) { + s := newTestServer(t) ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - require.Equal(t, 1, s.NumClients()) - require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + t.Run("DefaultLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", subscription.Out()) + require.Equal(t, 1, s.NumClients()) + require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + require.NoError(t, s.Publish(ctx, "Ka-Zar")) + sub.mustReceive(ctx, "Ka-Zar") + }) + t.Run("PositiveLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID + "-2", + Query: query.Empty{}, + Limit: 10, + })) + require.NoError(t, s.Publish(ctx, "Aggamon")) + sub.mustReceive(ctx, "Aggamon") + }) +} + +func TestObserver(t *testing.T) { + s := newTestServer(t) + ctx := context.Background() + + done := make(chan struct{}) + var got interface{} + require.NoError(t, s.Observe(ctx, func(msg pubsub.Message) error { + defer close(done) + got = msg.Data() + return nil + })) + + const input = "Lions and tigers and bears, oh my!" 
+ require.NoError(t, s.Publish(ctx, input)) + <-done + require.Equal(t, got, input) +} + +func TestObserverErrors(t *testing.T) { + s := newTestServer(t) + ctx := context.Background() + + require.Error(t, s.Observe(ctx, nil, query.Empty{})) + require.NoError(t, s.Observe(ctx, func(pubsub.Message) error { return nil })) + require.Error(t, s.Observe(ctx, func(pubsub.Message) error { return nil }, query.Empty{})) +} + +func TestPublishDoesNotBlock(t *testing.T) { + s := newTestServer(t) + ctx := context.Background() + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) published := make(chan struct{}) go func() { defer close(published) - err := s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - - err = s.Publish(ctx, "Asylum") - require.NoError(t, err) - - err = s.Publish(ctx, "Ivan") - require.NoError(t, err) - - err = s.Publish(ctx, "Natasha") - require.NoError(t, err) + require.NoError(t, s.Publish(ctx, "Quicksilver")) + require.NoError(t, s.Publish(ctx, "Asylum")) + require.NoError(t, s.Publish(ctx, "Ivan")) }() select { case <-published: - assertReceive(t, "Quicksilver", subscription.Out()) - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) + sub.mustReceive(ctx, "Quicksilver") + sub.mustFail(ctx, pubsub.ErrTerminated) case <-time.After(3 * time.Second): - t.Fatal("Expected Publish(Asylum) not to block") + t.Fatal("Publishing should not have blocked") } } -func TestSubscribeWithCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - +func TestSubscribeErrors(t *testing.T) { + s := newTestServer(t) ctx := context.Background() - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, -1) - require.NoError(t, err) + + t.Run("EmptyQueryErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ClientID: clientID}) + require.Error(t, err) }) - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, 0) - require.NoError(t, err) + t.Run("NegativeLimitErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + Limit: -5, + }) + require.Error(t, err) }) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) - require.NoError(t, err) - err = s.Publish(ctx, "Aggamon") - require.NoError(t, err) - assertReceive(t, "Aggamon", subscription.Out()) } -func TestSubscribeUnbuffered(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - +func TestSlowSubscriber(t *testing.T) { + s := newTestServer(t) ctx := context.Background() - subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{}) - require.NoError(t, err) - published := make(chan struct{}) - go func() { - defer close(published) + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.Empty{}, + })) - err := s.Publish(ctx, "Ultron") - require.NoError(t, err) + require.NoError(t, s.Publish(ctx, "Fat Cobra")) + require.NoError(t, s.Publish(ctx, "Viper")) + require.NoError(t, s.Publish(ctx, "Black Panther")) - err = s.Publish(ctx, "Darkhawk") - require.NoError(t, err) - }() - - select { - case <-published: - t.Fatal("Expected 
Publish(Darkhawk) to block") - case <-time.After(3 * time.Second): - assertReceive(t, "Ultron", subscription.Out()) - assertReceive(t, "Darkhawk", subscription.Out()) - } -} - -func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Publish(ctx, "Fat Cobra") - require.NoError(t, err) - err = s.Publish(ctx, "Viper") - require.NoError(t, err) - err = s.Publish(ctx, "Black Panther") - require.NoError(t, err) - - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) + // We had capacity for one item, so we should get that item, but after that + // the subscription should have been terminated by the publisher. + sub.mustReceive(ctx, "Fat Cobra") + sub.mustFail(ctx, pubsub.ErrTerminated) } func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-1", + Query: query.MustParse("tm.events.type='NewBlock'"), + })) - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - assertReceive(t, "Iceman", subscription1.Out()) + sub1.mustReceive(ctx, "Iceman") - subscription2, err := s.Subscribe( - ctx, - "client-2", - query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), - ) - require.NoError(t, err) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-2", + Query: query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), + })) events = []abci.Event{ { @@ -200,234 +170,190 @@ func TestDifferentClients(t *testing.T) { } require.NoError(t, s.PublishWithEvents(ctx, "Ultimo", events)) - assertReceive(t, "Ultimo", subscription1.Out()) - assertReceive(t, "Ultimo", subscription2.Out()) + sub1.mustReceive(ctx, "Ultimo") + sub2.mustReceive(ctx, "Ultimo") - subscription3, err := s.Subscribe( - ctx, - "client-3", - query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), - ) - require.NoError(t, err) + sub3 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-3", + Query: query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), + })) - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, - }, - } + events = []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, + }} require.NoError(t, s.PublishWithEvents(ctx, "Valeria Richards", events)) - require.Zero(t, len(subscription3.Out())) + sub3.mustTimeOut(ctx, 100*time.Millisecond) } func TestSubscribeDuplicateKeys(t *testing.T) 
{ + s := newTestServer(t) ctx := context.Background() - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) testCases := []struct { query string expected interface{} }{ - { - "withdraw.rewards='17'", - "Iceman", - }, - { - "withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='1' AND withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='100'", - nil, - }, + {`withdraw.rewards='17'`, "Iceman"}, + {`withdraw.rewards='22'`, "Iceman"}, + {`withdraw.rewards='1' AND withdraw.rewards='22'`, "Iceman"}, + {`withdraw.rewards='100'`, nil}, } for i, tc := range testCases { - sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustParse(tc.query)) - require.NoError(t, err) + id := fmt.Sprintf("client-%d", i) + q := query.MustParse(tc.query) + t.Run(id, func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: id, + Query: q, + })) - events := []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "sender", Value: "bar"}, - {Key: "sender", Value: "baz"}, + events := []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "sender", Value: "bar"}, + {Key: "sender", Value: "baz"}, + }, }, - }, - { - Type: "withdraw", - Attributes: []abci.EventAttribute{ - {Key: "rewards", Value: "1"}, - {Key: "rewards", Value: "17"}, - {Key: "rewards", Value: "22"}, + { + Type: "withdraw", + Attributes: []abci.EventAttribute{ + {Key: "rewards", Value: "1"}, + {Key: "rewards", Value: "17"}, + {Key: "rewards", Value: "22"}, + }, }, - }, - } + } - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) + require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - if tc.expected != nil { - assertReceive(t, tc.expected, sub.Out()) - } else { - require.Zero(t, len(sub.Out())) - } + if tc.expected != nil { + sub.mustReceive(ctx, tc.expected) + } else { + sub.mustTimeOut(ctx, 100*time.Millisecond) + } + }) } } func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() + q := query.MustParse("tm.events.type='NewBlock'") + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} - subscription1, err := s.Subscribe(ctx, clientID, q) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + })) require.NoError(t, s.PublishWithEvents(ctx, "Goblin Queen", events)) - assertReceive(t, "Goblin Queen", subscription1.Out()) + sub1.mustReceive(ctx, "Goblin Queen") - subscription2, err := s.Subscribe(ctx, clientID, q) - require.Error(t, err) - require.Nil(t, subscription2) + // Subscribing a second time with the same client ID and query fails. + { + sub2, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + }) + require.Error(t, err) + require.Nil(t, sub2) + } + // The attempt to re-subscribe does not disrupt the existing sub. 
require.NoError(t, s.PublishWithEvents(ctx, "Spider-Man", events)) - assertReceive(t, "Spider-Man", subscription1.Out()) + sub1.mustReceive(ctx, "Spider-Man") } func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + + // Removing the subscription we just made should succeed. + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) + Query: query.MustParse("tm.events.type='NewBlock'"), + })) - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe") + // Publishing should still work. + require.NoError(t, s.Publish(ctx, "Nick Fury")) - assertCanceled(t, subscription, pubsub.ErrUnsubscribed) + // The unsubscribed subscriber should report as such. + sub.mustFail(ctx, pubsub.ErrUnsubscribed) } func TestClientUnsubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) - err = s.UnsubscribeAll(ctx, clientID) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + require.ErrorIs(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + }), pubsub.ErrSubscriptionNotFound) + require.ErrorIs(t, s.UnsubscribeAll(ctx, clientID), pubsub.ErrSubscriptionNotFound) } func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{Subscriber: clientID, Query: query.Empty{}}) - require.NoError(t, err) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", subscription.Out()) + args := pubsub.SubscribeArgs{ + 
ClientID: clientID, + Query: query.Empty{}, + } + newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.Empty{}, + })) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Publish(ctx, "Cable")) + sub.mustReceive(ctx, "Cable") } func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - + s := newTestServer(t) ctx := context.Background() - subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - subscription2, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'")) - require.NoError(t, err) - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlock'"), + })) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustParse("tm.events.type='NewBlockHeader'"), + })) - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll") - require.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll") + require.NoError(t, s.UnsubscribeAll(ctx, clientID)) + require.NoError(t, s.Publish(ctx, "Nick Fury")) + + sub1.mustFail(ctx, pubsub.ErrUnsubscribed) + sub2.mustFail(ctx, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription1, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription2, pubsub.ErrUnsubscribed) } func TestBufferCapacity(t *testing.T) { @@ -437,34 +363,65 @@ func TestBufferCapacity(t *testing.T) { require.Equal(t, 2, s.BufferCapacity()) ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + require.NoError(t, s.Publish(ctx, "Nighthawk")) + require.NoError(t, s.Publish(ctx, "Sage")) + + ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - require.Equal(t, context.DeadlineExceeded, err) + require.ErrorIs(t, s.Publish(ctx, "Ironclad"), context.DeadlineExceeded) +} + +func newTestServer(t testing.TB) *pubsub.Server { + t.Helper() + + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + require.NoError(t, s.Start()) + t.Cleanup(func() { + assert.NoError(t, s.Stop()) + }) + return s +} + +type testSub struct { + t testing.TB + *pubsub.Subscription +} + +func newTestSub(t testing.TB) *testSub { return &testSub{t: t} } + +func (s *testSub) must(sub *pubsub.Subscription, err error) *testSub { + s.t.Helper() + require.NoError(s.t, err) + require.NotNil(s.t, sub) + s.Subscription = sub + return s +} + +func (s *testSub) mustReceive(ctx context.Context, want interface{}) { + s.t.Helper() + got, err := s.Next(ctx) + require.NoError(s.t, err) + require.Equal(s.t, want, got.Data()) +} + +func (s *testSub) mustTimeOut(ctx context.Context, dur time.Duration) { + s.t.Helper() + tctx, cancel := context.WithTimeout(ctx, dur) + defer cancel() + got, err := s.Next(tctx) + if !errors.Is(err, context.DeadlineExceeded) { + 
s.t.Errorf("Next: got (%+v, %v), want %v", got, err, context.DeadlineExceeded) } } -// HELPERS - -func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - require.Equal(t, expected, actual.Data(), msgAndArgs...) - case <-time.After(1 * time.Second): - t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() +func (s *testSub) mustFail(ctx context.Context, want error) { + s.t.Helper() + got, err := s.Next(ctx) + if err == nil && want != nil { + s.t.Fatalf("Next: got (%+v, %v), want error %v", got, err, want) } -} - -func assertCanceled(t *testing.T, subscription *pubsub.Subscription, err error) { - _, ok := <-subscription.Canceled() - require.False(t, ok) - require.Equal(t, err, subscription.Err()) + require.ErrorIs(s.t, err, want) } diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 827e5e6a6..6e8c6fd07 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -3,132 +3,71 @@ package pubsub import ( "context" "errors" - "fmt" "github.com/google/uuid" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/queue" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) var ( - // ErrUnsubscribed is returned by Err when a client unsubscribes. - ErrUnsubscribed = errors.New("client unsubscribed") + // ErrUnsubscribed is returned by Next when the client has unsubscribed. + ErrUnsubscribed = errors.New("subscription removed by client") - // ErrOutOfCapacity is returned by Err when a client is not pulling messages - // fast enough. Note the client's subscription will be terminated. - ErrOutOfCapacity = errors.New("client is not pulling messages fast enough") + // ErrTerminated is returned by Next when the subscription was terminated by + // the publisher. + ErrTerminated = errors.New("subscription terminated by publisher") ) -// A Subscription represents a client subscription for a particular query and -// consists of three things: -// 1) channel onto which messages and events are published -// 2) channel which is closed if a client is too slow or choose to unsubscribe -// 3) err indicating the reason for (2) +// A Subscription represents a client subscription for a particular query. type Subscription struct { - id string - out chan Message - queue *queue.Queue - - canceled chan struct{} - stop func() - mtx tmsync.RWMutex - err error + id string + queue *queue.Queue // open until the subscription ends + stopErr error // after queue is closed, the reason why } -// newSubscription returns a new subscription with the given outCapacity. -func newSubscription(outCapacity int) (*Subscription, error) { - sub := &Subscription{ - id: uuid.NewString(), - out: make(chan Message), - canceled: make(chan struct{}), - - // N.B. The output channel is always unbuffered. For an unbuffered - // subscription that was already the case, and for a buffered one the - // queue now serves as the buffer. - } - - if outCapacity == 0 { - sub.stop = func() { close(sub.canceled) } - return sub, nil - } - q, err := queue.New(queue.Options{ - SoftQuota: outCapacity, - HardLimit: outCapacity, +// newSubscription returns a new subscription with the given queue capacity. 
+func newSubscription(quota, limit int) (*Subscription, error) { + queue, err := queue.New(queue.Options{ + SoftQuota: quota, + HardLimit: limit, }) if err != nil { - return nil, fmt.Errorf("creating queue: %w", err) + return nil, err } - sub.queue = q - sub.stop = func() { q.Close(); close(sub.canceled) } - - // Start a goroutine to bridge messages from the queue to the channel. - // TODO(creachadair): This is a temporary hack until we can change the - // interface not to expose the channel directly. - go func() { - for { - next, err := q.Wait(context.Background()) - if err != nil { - return // the subscription was terminated - } - sub.out <- next.(Message) - } - }() - return sub, nil + return &Subscription{ + id: uuid.NewString(), + queue: queue, + }, nil } -// putMessage transmits msg to the subscriber. If s is unbuffered, this blocks -// until msg is delivered and returns nil; otherwise it reports an error if the -// queue cannot accept any further messages. -func (s *Subscription) putMessage(msg Message) error { - if s.queue != nil { - return s.queue.Add(msg) +// Next blocks until a message is available, ctx ends, or the subscription +// ends. Next returns ErrUnsubscribed if s was unsubscribed, ErrTerminated if +// s was terminated by the publisher, or a context error if ctx ended without a +// message being available. +func (s *Subscription) Next(ctx context.Context) (Message, error) { + next, err := s.queue.Wait(ctx) + if errors.Is(err, queue.ErrQueueClosed) { + return Message{}, s.stopErr + } else if err != nil { + return Message{}, err } - s.out <- msg - return nil + return next.(Message), nil } -// Out returns a channel onto which messages and events are published. -// Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from -// receiving a nil message. -func (s *Subscription) Out() <-chan Message { return s.out } - +// ID returns the unique subscription identifier for s. func (s *Subscription) ID() string { return s.id } -// Canceled returns a channel that's closed when the subscription is -// terminated and supposed to be used in a select statement. -func (s *Subscription) Canceled() <-chan struct{} { - return s.canceled -} +// publish transmits msg to the subscriber. It reports a queue error if the +// queue cannot accept any further messages. +func (s *Subscription) publish(msg Message) error { return s.queue.Add(msg) } -// Err returns nil if the channel returned by Canceled is not yet closed. -// If the channel is closed, Err returns a non-nil error explaining why: -// - ErrUnsubscribed if the subscriber choose to unsubscribe, -// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, -// After Err returns a non-nil error, successive calls to Err return the same -// error. -func (s *Subscription) Err() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.err -} - -func (s *Subscription) cancel(err error) { - s.mtx.Lock() - defer s.mtx.Unlock() - defer func() { - perr := recover() - if err == nil && perr != nil { - err = fmt.Errorf("problem closing subscription: %v", perr) - } - }() - - if s.err == nil && err != nil { - s.err = err +// stop terminates the subscription with the given error reason. +func (s *Subscription) stop(err error) { + if err == nil { + panic("nil stop error") } - - s.stop() + s.stopErr = err + s.queue.Close() } // Message glues data and events together. 
@@ -138,14 +77,6 @@ type Message struct { events []types.Event } -func NewMessage(subID string, data interface{}, events []types.Event) Message { - return Message{ - subID: subID, - data: data, - events: events, - } -} - // SubscriptionID returns the unique identifier for the subscription // that produced this message. func (msg Message) SubscriptionID() string { return msg.subID } diff --git a/node/node.go b/node/node.go index c17a4f409..840eaac76 100644 --- a/node/node.go +++ b/node/node.go @@ -17,6 +17,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" @@ -59,7 +60,7 @@ type nodeImpl struct { isListening bool // services - eventBus *types.EventBus // pub/sub for services + eventBus *eventbus.EventBus // pub/sub for services eventSinks []indexer.EventSink stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk @@ -847,7 +848,7 @@ func (n *nodeImpl) Mempool() mempool.Mempool { } // EventBus returns the Node's EventBus. -func (n *nodeImpl) EventBus() *types.EventBus { +func (n *nodeImpl) EventBus() *eventbus.EventBus { return n.eventBus } diff --git a/node/node_test.go b/node/node_test.go index 0a1ebaaaf..f0591a165 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -29,6 +29,7 @@ import ( "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" @@ -61,14 +62,15 @@ func TestNodeStartStop(t *testing.T) { defer cancel() // wait for the node to produce a block - blocksSub, err := n.EventBus().Subscribe(ctx, "node_test", types.EventQueryNewBlock) + blocksSub, err := n.EventBus().SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "node_test", + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-blocksSub.Out(): - case <-blocksSub.Canceled(): - t.Fatal("blocksSub was canceled") - case <-time.After(10 * time.Second): - t.Fatal("timed out waiting for the node to produce a block") + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + if _, err := blocksSub.Next(tctx); err != nil { + t.Fatalf("Waiting for event: %v", err) } // stop the node diff --git a/node/setup.go b/node/setup.go index a0d22a31c..6ee509c28 100644 --- a/node/setup.go +++ b/node/setup.go @@ -14,6 +14,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" @@ -97,8 +98,8 @@ func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Lo return proxyApp, nil } -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() +func createAndStartEventBus(logger log.Logger) (*eventbus.EventBus, error) { + eventBus := eventbus.NewDefault() eventBus.SetLogger(logger.With("module", 
"events")) if err := eventBus.Start(); err != nil { return nil, err @@ -109,7 +110,7 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { func createAndStartIndexerService( cfg *config.Config, dbProvider config.DBProvider, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, logger log.Logger, chainID string, ) (*indexer.Service, []indexer.EventSink, error) { @@ -315,7 +316,7 @@ func createConsensusReactor( privValidator types.PrivValidator, csMetrics *consensus.Metrics, waitSync bool, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 21ca6e6f1..cb5a0a5ed 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/internal/eventbus" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" @@ -38,7 +39,7 @@ don't need to do anything). It will keep trying indefinitely with exponential backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { - *types.EventBus + *eventbus.EventBus Logger log.Logger ctx *rpctypes.Context env *rpccore.Environment @@ -48,7 +49,7 @@ type Local struct { // local RPC client constructor needs to build a local client. type NodeService interface { RPCEnvironment() *rpccore.Environment - EventBus() *types.EventBus + EventBus() *eventbus.EventBus } // New configures a client that calls the Node directly. @@ -204,82 +205,78 @@ func (c *Local) Subscribe( ctx context.Context, subscriber, queryString string, - outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { + capacity ...int) (out <-chan coretypes.ResultEvent, err error) { q, err := query.New(queryString) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } - outCap := 1 - if len(outCapacity) > 0 { - outCap = outCapacity[0] + limit, quota := 1, 0 + if len(capacity) > 0 { + limit = capacity[0] + if len(capacity) > 1 { + quota = capacity[1] + } } - var sub types.Subscription - if outCap > 0 { - sub, err = c.EventBus.Subscribe(ctx, subscriber, q, outCap) - } else { - sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) + ctx, cancel := context.WithCancel(ctx) + go func() { <-c.Quit(); cancel() }() + + subArgs := pubsub.SubscribeArgs{ + ClientID: subscriber, + Query: q, + Quota: quota, + Limit: limit, } + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err != nil { return nil, fmt.Errorf("failed to subscribe: %w", err) } - outc := make(chan coretypes.ResultEvent, outCap) - go c.eventsRoutine(sub, subscriber, q, outc) + outc := make(chan coretypes.ResultEvent, 1) + go c.eventsRoutine(ctx, sub, subArgs, outc) return outc, nil } func (c *Local) eventsRoutine( - sub types.Subscription, - subscriber string, - q pubsub.Query, - outc chan<- coretypes.ResultEvent) { + ctx context.Context, + sub eventbus.Subscription, + subArgs pubsub.SubscribeArgs, + outc chan<- coretypes.ResultEvent, +) { + qstr := subArgs.Query.String() for { - select { - case msg := <-sub.Out(): - result := coretypes.ResultEvent{ - SubscriptionID: msg.SubscriptionID(), - Query: q.String(), - Data: msg.Data(), - Events: msg.Events(), + msg, err := sub.Next(ctx) + if errors.Is(err, pubsub.ErrUnsubscribed) { + return // client unsubscribed + } else if err != nil { + c.Logger.Error("subscription was canceled, resubscribing", + "err", err, 
"query", subArgs.Query.String()) + sub = c.resubscribe(ctx, subArgs) + if sub == nil { + return // client terminated } - - if cap(outc) == 0 { - outc <- result - } else { - select { - case outc <- result: - default: - c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) - } - } - case <-sub.Canceled(): - if sub.Err() == pubsub.ErrUnsubscribed { - return - } - - c.Logger.Error("subscription was canceled, resubscribing...", "err", sub.Err(), "query", q.String()) - sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped - return - } - case <-c.Quit(): - return + continue + } + outc <- coretypes.ResultEvent{ + SubscriptionID: msg.SubscriptionID(), + Query: qstr, + Data: msg.Data(), + Events: msg.Events(), } } } // Try to resubscribe with exponential backoff. -func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription { +func (c *Local) resubscribe(ctx context.Context, subArgs pubsub.SubscribeArgs) eventbus.Subscription { attempts := 0 for { if !c.IsRunning() { return nil } - sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q) + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err == nil { return sub } diff --git a/types/event_bus.go b/types/event_bus.go deleted file mode 100644 index dfe3a0664..000000000 --- a/types/event_bus.go +++ /dev/null @@ -1,326 +0,0 @@ -package types - -import ( - "context" - "fmt" - "strings" - - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" -) - -const defaultCapacity = 0 - -type EventBusSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error) - Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error - UnsubscribeAll(ctx context.Context, subscriber string) error - - NumClients() int - NumClientSubscriptions(clientID string) int -} - -type Subscription interface { - ID() string - Out() <-chan tmpubsub.Message - Canceled() <-chan struct{} - Err() error -} - -// EventBus is a common bus for all events going through the system. All calls -// are proxied to underlying pubsub server. All events must be published using -// EventBus to ensure correct data types. -type EventBus struct { - service.BaseService - pubsub *tmpubsub.Server -} - -// NewEventBus returns a new event bus. -func NewEventBus() *EventBus { - return NewEventBusWithBufferCapacity(defaultCapacity) -} - -// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. 
-func NewEventBusWithBufferCapacity(cap int) *EventBus { - // capacity could be exposed later if needed - pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) - b := &EventBus{pubsub: pubsub} - b.BaseService = *service.NewBaseService(nil, "EventBus", b) - return b -} - -func (b *EventBus) SetLogger(l log.Logger) { - b.BaseService.SetLogger(l) - b.pubsub.SetLogger(l.With("module", "pubsub")) -} - -func (b *EventBus) OnStart() error { - return b.pubsub.Start() -} - -func (b *EventBus) OnStop() { - if err := b.pubsub.Stop(); err != nil { - b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) - } -} - -func (b *EventBus) NumClients() int { - return b.pubsub.NumClients() -} - -func (b *EventBus) NumClientSubscriptions(clientID string) int { - return b.pubsub.NumClientSubscriptions(clientID) -} - -func (b *EventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - outCapacity ...int, -) (Subscription, error) { - return b.pubsub.Subscribe(ctx, subscriber, query, outCapacity...) -} - -// This method can be used for a local consensus explorer and synchronous -// testing. Do not use for for public facing / untrusted subscriptions! -func (b *EventBus) SubscribeUnbuffered( - ctx context.Context, - subscriber string, - query tmpubsub.Query, -) (Subscription, error) { - return b.pubsub.SubscribeUnbuffered(ctx, subscriber, query) -} - -func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return b.pubsub.Unsubscribe(ctx, args) -} - -func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return b.pubsub.UnsubscribeAll(ctx, subscriber) -} - -func (b *EventBus) Publish(eventValue string, eventData TMEventData) error { - // no explicit deadline for publishing events - ctx := context.Background() - - tokens := strings.Split(EventTypeKey, ".") - event := types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: eventValue, - }, - }, - } - - return b.pubsub.PublishWithEvents(ctx, eventData, []types.Event{event}) -} - -func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) - - // add Tendermint-reserved new block event - events = append(events, EventNewBlock) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) 
- - // add Tendermint-reserved new block header event - events = append(events, EventNewBlockHeader) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { - return b.Publish(EventNewEvidenceValue, evidence) -} - -func (b *EventBus) PublishEventVote(data EventDataVote) error { - return b.Publish(EventVoteValue, data) -} - -func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { - return b.Publish(EventValidBlockValue, data) -} - -func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { - return b.Publish(EventBlockSyncStatusValue, data) -} - -func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { - return b.Publish(EventStateSyncStatusValue, data) -} - -// PublishEventTx publishes tx event with events from Result. Note it will add -// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys -// will be overwritten. -func (b *EventBus) PublishEventTx(data EventDataTx) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := data.Result.Events - - // add Tendermint-reserved events - events = append(events, EventTx) - - tokens := strings.Split(TxHashKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%X", Tx(data.Tx).Hash()), - }, - }, - }) - - tokens = strings.Split(TxHeightKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%d", data.Height), - }, - }, - }) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return b.Publish(EventNewRoundStepValue, data) -} - -func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { - return b.Publish(EventTimeoutProposeValue, data) -} - -func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return b.Publish(EventTimeoutWaitValue, data) -} - -func (b *EventBus) PublishEventNewRound(data EventDataNewRound) error { - return b.Publish(EventNewRoundValue, data) -} - -func (b *EventBus) PublishEventCompleteProposal(data EventDataCompleteProposal) error { - return b.Publish(EventCompleteProposalValue, data) -} - -func (b *EventBus) PublishEventPolka(data EventDataRoundState) error { - return b.Publish(EventPolkaValue, data) -} - -func (b *EventBus) PublishEventUnlock(data EventDataRoundState) error { - return b.Publish(EventUnlockValue, data) -} - -func (b *EventBus) PublishEventRelock(data EventDataRoundState) error { - return b.Publish(EventRelockValue, data) -} - -func (b *EventBus) PublishEventLock(data EventDataRoundState) error { - return b.Publish(EventLockValue, data) -} - -func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { - return b.Publish(EventValidatorSetUpdatesValue, data) -} - -//----------------------------------------------------------------------------- -type NopEventBus struct{} - -func (NopEventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - out chan<- interface{}, -) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -func 
(NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventVote(data EventDataVote) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTx(data EventDataTx) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventPolka(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventRelock(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventLock(data EventDataRoundState) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error {
-	return nil
-}
-
-func (NopEventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error {
-	return nil
-}

From d5865af1f454cc238272b7cab4c5fe3d50c2445e Mon Sep 17 00:00:00 2001
From: "M. J. Fromberger"
Date: Fri, 5 Nov 2021 12:50:53 -0700
Subject: [PATCH 155/174] Add basic metrics to the indexer package. (#7250)

This follows the same model as we did in the p2p package. Rework the
indexer service constructor to take a struct of arguments, which makes
it easier to construct the optional settings. Deprecate but do not
remove the existing constructor. Clean up node initialization a little
bit.
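For illustration only, a minimal sketch of how a caller might wire up
the new constructor with Prometheus metrics enabled. The surrounding
variables (eventSinks, eventBus, logger, chainID) and the "tendermint"
namespace are assumptions for the example; the constructor, argument
struct, and metrics helpers mirror the identifiers introduced in this
patch:

	// Sketch, not part of the patch: construct the indexer service
	// using the new argument struct, with optional Prometheus metrics.
	indexerService := indexer.NewService(indexer.ServiceArgs{
		Sinks:    eventSinks, // assumed to be created elsewhere
		EventBus: eventBus,   // assumed to be created elsewhere
		Logger:   logger.With("module", "txindex"),
		Metrics:  indexer.PrometheusMetrics("tendermint", "chain_id", chainID),
	})
	if err := indexerService.Start(); err != nil {
		return err
	}

Callers that do not need metrics may omit the Metrics field; NewService
falls back to NopMetrics in that case.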
--- internal/inspect/inspect.go | 7 ++- internal/state/indexer/indexer_service.go | 41 +++++++++++-- internal/state/indexer/metrics.go | 73 +++++++++++++++++++++++ node/node.go | 34 ++++++----- node/node_test.go | 3 +- node/setup.go | 9 ++- 6 files changed, 143 insertions(+), 24 deletions(-) create mode 100644 internal/state/indexer/metrics.go diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 0a92ef3f2..66e9c9421 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -47,8 +47,11 @@ func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexe routes := rpc.Routes(*cfg, ss, bs, es, logger) eb := eventbus.NewDefault() eb.SetLogger(logger.With("module", "events")) - is := indexer.NewIndexerService(es, eb) - is.SetLogger(logger.With("module", "txindex")) + is := indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eb, + Logger: logger.With("module", "txindex"), + }) return &Inspector{ routes: routes, config: cfg, diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index 5952050f2..8f69f488b 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -2,8 +2,10 @@ package indexer import ( "context" + "time" "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" @@ -16,6 +18,7 @@ type Service struct { eventSinks []EventSink eventBus *eventbus.EventBus + metrics *Metrics currentBlock struct { header types.EventDataNewBlockHeader @@ -24,13 +27,29 @@ type Service struct { } } -// NewIndexerService returns a new service instance. -func NewIndexerService(es []EventSink, eventBus *eventbus.EventBus) *Service { - is := &Service{eventSinks: es, eventBus: eventBus} - is.BaseService = *service.NewBaseService(nil, "IndexerService", is) +// NewService constructs a new indexer service from the given arguments. +func NewService(args ServiceArgs) *Service { + is := &Service{ + eventSinks: args.Sinks, + eventBus: args.EventBus, + metrics: args.Metrics, + } + if is.metrics == nil { + is.metrics = NopMetrics() + } + is.BaseService = *service.NewBaseService(args.Logger, "IndexerService", is) return is } +// NewIndexerService returns a new service instance. +// Deprecated: Use NewService instead. +func NewIndexerService(es []EventSink, eventBus *eventbus.EventBus) *Service { + return NewService(ServiceArgs{ + Sinks: es, + EventBus: eventBus, + }) +} + // publish publishes a pubsub message to the service. The service blocks until // the message has been fully processed. func (is *Service) publish(msg pubsub.Message) error { @@ -71,20 +90,26 @@ func (is *Service) publish(msg pubsub.Message) error { if curr.Pending == 0 { // INDEX: We have all the transactions we expect for the current block. 
 		for _, sink := range is.eventSinks {
+			start := time.Now()
 			if err := sink.IndexBlockEvents(is.currentBlock.header); err != nil {
 				is.Logger.Error("failed to index block header",
 					"height", is.currentBlock.height, "err", err)
 			} else {
+				is.metrics.BlockEventsSeconds.Observe(time.Since(start).Seconds())
+				is.metrics.BlocksIndexed.Add(1)
 				is.Logger.Debug("indexed block",
 					"height", is.currentBlock.height, "sink", sink.Type())
 			}
 
 			if curr.Size() != 0 {
+				start := time.Now()
 				err := sink.IndexTxEvents(curr.Ops)
 				if err != nil {
 					is.Logger.Error("failed to index block txs",
 						"height", is.currentBlock.height, "err", err)
 				} else {
+					is.metrics.TxEventsSeconds.Observe(time.Since(start).Seconds())
+					is.metrics.TransactionsIndexed.Add(float64(curr.Size()))
 					is.Logger.Debug("indexed txs",
 						"height", is.currentBlock.height, "sink", sink.Type())
 				}
@@ -122,6 +147,14 @@ func (is *Service) OnStop() {
 	}
 }
 
+// ServiceArgs are arguments for constructing a new indexer service.
+type ServiceArgs struct {
+	Sinks    []EventSink
+	EventBus *eventbus.EventBus
+	Metrics  *Metrics
+	Logger   log.Logger
+}
+
 // KVSinkEnabled reports whether the given eventSinks contain a KVEventSink.
 func KVSinkEnabled(sinks []EventSink) bool {
 	for _, sink := range sinks {
diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go
new file mode 100644
index 000000000..aa64a4bb2
--- /dev/null
+++ b/internal/state/indexer/metrics.go
@@ -0,0 +1,73 @@
+package indexer
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// MetricsSubsystem is the subsystem label for the indexer package.
+const MetricsSubsystem = "indexer"
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Latency for indexing block events.
+	BlockEventsSeconds metrics.Histogram
+
+	// Latency for indexing transaction events.
+	TxEventsSeconds metrics.Histogram
+
+	// Number of complete blocks indexed.
+	BlocksIndexed metrics.Counter
+
+	// Number of transactions indexed.
+	TransactionsIndexed metrics.Counter
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+// Optionally, labels can be provided along with their values ("foo",
+// "fooValue").
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_events_seconds", + Help: "Latency for indexing block events.", + }, labels).With(labelsAndValues...), + TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_events_seconds", + Help: "Latency for indexing transaction events.", + }, labels).With(labelsAndValues...), + BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "blocks_indexed", + Help: "Number of complete blocks indexed.", + }, labels).With(labelsAndValues...), + TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "transactions_indexed", + Help: "Number of transactions indexed.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns an indexer metrics stub that discards all samples. +func NopMetrics() *Metrics { + return &Metrics{ + BlockEventsSeconds: discard.NewHistogram(), + TxEventsSeconds: discard.NewHistogram(), + BlocksIndexed: discard.NewCounter(), + TransactionsIndexed: discard.NewCounter(), + } +} diff --git a/node/node.go b/node/node.go index 840eaac76..fa703f1d6 100644 --- a/node/node.go +++ b/node/node.go @@ -171,7 +171,8 @@ func makeNode(cfg *config.Config, return nil, combineCloseError(err, makeCloser(closers)) } - indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID) + indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, + logger, genDoc.ChainID, nodeMetrics.indexer) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } @@ -900,11 +901,12 @@ func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider { type nodeMetrics struct { consensus *consensus.Metrics - p2p *p2p.Metrics + indexer *indexer.Metrics mempool *mempool.Metrics + p2p *p2p.Metrics + proxy *proxy.Metrics state *sm.Metrics statesync *statesync.Metrics - proxy *proxy.Metrics } // metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. 
@@ -916,21 +918,23 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { return func(chainID string) *nodeMetrics { if cfg.Prometheus { return &nodeMetrics{ - consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + consensus: consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + indexer: indexer.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + mempool: mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + p2p: p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy: proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + state: sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + statesync: statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), } } return &nodeMetrics{ - consensus.NopMetrics(), - p2p.NopMetrics(), - mempool.NopMetrics(), - sm.NopMetrics(), - statesync.NopMetrics(), - proxy.NopMetrics(), + consensus: consensus.NopMetrics(), + indexer: indexer.NopMetrics(), + mempool: mempool.NopMetrics(), + p2p: p2p.NopMetrics(), + proxy: proxy.NopMetrics(), + state: sm.NopMetrics(), + statesync: statesync.NopMetrics(), } } } diff --git a/node/node_test.go b/node/node_test.go index f0591a165..90a585a63 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -558,7 +558,8 @@ func TestNodeSetEventSink(t *testing.T) { require.NoError(t, err) indexService, eventSinks, err := createAndStartIndexerService(cfg, - config.DefaultDBProvider, eventBus, logger, genDoc.ChainID) + config.DefaultDBProvider, eventBus, logger, genDoc.ChainID, + indexer.NopMetrics()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) return eventSinks diff --git a/node/setup.go b/node/setup.go index 6ee509c28..8d5fb21b9 100644 --- a/node/setup.go +++ b/node/setup.go @@ -113,14 +113,19 @@ func createAndStartIndexerService( eventBus *eventbus.EventBus, logger log.Logger, chainID string, + metrics *indexer.Metrics, ) (*indexer.Service, []indexer.EventSink, error) { eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID) if err != nil { return nil, nil, err } - indexerService := indexer.NewIndexerService(eventSinks, eventBus) - indexerService.SetLogger(logger.With("module", "txindex")) + indexerService := indexer.NewService(indexer.ServiceArgs{ + Sinks: eventSinks, + EventBus: eventBus, + Logger: logger.With("module", "txindex"), + Metrics: metrics, + }) if err := indexerService.Start(); err != nil { return nil, nil, err From 2af7d8defdac144ec5e50c6b804dcd6584c92263 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Nov 2021 02:37:58 +0000 Subject: [PATCH 156/174] build(deps): Bump google.golang.org/grpc from 1.41.0 to 1.42.0 (#7200) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.41.0 to 1.42.0.
Release notes

Sourced from google.golang.org/grpc's releases.

Release 1.42.0

Behavior Changes

  • grpc: Dial("unix://relative-path") no longer works (#4817)
    • use "unix://absolute-path" or "unix:relative-path" instead in accordance with our documentation
  • xds/csds: use new field GenericXdsConfig instead of PerXdsConfig (#4898)

New Features

  • grpc: support grpc.WithAuthority when secure credentials are used (#4817)
  • creds/google: add NewDefaultCredentialsWithOptions() to support custom per-RPC creds (#4830)
  • authz: create file watcher interceptor for gRPC SDK API (#4760)
  • attributes: add Equal method (#4855)
  • resolver: add AddressMap and State.BalancerAttributes (#4855)
  • resolver: Add URL field to Target to store parsed dial target (#4817)
  • grpclb: add a target_name field to lb config to specify target when used as a child policy (#4847)
  • grpclog: support formatting log output as JSON (#4854)

Bug Fixes

  • server: add missing conn.Close if the connection dies before reading the HTTP/2 preface (#4837)
  • grpclb: recover if addresses are received after an empty server list was received previously (#4879)
  • authz: support empty principals and fix rbac authenticated matcher (#4883)
  • xds/rds: NACK the RDS response if it contains unknown cluster specifier (#4788)
  • xds/priority: do not switch to low priority when high priority is in Idle (e.g. ringhash) (#4889)

Documentation

  • grpc: stabilize WithDefaultServiceConfig and improve godoc (#4888)
  • status: clarify FromError docstring (#4880)
  • examples: add example illustrating the use of unix abstract sockets (#4848)
  • examples: update load balancing example to use loadBalancingConfig (#4887)
  • doc: promote WithDisableRetry to stable; clarify retry is enabled by default (#4901)

API Changes

  • credentials: Mark TransportCredentials.OverrideServerName method as deprecated (#4817)
Commits
  • aff571c Change version to 1.42.0 (#4910)
  • 2d7bdf2 xds: Set RBAC on by default (#4909)
  • d47437c xds: Fix invert functionality for header matcher (#4902)
  • 9fa2698 xds/csds: populate new GenericXdsConfig field (#4898)
  • 6e8625d doc: promote WithDisableRetry to stable; clarify retry is enabled by default ...
  • f1d87c1 client: properly disable retry if GRPC_GO_RETRY=off (#4899)
  • 03753f5 creds/google: fix CFE cluster name check (#4893)
  • 4f21cde authz: support empty principals and fix rbac authenticated matcher (#4883)
  • f00baa6 resolver: replace AddressMap.Range with Keys (#4891)
  • 2a31245 client: don't force passthrough as default resolver (#4890)
  • Additional commits viewable in compare view

--- abci/example/example_test.go | 2 +- abci/example/kvstore/kvstore_test.go | 2 +- go.mod | 2 +- go.sum | 7 +++++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/abci/example/example_test.go b/abci/example/example_test.go index cde8a15b1..64705d0e0 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -129,7 +129,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { numDeliverTxs := 2000 - socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) + socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 2c6f48903..7c89db1ad 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -296,7 +296,7 @@ func TestClientServer(t *testing.T) { // set up grpc app kvstore = NewApplication() - gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc") + gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc") require.NoError(t, err) t.Cleanup(func() { diff --git a/go.mod b/go.mod index 3d4d459a3..27fd32206 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.41.0 + google.golang.org/grpc v1.42.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 3f3420892..0af234146 100644 --- a/go.sum +++ b/go.sum @@ -181,8 +181,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= @@ -1549,8 +1552,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc 
v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 1e701ed9b506cba8fa0897e89797ffbba409d271 Mon Sep 17 00:00:00 2001 From: JayT106 Date: Mon, 8 Nov 2021 15:45:56 -0500 Subject: [PATCH 157/174] fix LGTM warning (#7257) --- cmd/tendermint/commands/debug/kill.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 7f1ebdba1..fbb2dcebe 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -33,7 +33,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, } func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + pid, err := strconv.ParseInt(args[0], 10, 64) if err != nil { return err } @@ -91,7 +91,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } logger.Info("killing Tendermint process") - if err := killProc(pid, tmpDir); err != nil { + if err := killProc(int(pid), tmpDir); err != nil { return err } @@ -104,7 +104,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. @@ -127,7 +127,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the Tendermint process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. 
- p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { From b3b90f820cd2001e3504142b9b22bb723129ed1b Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Tue, 9 Nov 2021 10:51:19 +0100 Subject: [PATCH 158/174] consensus: add some more checks to vote counting (#7253) --- internal/consensus/peer_state.go | 8 +++++++- types/vote_set.go | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 73e61f21c..6a64e8e10 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -193,7 +193,10 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, boo } if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - return votes.GetByIndex(int32(index)), true + vote := votes.GetByIndex(int32(index)) + if vote != nil { + return vote, true + } } return nil, false @@ -358,6 +361,9 @@ func (ps *PeerState) BlockPartsSent() int { // SetHasVote sets the given vote as known by the peer func (ps *PeerState) SetHasVote(vote *types.Vote) { + if vote == nil { + return + } ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/types/vote_set.go b/types/vote_set.go index b064f2c07..e014ae7bb 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -372,6 +372,9 @@ func (voteSet *VoteSet) GetByIndex(valIndex int32) *Vote { } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() + if int(valIndex) >= len(voteSet.votes) { + return nil + } return voteSet.votes[valIndex] } From e4466b79057214cf14d1f16849241eb0304c4897 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Nov 2021 13:22:40 +0000 Subject: [PATCH 159/174] build(deps): Bump github.com/lib/pq from 1.10.3 to 1.10.4 (#7261) Bumps [github.com/lib/pq](https://github.com/lib/pq) from 1.10.3 to 1.10.4.
Release notes

Sourced from github.com/lib/pq's releases.

v1.10.4

  • Keep track of (context cancelled) error on connection.
  • Fix android build
Commits
  • 8446d16 issue 1062: Keep track of (context cancelled) error on connection, and make r...
  • 6a102c0 Merge pull request #1060 from ian4hu/patch-1
  • a54251e Merge pull request #1061 from mjl-/fix-flaky-TestConnPrepareContext
  • 2b4fa17 Fix flaky TestConnPrepareContext
  • b33a1b7 Fix android build
  • 16e9cad Fix build in android
  • 26399a7 Merge pull request #1057 from jfcg/master
  • 0870776 fix possible integer truncation
  • c01ab77 Create codeql-analysis.yml
  • See full diff in compare view

--- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 27fd32206..1c789ef0d 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.3 + github.com/lib/pq v1.10.4 github.com/libp2p/go-buffer-pool v0.0.2 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b diff --git a/go.sum b/go.sum index 0af234146..ece67f19e 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,9 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg= github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= From 9dc3d7f9a26f2e395311b2f1d705aa950bb25c7a Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 9 Nov 2021 08:35:51 -0800 Subject: [PATCH 160/174] Set a cap on the length of subscription queries. (#7263) As a safety measure, don't allow a query string to be unreasonably long. The query filter is not especially efficient, so a query that needs more than basic detail should filter coarsely in the subscriber and refine on the client side. This affects Subscribe and TxSearch queries. --- internal/rpc/core/events.go | 6 ++++++ internal/rpc/core/tx.go | 2 ++ 2 files changed, 8 insertions(+) diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index 57dbd4ebe..d8e09f35e 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -15,6 +15,10 @@ import ( const ( // Buffer on the Tendermint (server) side to allow some slowness in clients. subBufferSize = 100 + + // maxQueryLength is the maximum length of a query string that will be + // accepted. This is just a safety check to avoid outlandish queries. + maxQueryLength = 512 ) // Subscribe for events via WebSocket. 
@@ -26,6 +30,8 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } env.Logger.Info("Subscribe to query", "remote", addr, "query", query) diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 60c7519c0..7ba2bf90c 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -72,6 +72,8 @@ func (env *Environment) TxSearch( if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } q, err := tmquery.New(query) From 4acd117b5e28cf39709e87cf802dbee8e928d8b9 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Tue, 9 Nov 2021 15:48:01 -0500 Subject: [PATCH 161/174] evidence: remove source of non-determinism from test (#7266) The evidence test produces a set of mock evidence in the evidence pool of the 'Primary' node. The test then fills the evidence pools of secondaries with half of this mock evidence. Finally, the test waits until the secondary has an evidence pool as full as the primary. The assertions that are removed here were checking that the primary and secondaries' evidence channels were empty. However, nothing in the test actually ensures that the channels are empty. The test only waits for the secondaries to have received the complete set of evidence, and the secondaries already received half of the evidence at the beginning. It's more than possible that the secondaries can receive the complete set of evidence and not finish reading the duplicate evidence off the channels. --- internal/evidence/reactor_test.go | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index cf8f840ea..d1ae55803 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -394,11 +394,8 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_Committed(t *testing.T) { @@ -438,11 +435,6 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { // start the network and ensure it's configured rts.start(t) - // without the following sleep the test consistently fails; - // likely because the sleep forces a context switch that lets - // the router process other operations. - time.Sleep(2 * time.Millisecond) - // The secondary reactor should have received all the evidence ignoring the // already committed evidence. 
rts.waitForEvidence(t, evList[numEvidence/2:], secondary.NodeID) @@ -450,11 +442,8 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { From 27560cf7a4dc5c8a1fed98f6bbe86898c1ca790f Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Wed, 10 Nov 2021 10:52:18 -0500 Subject: [PATCH 162/174] p2p: reduce peer score for dial failures (#7265) When dialing fails, we should reduce the score of the peer, which puts the peer at (potentially) greater chances of being removed from the peer manager, and reduces the chance of the peer being gossiped by the PEX reactor. --- CHANGELOG_PENDING.md | 1 + internal/p2p/peermanager.go | 21 ++++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 70b361d26..08f53c6e0 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -23,6 +23,7 @@ Special thanks to external contributors on this release: - P2P Protocol - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) + - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish) - Go API diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 7ccc0d59c..8c37cc1ff 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -532,6 +532,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error { if !ok { return nil // Assume the address has been removed, ignore. } + addressInfo.LastDialFailure = time.Now().UTC() addressInfo.DialFailures++ if err := m.store.Set(peer); err != nil { @@ -602,6 +603,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { addressInfo.LastDialSuccess = now // If not found, assume address has been removed. } + if err := m.store.Set(peer); err != nil { return err } @@ -660,6 +662,11 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { peer = m.newPeerInfo(peerID) } + // reset this to avoid penalizing peers for their past transgressions + for _, addr := range peer.AddressInfo { + addr.DialFailures = 0 + } + // If all connections slots are full, but we allow upgrades (and we checked // above that we have upgrade capacity), then we can look for a lower-scored // peer to replace and if found accept the connection anyway and evict it. @@ -1287,15 +1294,23 @@ func (p *peerInfo) Score() PeerScore { return PeerScorePersistent } - if p.MutableScore <= 0 { + score := p.MutableScore + + for _, addr := range p.AddressInfo { + // DialFailures is reset when dials succeed, so this + // is either the number of dial failures or 0. + score -= int64(addr.DialFailures) + } + + if score <= 0 { return 0 } - if p.MutableScore >= math.MaxUint8 { + if score >= math.MaxUint8 { return PeerScore(math.MaxUint8) } - return PeerScore(p.MutableScore) + return PeerScore(score) } // Validate validates the peer info.
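To make the effect of the new scoring rule concrete, here is a minimal, self-contained sketch; `scoreAfterDialFailures` is a hypothetical stand-in for the `peerInfo.Score` method shown in the diff above (one point subtracted per recorded dial failure, clamped to the `[0, math.MaxUint8]` range):

```go
package main

import (
	"fmt"
	"math"
)

// scoreAfterDialFailures mirrors the adjusted scoring rule: each recorded
// dial failure subtracts one point from the peer's mutable score, and the
// result is clamped to the [0, math.MaxUint8] range.
func scoreAfterDialFailures(mutableScore int64, dialFailuresPerAddress []uint32) uint8 {
	score := mutableScore
	for _, failures := range dialFailuresPerAddress {
		// DialFailures is reset when a dial succeeds, so each entry is
		// either the current failure streak for an address or zero.
		score -= int64(failures)
	}
	if score <= 0 {
		return 0
	}
	if score >= math.MaxUint8 {
		return math.MaxUint8
	}
	return uint8(score)
}

func main() {
	// A peer with a base score of 10 whose two known addresses failed
	// 3 and 4 dials respectively ends up with a score of 3.
	fmt.Println(scoreAfterDialFailures(10, []uint32{3, 4}))
}
```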
From f28d629e280ddcdc0dd644ccf1586d73dddfb7a1 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Fri, 12 Nov 2021 15:21:48 -0500 Subject: [PATCH 163/174] ADR: Update the proposer-based timestamp spec per discussion with @cason (#7153) * update the proposer-based timestamps spec per discussion with @cason * add vote to list of changed structs * sentence fix * clarify crypto sig logic update * language updates per feedback from @cason * fix POL < 0 wording * update timely description for non-polka --- .../adr-071-proposer-based-timestamps.md | 272 ++++++------------ 1 file changed, 83 insertions(+), 189 deletions(-) diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md index c23488005..9bb1c245d 100644 --- a/docs/architecture/adr-071-proposer-based-timestamps.md +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -1,45 +1,13 @@ # ADR 71: Proposer-Based Timestamps -* [Changelog](#changelog) -* [Status](#status) -* [Context](#context) -* [Alternative Approaches](#alternative-approaches) - * [Remove timestamps altogether](#remove-timestamps-altogether) -* [Decision](#decision) -* [Detailed Design](#detailed-design) - * [Overview](#overview) - * [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp) - * [Saving the timestamp across heights](#saving-the-timestamp-across-heights) - * [Changes to `CommitSig`](#changes-to-commitsig) - * [Changes to `Commit`](#changes-to-commit) - * [Changes to `Vote` messages](#changes-to-vote-messages) - * [New consensus parameters](#new-consensus-parameters) - * [Changes to `Header`](#changes-to-header) - * [Changes to the block proposal step](#changes-to-the-block-proposal-step) - * [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp) - * [Proposer selects block timestamp](#proposer-selects-block-timestamp) - * [Proposer waits](#proposer-waits) - * [Changes to the propose step timeout](#changes-to-the-propose-step-timeout) - * [Changes to validation rules](#changes-to-validation-rules) - * [Proposal timestamp validation](#proposal-timestamp-validation) - * [Block timestamp validation](#block-timestamp-validation) - * [Changes to the prevote step](#changes-to-the-prevote-step) - * [Changes to the precommit step](#changes-to-the-precommit-step) - * [Changes to locking a block](#changes-to-locking-a-block) - * [Remove voteTime Completely](#remove-votetime-completely) -* [Future Improvements](#future-improvements) -* [Consequences](#consequences) - * [Positive](#positive) - * [Neutral](#neutral) - * [Negative](#negative) -* [References](#references) - ## Changelog - July 15 2021: Created by @williambanfield - Aug 4 2021: Draft completed by @williambanfield - Aug 5 2021: Draft updated to include data structure changes by @williambanfield - Aug 20 2021: Language edits completed by @williambanfield + - Oct 25 2021: Update the ADR to match updated spec from @cason by @williambanfield + - Nov 10 2021: Additional language updates by @williambanfield per feedback from @cason ## Status @@ -68,7 +36,7 @@ However, their currently known Unix time may be greatly divergent from the block The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues. Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: -1. 
The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block. +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block instead of the `BFTTime`. 1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time. The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power. @@ -111,45 +79,9 @@ Implementing proposer-based timestamps will require a few changes to Tendermint These changes will be to the following components: * The `internal/consensus/` package. * The `state/` package. -* The `Vote`, `CommitSig`, `Commit` and `Header` types. +* The `Vote`, `CommitSig` and `Header` types. * The consensus parameters. -### Proposal Timestamp and Block Timestamp - -This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message. -The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses. - -The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31). -This timestamp is the current Unix time known to the proposer when sending the `Proposal` message. -This timestamp is not currently used as part of consensus. -The changes in this ADR will begin using the proposal message timestamp as part of consensus. -We will refer to this as the **proposal timestamp** throughout this design. - -The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338). -This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm. -It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block. -This field will continue to be used but the logic for creating and validating this timestamp will change. -We will refer to this as the **block timestamp** throughout this design. - -At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`. -The following image shows this relationship. -The rest of this document describes the code changes that will make this possible. - -![](./img/pbts-message.png) - -### Saving the timestamp across heights - -Currently, `BFTtime` uses `LastCommit` to construct the block timestamp. -The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`. -`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`. - -For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`. -Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`. -To enable this, we will add a `Timestamp` field to the `Commit` struct. -This field will be populated at each height with the proposal timestamp decided on at the previous height. -This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes. 
-Changes to the `CommitSig` and `Commit` struct are detailed below. - ### Changes to `CommitSig` The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp. @@ -167,32 +99,14 @@ type CommitSig struct { } ``` -### Changes to `Commit` - -The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp. -The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp. -With these timestamps removed, the commit time will instead be stored in the `Commit` struct. - -`Commit` will be updated as follows. - -```diff -type Commit struct { - Height int64 `json:"height"` - Round int32 `json:"round"` -++ Timestamp time.Time `json:"timestamp"` - BlockID BlockID `json:"block_id"` - Signatures []CommitSig `json:"signatures"` -} -``` - ### Changes to `Vote` messages `Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50). This struct currently contains a timestamp. This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator. For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field. -For prevotes, this field is unused. -Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`. +For prevotes, this field is currently unused. +Proposer-based timestamps will use the timestamp that the proposer sets into the block and will therefore no longer require that a timestamp be included in the vote messages. This timestamp is therefore no longer useful and will be dropped. `Vote` will be updated as follows: @@ -250,58 +164,28 @@ type TimestampParams struct { } ``` -### Changes to `Header` - -The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp. -This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps. -This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`. -This timestamp will therfore be identical in both the `Header` and the `LastCommit`. -To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`. 
- -`Header` will be updated as follows: - -```diff -type Header struct { - // basic block info - Version version.Consensus `json:"version"` - ChainID string `json:"chain_id"` - Height int64 `json:"height"` --- Time time.Time `json:"time"` -++ LastTimestamp time.Time `json:"last_timestamp"` - - // prev block info - LastBlockID BlockID `json:"last_block_id"` - - // hashes of block data - LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"` - DataHash tmbytes.HexBytes `json:"data_hash"` - - // hashes from the app output from the prev block - ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` - NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` - ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` - AppHash tmbytes.HexBytes `json:"app_hash"` - - // root hash of all results from the txs from the previous block - LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` - - // consensus info - EvidenceHash tmbytes.HexBytes `json:"evidence_hash"` - ProposerAddress Address `json:"proposer_address"` -} -``` - ### Changes to the block proposal step -#### Proposer selects proposal timestamp - -The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message. -This satisfies the proposer-based timestamp specification and does not need to change. - #### Proposer selects block timestamp -The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's in the `RoundState.LastCommit` field. -The proposer will select this timestamp to use as the block timestamp at height `H`. +Tendermint currently uses the `BFTTime` algorithm to produce the block's `Header.Timestamp`. +The [proposal logic](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/internal/state/state.go#L269) sets the weighted median of the times in the `LastCommit.CommitSigs` as the proposed block's `Header.Timestamp`. + +In proposer-based timestamps, the proposer will still set a timestamp into the `Header.Timestamp`. +The timestamp the proposer sets into the `Header` will change depending on if the block has previously received a [polka](https://github.com/tendermint/tendermint/blob/053651160f496bb44b107a434e3e6482530bb287/docs/introduction/what-is-tendermint.md#consensus-overview) or not. + +#### Proposal of a block that has not previously received a polka + +If a proposer is proposing a new block, then it will set the Unix time currently known to the proposer into the `Header.Timestamp` field. +The proposer will also set this same timestamp into the `Timestamp` field of the `Proposal` message that it issues. + +#### Re-proposal of a block that has previously received a polka + +If a proposer is re-proposing a block that has previously received a polka on the network, then the proposer does not update the `Header.Timestamp` of that block. +Instead, the proposer simply re-proposes the exact same block. +This way, the proposed block has the exact same block ID as the previously proposed block and the validators that have already received that block do not need to attempt to receive it again. + +The proposer will set the re-proposed block's `Header.Timestamp` as the `Proposal` message's `Timestamp`. 
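As a rough sketch of the selection rule described in the two cases above (assuming the standard library `time` package; `hasPolka`, `prevProposedHeaderTime`, and `now` are hypothetical stand-ins for state that the consensus code tracks):

```go
// proposalTimestamp sketches the two cases above: a new block carries the
// proposer's current Unix time in both Header.Timestamp and the Proposal
// message, while a re-proposed block keeps its existing Header.Timestamp so
// that its block ID is unchanged, and that same value is set as the
// Proposal message's Timestamp.
func proposalTimestamp(hasPolka bool, prevProposedHeaderTime, now time.Time) (headerTime, proposalTime time.Time) {
	if hasPolka {
		return prevProposedHeaderTime, prevProposedHeaderTime
	}
	return now, now
}
```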
#### Proposer waits @@ -310,72 +194,94 @@ In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millis A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator time no longer works. Validator clocks will not be perfectly in sync. -Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`. -If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`. +Therefore, the proposer’s current known Unix time may be less than the previous block's `Header.Time`. +If the proposer’s current known Unix time is less than the previous block's `Header.Time`, the proposer will sleep until its known Unix time exceeds it. This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method. -This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`. +This method should now schedule a timeout that fires when the proposer’s time is greater than the previous block's `Header.Time`. +When the timeout fires, the proposer will finally issue the `Proposal` message. #### Changes to the propose step timeout Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen. -Proposer-based timestamps requires changing this timeout logic. +Proposer-based timestamps requires changing this timeout logic. -The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block. +The proposer will now wait until its current known Unix time exceeds the previous block's `Header.Time` to propose a block. The validators must now take this and some other factors into account when deciding when to timeout the propose step. Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer. Additionally, there may be a delay communicating the proposal message from the proposer to the other validators. -Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out. -To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timesatmp + 2*ACCURACY + MSGDELAY`. +Therefore, validators waiting for a proposal must wait until after the previous block's `Header.Time` before timing out. +To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until the previous block's `Header.Time + 2*ACCURACY + MSGDELAY`. The spec defines this as `waitingTime`. The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`. `enterPropose` will be changed to calculate waiting time using the new consensus parameters. The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013). 
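A compact sketch of the timeout calculation described above (assuming the standard library `time` package; `accuracy` and `msgDelay` are hypothetical stand-ins for the `ACCURACY` and `MSGDELAY` consensus parameters):

```go
// proposeTimeout sketches waitingTime: a validator waits for a proposal
// until the previous block's Header.Time plus 2*ACCURACY + MSGDELAY,
// unless the configured propose-step timeout is longer.
func proposeTimeout(prevBlockTime, now time.Time, accuracy, msgDelay, configured time.Duration) time.Duration {
	waitingTime := prevBlockTime.Add(2*accuracy + msgDelay).Sub(now)
	if waitingTime > configured {
		return waitingTime
	}
	return configured
}
```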
-### Changes to validation rules +### Changes to proposal validation rules -The rules for validating that a proposal is valid will need slight modification to implement proposer-based timestamps. -Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well. +The rules for validating a proposed block will be modified to implement proposer-based timestamps. +We will change the validation logic to ensure that a proposal is `timely`. -#### Proposal timestamp validation +Per the proposer-based timestamps spec, `timely` only needs to be checked if a block has not received a +2/3 majority of `Prevotes` in a round. +If a block previously received a +2/3 majority of prevotes in a previous round, then +2/3 of the voting power considered the block's timestamp near enough to their own currently known Unix time in that round. -Adding proposal timestamp validation is a reasonably straightforward change. -The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31). -Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field. -The precommit and prevote validation logic does not currently use this timestamp. -This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators. -If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered it valid. -The validator will also check that the proposal time is greater than the block timestamp from the previous height. +The validation logic will be updated to check `timely` for blocks that did not previously receive +2/3 prevotes in a round. +Receiving +2/3 prevotes in a round is frequently referred to as a 'polka' and we will use this term for simplicity. -If no valid proposal is received by the proposal timeout, the validator will prevote nil. -This is identical to the current logic. #### Current timestamp validation logic -#### Block timestamp validation +To provide a better understanding of the changes needed to timestamp validation, we will first detail how timestamp validation works currently in Tendermint. The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118). First, the validation logic checks that this timestamp is greater than the previous block’s timestamp. -Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48). -Finally, the logic also authenticates the timestamps in the `LastCommit`. -The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key. -One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`. -To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
+ +Second, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48). + +Finally, the validation logic authenticates the timestamps in the `LastCommit.CommitSig`. +The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the voting validator’s private key. +One of the items in this `signedBytes` hash is the timestamp in the `CommitSig`. +To authenticate the `CommitSig` timestamp, the validator authenticating votes builds a hash of fields that includes the `CommitSig` timestamp and checks this hash against the signature. This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25). -The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change. +#### Remove unused timestamp validation logic `BFTTime` validation is no longer applicable and will be removed. -Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps. -This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117). +This means that validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps. +Specifically, we will remove the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117). The `MedianTime` function can be completely removed. -The `LastCommit` timestamps may also be removed. -The `signedBytes` validation logic in `VerifyCommit` will be slightly altered. -The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp. -The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`. -The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp. -Specifically, the `VerifyCommit` function will be updated to use this new timestamp. +Since `CommitSig`s will no longer contain a timestamp, the validator authenticating a commit will no longer include the `CommitSig` timestamp in the hash of fields it builds to check against the cryptographic signature. + +#### Timestamp validation when a block has not received a polka + +The [POLRound](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/types/proposal.go#L29) in the `Proposal` message indicates which round the block received a polka. +A negative value in the `POLRound` field indicates that the block has not previously been proposed on the network. +Therefore the validation logic will check for timely when `POLRound < 0`. + +When a validator receives a `Proposal` message, the validator will check that the `Proposal.Timestamp` is at most `PRECISION` greater than the current Unix time known to the validator, and at minimum `PRECISION + MSGDELAY` less than the current Unix time known to the validator. +If the timestamp is not within these bounds, the proposed block will not be considered `timely`. 
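These bounds amount to a simple window check; a sketch (assuming the standard library `time` package, with hypothetical parameter names mirroring the spec's `PRECISION` and `MSGDELAY`):

```go
// isTimely sketches the bound above: a proposal timestamp is timely if it
// is no more than PRECISION ahead of the validator's clock and no more than
// PRECISION + MSGDELAY behind it.
func isTimely(proposalTime, now time.Time, precision, msgDelay time.Duration) bool {
	return !proposalTime.After(now.Add(precision)) &&
		!proposalTime.Before(now.Add(-(precision + msgDelay)))
}
```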
+ +Once a full block matching the `Proposal` message is received, the validator will also check that the timestamp in the `Header.Timestamp` of the block matches this `Proposal.Timestamp`. +Using the `Proposal.Timestamp` to check `timely` allows for the `MSGDELAY` parameter to be more finely tuned since `Proposal` messages do not change sizes and are therefore faster to gossip than full blocks across the network. + +A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height. +If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic. + +#### Timestamp validation when a block has received a polka + +When a block is re-proposed that has already received a +2/3 majority of `Prevote`s on the network, the `Proposal` message for the re-proposed block is created with a `POLRound` that is `>= 0`. +A validator will not check that the `Proposal` is `timely` if the propose message has a non-negative `POLRound`. +If the `POLRound` is non-negative, each validator will simply ensure that it received the `Prevote` messages for the proposed block in the round indicated by `POLRound`. + +If the validator did not receive `Prevote` messages for the proposed block in `POLRound`, then it will prevote nil. +Validators already check that +2/3 prevotes were seen in `POLRound`, so this does not represent a change to the prevote logic. + +A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height. +If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic. + +Additionally, this validation logic can be updated to check that the `Proposal.Timestamp` matches the `Header.Timestamp` of the proposed block, but it is less relevant since checking that votes were received is sufficient to ensure the block timestamp is correct. ### Changes to the prevote step @@ -383,26 +289,14 @@ Currently, a validator will prevote a proposal in one of three cases: * Case 1: Validator has no locked block and receives a valid proposal. * Case 2: Validator has a locked block and receives a valid proposal matching its locked block. -* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the new proposal’s block. +* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the proposal’s block, either in the current round or in a round greater than or equal to the round in which it locked its locked block. The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above. ### Changes to the precommit step The precommit step will not require much modification. -Its proposal validation rules will change in the same ways that validation will change in the prevote step. - -### Changes to locking a block -When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field. -In each subsequent round it will prevote that block. -A validator will only change which block it has locked if it sees +2/3 prevotes for a different block. 
- -This mechanism will remain largely unchanged. -The only difference is the addition of proposal timestamp validation. -A validator will prevote nil in a round if the proposal message it received is not `timely`. -Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block. -This difference is an incidental result of the changes to prevote validation. -It is included in this design for completeness and to clarify that no additional changes will be made to block locking. +Its proposal validation rules will change in the same ways that validation will change in the prevote step with the exception of the `timely` check: precommit validation will never check that the timestamp is `timely`. ### Remove voteTime Completely From a15ae5b53af59be514bb4b4095aa5e8ec2a3b9fe Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Mon, 15 Nov 2021 13:28:52 -0500 Subject: [PATCH 164/174] node+consensus: handshaker initialization (#7283) This mostly just pushes more of initialization out of the node package. --- cmd/tendermint/commands/replay.go | 9 +++--- internal/consensus/replay.go | 24 +++++++-------- internal/consensus/replay_file.go | 49 ++++++++++++++++++++----------- internal/consensus/replay_test.go | 22 +++++++------- node/node.go | 5 +++- node/setup.go | 20 ------------- 6 files changed, 63 insertions(+), 66 deletions(-) diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index e92274042..558208ab3 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -9,8 +9,9 @@ import ( var ReplayCmd = &cobra.Command{ Use: "replay", Short: "Replay messages from WAL", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, false) + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(logger, config.BaseConfig, config.Consensus, false) + }, } @@ -19,7 +20,7 @@ var ReplayCmd = &cobra.Command{ var ReplayConsoleCmd = &cobra.Command{ Use: "replay-console", Short: "Replay messages from WAL in a console", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(logger, config.BaseConfig, config.Consensus, true) }, } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 315fe0d9a..75945d3dc 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -211,30 +211,26 @@ type Handshaker struct { nBlocks int // number of blocks applied to the state } -func NewHandshaker(stateStore sm.Store, state sm.State, - store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { +func NewHandshaker( + logger log.Logger, + stateStore sm.Store, + state sm.State, + store sm.BlockStore, + eventBus types.BlockEventPublisher, + genDoc *types.GenesisDoc, +) *Handshaker { return &Handshaker{ stateStore: stateStore, initialState: state, store: store, - eventBus: eventbus.NopEventBus{}, + eventBus: eventBus, genDoc: genDoc, - logger: log.NewNopLogger(), + logger: logger, nBlocks: 0, } } -func (h *Handshaker) SetLogger(l log.Logger) { - h.logger = l -} - -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { - h.eventBus = eventBus -} - // NBlocks returns the number of blocks applied to the state. 
func (h *Handshaker) NBlocks() int { return h.nBlocks diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index c03b234aa..8243e4f22 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -32,12 +32,22 @@ const ( // replay messages interactively or all at once // replay the wal file -func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(cfg, csConfig) +func RunReplayFile( + logger log.Logger, + cfg config.BaseConfig, + csConfig *config.ConsensusConfig, + console bool, +) error { + consensusState, err := newConsensusStateForReplay(cfg, logger, csConfig) + if err != nil { + return err + } if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) + return fmt.Errorf("consensus replay: %w", err) } + + return nil } // Replay msgs in file or start the console @@ -293,28 +303,34 @@ func (pb *playback) replayConsoleLoop() int { //-------------------------------------------------------------------------------- // convenience for replay mode -func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State { +func newConsensusStateForReplay( + cfg config.BaseConfig, + logger log.Logger, + csConfig *config.ConsensusConfig, +) (*State, error) { dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get State stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + stateStore := sm.NewStore(stateDB) gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + state, err := sm.MakeGenesisState(gdoc) if err != nil { - tmos.Exit(err.Error()) + return nil, err } // Create proxyAppConn connection (consensus, mempool, query) @@ -322,27 +338,26 @@ func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.Consensu proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) err = proxyApp.Start() if err != nil { - tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + return nil, fmt.Errorf("starting proxy app conns: %w", err) } eventBus := eventbus.NewDefault() if err := eventBus.Start(); err != nil { - tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + return nil, fmt.Errorf("failed to start event bus: %w", err) } - handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) - handshaker.SetEventBus(eventBus) - err = handshaker.Handshake(proxyApp) - if err != nil { - tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + handshaker := NewHandshaker(logger, stateStore, state, blockStore, eventBus, gdoc) + + if err = handshaker.Handshake(proxyApp); err != nil { + return nil, err } mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore) consensusState := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) - return consensusState + return consensusState, nil } diff --git a/internal/consensus/replay_test.go 
b/internal/consensus/replay_test.go index 49b1b9092..a91d52b8a 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -25,6 +25,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -772,7 +773,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) + handshaker := NewHandshaker(log.TestingLogger(), stateStore, state, store, eventbus.NopEventBus{}, genDoc) proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) @@ -962,6 +963,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { blocks := sf.MakeBlocks(3, &state, privVal) store.chain = blocks + logger := log.TestingLogger() + // 2. Tendermint must panic if app returns wrong hash for the first block // - RANDOM HASH // - 0x02 @@ -979,7 +982,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } @@ -1003,7 +1006,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } @@ -1251,17 +1254,16 @@ func TestHandshakeUpdatesValidators(t *testing.T) { oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) + + handshaker := NewHandshaker(log.TestingLogger(), stateStore, state, store, eventbus.NopEventBus{}, genDoc) proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { require.NoError(t, proxyApp.Stop()) }) + if err := handshaker.Handshake(proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } diff --git a/node/node.go b/node/node.go index fa703f1d6..3fe6ea08e 100644 --- a/node/node.go +++ b/node/node.go @@ -224,7 +224,10 @@ func makeNode(cfg *config.Config, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync tendermint with the app. 
 	if !stateSync {
-		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, logger); err != nil {
+		if err := consensus.NewHandshaker(
+			logger.With("module", "handshaker"),
+			stateStore, state, blockStore, eventBus, genDoc,
+		).Handshake(proxyApp); err != nil {
 			return nil, combineCloseError(err, makeCloser(closers))
 		}
 
diff --git a/node/setup.go b/node/setup.go
index 8d5fb21b9..e254571e3 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -134,26 +134,6 @@ func createAndStartIndexerService(
 	return indexerService, eventSinks, nil
 }
 
-func doHandshake(
-	stateStore sm.Store,
-	state sm.State,
-	blockStore sm.BlockStore,
-	genDoc *types.GenesisDoc,
-	eventBus types.BlockEventPublisher,
-	proxyApp proxy.AppConns,
-	logger log.Logger,
-) error {
-
-	handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc)
-	handshaker.SetLogger(logger.With("module", "handshaker"))
-	handshaker.SetEventBus(eventBus)
-
-	if err := handshaker.Handshake(proxyApp); err != nil {
-		return fmt.Errorf("error during handshake: %v", err)
-	}
-	return nil
-}
-
 func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger, mode string) {
 	// Log the version info.
 	logger.Info("Version info",

From 2a455be46c2ee47a7e4fb2957eaa5544e4ac0099 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Mon, 15 Nov 2021 14:25:29 -0500
Subject: [PATCH 165/174] libs/os: remove arbitrary os.Exit (#7284)

I think calling os.Exit at arbitrary points is _bad_ and is good to delete.
I think panics in the case of data corruption have a chance of providing
useful information.
---
 internal/consensus/replay_file.go | 23 +++++++++++++----------
 internal/state/store.go           | 13 ++++---------
 libs/os/os.go                     |  5 -----
 rpc/jsonrpc/test/main.go          |  7 +++++--
 4 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go
index 8243e4f22..1f5e8e375 100644
--- a/internal/consensus/replay_file.go
+++ b/internal/consensus/replay_file.go
@@ -18,7 +18,6 @@ import (
 	sm "github.com/tendermint/tendermint/internal/state"
 	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/libs/log"
-	tmos "github.com/tendermint/tendermint/libs/os"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	"github.com/tendermint/tendermint/types"
 )
@@ -92,7 +91,10 @@ func (cs *State) ReplayFile(file string, console bool) error {
 	var msg *TimedWALMessage
 	for {
 		if nextN == 0 && console {
-			nextN = pb.replayConsoleLoop()
+			nextN, err = pb.replayConsoleLoop()
+			if err != nil {
+				return err
+			}
 		}
 
 		msg, err = pb.dec.Decode()
@@ -194,16 +196,17 @@ func (cs *State) startForReplay() {
 	}()*/
 }
 
-// console function for parsing input and running commands
-func (pb *playback) replayConsoleLoop() int {
+// console function for parsing input and running commands. The integer
+// return value is invalid unless the error is nil.
+func (pb *playback) replayConsoleLoop() (int, error) { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) line, more, err := bufReader.ReadLine() if more { - tmos.Exit("input is too long") + return 0, fmt.Errorf("input is too long") } else if err != nil { - tmos.Exit(err.Error()) + return 0, err } tokens := strings.Split(string(line), " ") @@ -217,13 +220,13 @@ func (pb *playback) replayConsoleLoop() int { // "next N" -> replay next N messages if len(tokens) == 1 { - return 0 + return 0, nil } i, err := strconv.Atoi(tokens[1]) if err != nil { fmt.Println("next takes an integer argument") } else { - return i + return i, nil } case "back": @@ -233,7 +236,7 @@ func (pb *playback) replayConsoleLoop() int { // NOTE: "back" is not supported in the state machine design, // so we restart and replay up to - ctx := context.Background() + ctx := context.TODO() // ensure all new step events are regenerated as expected newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ @@ -241,7 +244,7 @@ func (pb *playback) replayConsoleLoop() int { Query: types.EventQueryNewRoundStep, }) if err != nil { - tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + return 0, fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} diff --git a/internal/state/store.go b/internal/state/store.go index aff165aa1..0f1d2b444 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -11,7 +11,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" - tmos "github.com/tendermint/tendermint/libs/os" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -126,8 +125,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) { err = proto.Unmarshal(buf, sp) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } sm, err := FromProto(sp) @@ -424,8 +422,7 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er err = abciResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. @@ -544,8 +541,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error err = v.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. 
@@ -632,8 +628,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa
 	paramsInfo := new(tmstate.ConsensusParamsInfo)
 	if err = paramsInfo.Unmarshal(buf); err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
-		tmos.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed:
-		%v\n`, err))
+		panic(fmt.Sprintf(`data has been corrupted or its spec has changed: %+v`, err))
 	}
 	// TODO: ensure that buf is completely read.
 
diff --git a/libs/os/os.go b/libs/os/os.go
index f4b0f1810..02b98c52a 100644
--- a/libs/os/os.go
+++ b/libs/os/os.go
@@ -29,11 +29,6 @@ func TrapSignal(logger logger, cb func()) {
 	}()
 }
 
-func Exit(s string) {
-	fmt.Printf(s + "\n")
-	os.Exit(1)
-}
-
 // EnsureDir ensures the given directory exists, creating it if necessary.
 // Errors if the path already exists as a non-directory.
 func EnsureDir(dir string, mode os.FileMode) error {
diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go
index d348e1639..64f9de87a 100644
--- a/rpc/jsonrpc/test/main.go
+++ b/rpc/jsonrpc/test/main.go
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"net/http"
+	"os"
 
 	"github.com/tendermint/tendermint/libs/log"
 	tmos "github.com/tendermint/tendermint/libs/os"
@@ -35,10 +36,12 @@ func main() {
 	config := rpcserver.DefaultConfig()
 	listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections)
 	if err != nil {
-		tmos.Exit(err.Error())
+		logger.Error("rpc listening", "err", err)
+		os.Exit(1)
 	}
 
 	if err = rpcserver.Serve(listener, mux, logger, config); err != nil {
-		tmos.Exit(err.Error())
+		logger.Error("rpc serve", "err", err)
+		os.Exit(1)
 	}
 }

From d5df412b26c7dfbffbedfbfb3085cc51a1582640 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Mon, 15 Nov 2021 15:06:15 -0500
Subject: [PATCH 166/174] proto: update the mechanism for generating protos
 from spec repo (#7269)

This pull request updates the `protocgen.sh` script to insert the
`go_package` option into all of the downloaded proto files.

A related pull request into the spec repo removes this option from the
.proto files: https://github.com/tendermint/spec/pull/358

This pull request, along with the related spec PR, aims to move the
creation of the `tendermintdev/docker-build-proto` container into the spec
repo. This change also relies on several fixes to that container that are
made in the PR into the spec repo.
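For readers who want to see the transformation in one place, here is a rough Go equivalent of what the new `scripts/protopackage.sh` step does with sed. This is an illustrative sketch only: the program, its name, and its argument handling are hypothetical, and it is not part of this change.

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// Usage: protopackage <file.proto> <import-path>
// Inserts or replaces `option go_package = "<import-path>";` in the file.
func main() {
	fname, importPath := os.Args[1], os.Args[2]
	src, err := os.ReadFile(fname)
	if err != nil {
		panic(err)
	}
	opt := fmt.Sprintf("option go_package = %q;", importPath)
	optRe := regexp.MustCompile(`option\s+go_package\s+=\s+.*;`)
	if optRe.Match(src) {
		// The option is already present: replace it in place.
		src = optRe.ReplaceAll(src, []byte(opt))
	} else {
		// Otherwise insert the option directly after the package declaration.
		pkgRe := regexp.MustCompile(`(?m)^(package tendermint.*)$`)
		src = pkgRe.ReplaceAll(src, []byte("$1\n\n"+opt))
	}
	if err := os.WriteFile(fname, src, 0o644); err != nil {
		panic(err)
	}
}
```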
--- .github/workflows/proto-docker.yml | 51 --------------------------- Makefile | 12 +++---- buf.gen.yaml | 13 ------- buf.yaml | 16 --------- scripts/protocgen.sh | 55 ++++++++++++++++-------------- scripts/protopackage.sh | 23 +++++++++++++ tools/proto/Dockerfile | 27 --------------- 7 files changed, 59 insertions(+), 138 deletions(-) delete mode 100644 .github/workflows/proto-docker.yml delete mode 100644 buf.gen.yaml delete mode 100644 buf.yaml create mode 100755 scripts/protopackage.sh delete mode 100644 tools/proto/Dockerfile diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml deleted file mode 100644 index 3f98a4566..000000000 --- a/.github/workflows/proto-docker.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Build & Push TM Proto Builder -on: - pull_request: - paths: - - "tools/proto/*" - push: - branches: - - master - paths: - - "tools/proto/*" - schedule: - # run this job once a month to recieve any go or buf updates - - cron: "* * 1 * *" - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.4.0 - - name: Prepare - id: prep - run: | - DOCKER_IMAGE=tendermintdev/docker-build-proto - VERSION=noop - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/* ]]; then - VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') - if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then - VERSION=latest - fi - fi - TAGS="${DOCKER_IMAGE}:${VERSION}" - echo ::set-output name=tags::${TAGS} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1.6.0 - - - name: Login to DockerHub - uses: docker/login-action@v1.10.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Publish to Docker Hub - uses: docker/build-push-action@v2.7.0 - with: - context: ./tools/proto - file: ./tools/proto/Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} diff --git a/Makefile b/Makefile index 2bac7f5bf..353b8a5d2 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ endif LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" HTTPS_GIT := https://github.com/tendermint/tendermint.git -DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf +DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto CGO_ENABLED ?= 0 # handle nostrip @@ -85,24 +85,24 @@ proto-all: proto-gen proto-lint proto-check-breaking proto-gen: @docker pull -q tendermintdev/docker-build-proto @echo "Generating Protobuf files" - @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh + @$(DOCKER_PROTO_BUILDER) sh ./scripts/protocgen.sh .PHONY: proto-gen proto-lint: - @$(DOCKER_BUF) lint --error-format=json + @$(DOCKER_PROTO_BUILDER) buf lint --error-format=json .PHONY: proto-lint proto-format: @echo "Formatting Protobuf files" - docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; + @$(DOCKER_PROTO_BUILDER) find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; .PHONY: proto-format proto-check-breaking: - @$(DOCKER_BUF) breaking --against .git#branch=master + @$(DOCKER_PROTO_BUILDER) buf breaking --against .git#branch=master .PHONY: 
proto-check-breaking proto-check-breaking-ci: - @$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master + @$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT)#branch=master .PHONY: proto-check-breaking-ci ############################################################################### diff --git a/buf.gen.yaml b/buf.gen.yaml deleted file mode 100644 index dc56781dd..000000000 --- a/buf.gen.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# The version of the generation template. -# Required. -# The only currently-valid value is v1beta1. -version: v1beta1 - -# The plugins to run. -plugins: - # The name of the plugin. - - name: gogofaster - # The the relative output directory. - out: proto - # Any options to provide to the plugin. - opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative diff --git a/buf.yaml b/buf.yaml deleted file mode 100644 index cc4aced57..000000000 --- a/buf.yaml +++ /dev/null @@ -1,16 +0,0 @@ -version: v1beta1 - -build: - roots: - - proto - - third_party/proto -lint: - use: - - BASIC - - FILE_LOWER_SNAKE_CASE - - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 240570d61..8e121448b 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -1,39 +1,44 @@ #!/usr/bin/env bash +set -euo pipefail -set -eo pipefail +# By default, this script runs against the latest commit to the master branch +# in the Tendermint spec repository. To use this script with a different version +# of the spec repository, run it with the $VERS environment variable set to the +# desired branch name or commit hash from the spec repo. : ${VERS:=master} -URL_PATH=archive/ -if [[ VERS -ne master ]]; then - URL_PATH=archive/refs/tags/v -fi -# Edit this line to clone your branch, if you are modifying protobuf files -curl -qL "https://github.com/tendermint/spec/${URL_PATH}${VERS}.tar.gz" | tar -xjf - spec-"$VERS"/proto/ +echo "fetching proto files" -cp -r ./spec-"$VERS"/proto/tendermint/** ./proto/tendermint +# Get shortened ref of commit +REF=$(curl -H "Accept: application/vnd.github.v3.sha" -qL \ + "https://api.github.com/repos/tendermint/spec/commits/${VERS}" \ + | cut -c -7) -buf generate --path proto/tendermint +readonly OUTDIR="tendermint-spec-${REF}" +curl -qL "https://api.github.com/repos/tendermint/spec/tarball/${REF}" | tar -xzf - ${OUTDIR}/ + +cp -r ${OUTDIR}/proto/tendermint/* ./proto/tendermint +cp -r ${OUTDIR}/third_party/** ./third_party + +MODNAME="$(go list -m)" +find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \ + -exec sh ./scripts/protopackage.sh {} "$MODNAME" ';' + +# For historical compatibility, the abci file needs to get a slightly different import name +# so that it can be moved into the ./abci/types directory. 
+sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types" + +buf generate --path proto/tendermint --template ./${OUTDIR}/buf.gen.yaml --config ./${OUTDIR}/buf.yaml mv ./proto/tendermint/abci/types.pb.go ./abci/types -echo "proto files have been generated" +echo "proto files have been compiled" echo "removing copied files" -rm -rf ./proto/tendermint/abci -rm -rf ./proto/tendermint/blocksync/types.proto -rm -rf ./proto/tendermint/consensus/types.proto -rm -rf ./proto/tendermint/mempool/types.proto -rm -rf ./proto/tendermint/p2p/types.proto -rm -rf ./proto/tendermint/p2p/conn.proto -rm -rf ./proto/tendermint/p2p/pex.proto -rm -rf ./proto/tendermint/statesync/types.proto -rm -rf ./proto/tendermint/types/block.proto -rm -rf ./proto/tendermint/types/evidence.proto -rm -rf ./proto/tendermint/types/params.proto -rm -rf ./proto/tendermint/types/types.proto -rm -rf ./proto/tendermint/types/validator.proto -rm -rf ./proto/tendermint/version/types.proto +find ${OUTDIR}/proto/tendermint/ -name *.proto \ + | sed "s/$OUTDIR\/\(.*\)/\1/g" \ + | xargs -I {} rm {} -rm -rf ./spec-"$VERS" +rm -rf ${OUTDIR} diff --git a/scripts/protopackage.sh b/scripts/protopackage.sh new file mode 100755 index 000000000..a69e758ca --- /dev/null +++ b/scripts/protopackage.sh @@ -0,0 +1,23 @@ +#!/usr/bin/sh +set -eo pipefail + +# This script appends the "option go_package" proto option to the file located at $FNAME. +# This option specifies what the package will be named when imported by other packages. +# This option is not directly included in the proto files to allow the files to more easily +# be hosted in github.com/tendermint/spec and shared between other repos. +# If the option is already specified in the file, it will be replaced using the +# arguments passed to this script. + +FNAME="${1:?missing required .proto filename}" +MODNAME=$(echo $2| sed 's/\//\\\//g') +PACKAGE="$(dirname $FNAME | sed 's/^\.\/\(.*\)/\1/g' | sed 's/\//\\\//g')" +if [[ ! -z "$3" ]]; then + PACKAGE="$(echo $3 | sed 's/\//\\\//g')" +fi + + +if ! grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then + sed -i "s/\(package tendermint.*\)/\1\n\noption go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +else + sed -i "s/option\s\+go_package\s\+=\s\+.*;/option go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +fi diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile deleted file mode 100644 index 500822690..000000000 --- a/tools/proto/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM bufbuild/buf:latest as buf - -FROM golang:1.14-alpine3.11 as builder - -RUN apk add --update --no-cache build-base curl git upx && \ - rm -rf /var/cache/apk/* - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGO_PROTOBUF_VERSION=1.3.2 - -RUN GO111MODULE=on go get \ - github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \ - mv /go/bin/protoc-gen-go* /usr/local/bin/ - - -FROM alpine:edge - -WORKDIR /work - -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \ - apk add --update --no-cache clang && \ - rm -rf /var/cache/apk/* - -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --from=buf /usr/local/bin /usr/local/bin From dbac109d01108a043d511814d72bd19a9685ab21 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger"
Date: Tue, 16 Nov 2021 06:31:10 -0800
Subject: [PATCH 167/174] docs: clarify where doc site config settings must
 land (#7289)

Since the doc site is built from the backport branches, the config changes
for a new major release also need to be replicated into the backport branch
as well as master. Update the release docs to mention that specifically,
since I missed it during the v0.35 release.
---
 RELEASES.md         | 2 ++
 docs/DOCS_README.md | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/RELEASES.md b/RELEASES.md
index b2c5ef6d5..a7f862e33 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -146,6 +146,8 @@ If there were no release candidates, begin by creating a backport branch, as des
    - Add a new entry to `themeConfig.versions` in
      [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
      release in the dropdown versions menu.
+   - Commit these changes to `master` and backport them into the backport
+     branch for this release.
 
 ## Minor release (point releases)
 
diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md
index c1ab1580a..da06785d5 100644
--- a/docs/DOCS_README.md
+++ b/docs/DOCS_README.md
@@ -11,9 +11,9 @@ and other supported release branches.
 
 There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml)
 in the `tendermint/docs` repository that clones and builds the documentation
-site from the contents of this `docs` directory, for `master` and for each
-supported release branch. Under the hood, this workflow runs `make build-docs`
-from the [Makefile](../Makefile#L214).
+site from the contents of this `docs` directory, for `master` and for the
+backport branch of each supported release. Under the hood, this workflow runs
+`make build-docs` from the [Makefile](../Makefile#L214).
 
 The list of supported versions are defined in
 [`config.js`](./.vuepress/config.js), which defines the UI menu on the
 documentation site, and also in

From d7606777cf5a8304c41fd8c77951658ff04ef3c5 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Tue, 16 Nov 2021 11:20:56 -0500
Subject: [PATCH 168/174] libs/service: pass logger explicitly (#7288)

This is a very small change, but removes a method from the
`service.Service` interface (a win!) and forces callers to explicitly pass
loggers into objects during construction rather than injecting them later.
There's no real need for this kind of lazy construction of loggers, and I
think mutable loggers create a decent potential for confusion.

The main concern I have is that this changes the constructor API for ABCI
clients. I think this is fine, and I suspect that as we plumb contexts
through and make changes to the RPC services, there'll be a number of
similar sorts of changes to various (quasi) public interfaces, which I
think we should welcome.
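To make the shape of the change concrete, here is a small sketch of what a call site looks like after this change, based on the test updates in this diff. The `newClient` helper and its package are hypothetical, written only for illustration:

```go
package example

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/libs/log"
)

func newClient(logger log.Logger, socket string) abciclient.Client {
	// Before this change, construction and logger injection were separate,
	// mutable steps:
	//
	//   client := abciclient.NewSocketClient(socket, false)
	//   client.SetLogger(logger.With("module", "abci-client"))
	//
	// Now the logger is a required constructor argument, so the compiler
	// guarantees every client is built with one.
	return abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
}
```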
---
 CHANGELOG_PENDING.md                      |  2 +
 abci/client/client.go                     |  7 +-
 abci/client/creators.go                   | 11 +--
 abci/client/grpc_client.go                |  5 +-
 abci/client/socket_client.go              |  5 +-
 abci/client/socket_client_test.go         | 20 +++--
 abci/cmd/abci-cli/abci-cli.go             |  7 +-
 abci/example/example_test.go              | 18 +++--
 abci/example/kvstore/kvstore_test.go      | 14 ++--
 abci/server/grpc_server.go                |  5 +-
 abci/server/server.go                     |  7 +-
 abci/server/socket_server.go              | 14 +---
 abci/tests/client_server_test.go          |  7 +-
 internal/blocksync/pool.go                | 10 ++-
 internal/blocksync/pool_test.go           |  9 +--
 internal/blocksync/reactor.go             |  2 +-
 internal/blocksync/reactor_test.go        |  4 +-
 internal/consensus/byzantine_test.go      |  7 +-
 internal/consensus/common_test.go         | 30 ++++----
 internal/consensus/invalid_test.go        |  3 +-
 internal/consensus/mempool_test.go        | 16 ++--
 internal/consensus/reactor_test.go        | 11 +--
 internal/consensus/replay.go              |  2 +-
 internal/consensus/replay_file.go         | 10 +--
 internal/consensus/replay_stubs.go        |  5 +-
 internal/consensus/replay_test.go         | 33 ++++----
 internal/consensus/state.go               | 15 +---
 internal/consensus/state_test.go          | 75 ++++++++++---------
 internal/consensus/ticker.go              |  6 +-
 internal/consensus/wal.go                 |  9 +--
 internal/consensus/wal_generator.go       |  9 +--
 internal/consensus/wal_test.go            | 12 ++-
 internal/eventbus/event_bus.go            | 15 ++--
 internal/eventbus/event_bus_test.go       | 15 ++--
 internal/inspect/inspect.go               | 24 +++---
 internal/mempool/mempool_test.go          |  6 +-
 internal/p2p/conn/connection.go           | 19 ++---
 internal/p2p/conn/connection_test.go      | 50 ++++++-------
 internal/p2p/transport_mconn.go           |  2 +-
 internal/p2p/trust/store.go               |  5 +-
 internal/p2p/trust/store_test.go          | 24 +++---
 internal/proxy/app_conn_test.go           | 30 ++++----
 internal/proxy/client.go                  |  5 +-
 internal/proxy/multi_app_conn.go          | 17 +++--
 internal/proxy/multi_app_conn_test.go     | 10 +--
 internal/state/execution_test.go          | 19 +++--
 internal/state/helpers_test.go            |  3 +-
 internal/state/indexer/indexer_service.go |  9 ---
 .../state/indexer/indexer_service_test.go  | 15 +++-
 libs/pubsub/pubsub.go                     |  2 +-
 libs/pubsub/pubsub_test.go                | 12 ++-
 libs/service/service.go                   |  8 --
 node/node.go                              |  2 +-
 node/node_test.go                         |  6 +-
 node/setup.go                             | 11 +--
 test/e2e/node/main.go                     |  2 +-
 test/fuzz/mempool/checktx.go              |  2 +-
 57 files changed, 347 insertions(+), 356 deletions(-)

diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 08f53c6e0..cb1a79cac 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -32,6 +32,8 @@ Special thanks to external contributors on this release:
   - [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. (@tychoish)
   - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
   - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish)
+  - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)
+
 
 - Blockchain Protocol
 
diff --git a/abci/client/client.go b/abci/client/client.go
index a38c7f81b..1f0017557 100644
--- a/abci/client/client.go
+++ b/abci/client/client.go
@@ -7,6 +7,7 @@ import (
 
 	"github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 )
 
@@ -68,12 +69,12 @@ type Client interface {
 
 // NewClient returns a new ABCI client of the specified transport type.
// It returns an error if the transport is not "socket" or "grpc" -func NewClient(addr, transport string, mustConnect bool) (client Client, err error) { +func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) { switch transport { case "socket": - client = NewSocketClient(addr, mustConnect) + client = NewSocketClient(logger, addr, mustConnect) case "grpc": - client = NewGRPCClient(addr, mustConnect) + client = NewGRPCClient(logger, addr, mustConnect) default: err = fmt.Errorf("unknown abci transport %s", transport) } diff --git a/abci/client/creators.go b/abci/client/creators.go index e17b15eca..7cabb2e43 100644 --- a/abci/client/creators.go +++ b/abci/client/creators.go @@ -5,17 +5,18 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" ) // Creator creates new ABCI clients. -type Creator func() (Client, error) +type Creator func(log.Logger) (Client, error) // NewLocalCreator returns a Creator for the given app, // which will be running locally. func NewLocalCreator(app types.Application) Creator { mtx := new(tmsync.Mutex) - return func() (Client, error) { + return func(_ log.Logger) (Client, error) { return NewLocalClient(mtx, app), nil } } @@ -23,9 +24,9 @@ func NewLocalCreator(app types.Application) Creator { // NewRemoteCreator returns a Creator for the given address (e.g. // "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you // want the client to connect before reporting success. -func NewRemoteCreator(addr, transport string, mustConnect bool) Creator { - return func() (Client, error) { - remoteApp, err := NewClient(addr, transport, mustConnect) +func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator { + return func(log.Logger) (Client, error) { + remoteApp, err := NewClient(logger, addr, transport, mustConnect) if err != nil { return nil, fmt.Errorf("failed to connect to proxy: %w", err) } diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index f1123fab5..f4cd5f3e9 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -42,7 +43,7 @@ var _ Client = (*grpcClient)(nil) // which is expensive, but easy - if you want something better, use the socket // protocol! maybe one day, if people really want it, we use grpc streams, but // hopefully not :D -func NewGRPCClient(addr string, mustConnect bool) Client { +func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &grpcClient{ addr: addr, mustConnect: mustConnect, @@ -54,7 +55,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client { // gRPC calls while processing a slow callback at the channel head. 
chReqRes: make(chan *ReqRes, 64), } - cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli) + cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli) return cli } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 726c554d4..00e981123 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -13,6 +13,7 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -50,7 +51,7 @@ var _ Client = (*socketClient)(nil) // NewSocketClient creates a new socket client, which connects to a given // address. If mustConnect is true, the client will return an error upon start // if it fails to connect. -func NewSocketClient(addr string, mustConnect bool) Client { +func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &socketClient{ reqQueue: make(chan *reqResWithContext, reqQueueSize), mustConnect: mustConnect, @@ -59,7 +60,7 @@ func NewSocketClient(addr string, mustConnect bool) Client { reqSent: list.New(), resCb: nil, } - cli.BaseService = *service.NewBaseService(nil, "socketClient", cli) + cli.BaseService = *service.NewBaseService(logger, "socketClient", cli) return cli } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 53ba7b672..0e2fdffa3 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -14,6 +14,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -21,8 +22,9 @@ var ctx = context.Background() func TestProperSyncCalls(t *testing.T) { app := slowApp{} + logger := log.TestingLogger() - s, c := setupClientServer(t, app) + s, c := setupClientServer(t, logger, app) t.Cleanup(func() { if err := s.Stop(); err != nil { t.Error(err) @@ -57,8 +59,9 @@ func TestProperSyncCalls(t *testing.T) { func TestHangingSyncCalls(t *testing.T) { app := slowApp{} + logger := log.TestingLogger() - s, c := setupClientServer(t, app) + s, c := setupClientServer(t, logger, app) t.Cleanup(func() { if err := s.Stop(); err != nil { t.Log(err) @@ -99,18 +102,23 @@ func TestHangingSyncCalls(t *testing.T) { } } -func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abciclient.Client) { +func setupClientServer( + t *testing.T, + logger log.Logger, + app types.Application, +) (service.Service, abciclient.Client) { + t.Helper() + // some port between 20k and 30k port := 20000 + rand.Int31()%10000 addr := fmt.Sprintf("localhost:%d", port) - s, err := server.NewServer(addr, "socket", app) + s, err := server.NewServer(logger, addr, "socket", app) require.NoError(t, err) err = s.Start() require.NoError(t, err) - c := abciclient.NewSocketClient(addr, true) + c := abciclient.NewSocketClient(logger, addr, true) err = c.Start() require.NoError(t, err) diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 9fae6fc05..3ffc7dbfa 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -67,11 +67,10 @@ var RootCmd = &cobra.Command{ if client == nil { var err error - client, err = abciclient.NewClient(flagAddress, flagAbci, false) + client, err = 
abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false) if err != nil { return err } - client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { return err } @@ -586,11 +585,11 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { } // Start the listener - srv, err := server.NewServer(flagAddress, flagAbci, app) + srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app) if err != nil { return err } - srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { return err } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 64705d0e0..c984fa2fb 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -44,14 +44,16 @@ func TestGRPC(t *testing.T) { } func testStream(t *testing.T, app types.Application) { + t.Helper() + const numDeliverTxs = 20000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - + logger := log.TestingLogger() // Start the listener - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + err := server.Start() require.NoError(t, err) t.Cleanup(func() { @@ -61,8 +63,8 @@ func testStream(t *testing.T, app types.Application) { }) // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(log.TestingLogger().With("module", "abci-client")) + client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false) + err = client.Start() require.NoError(t, err) t.Cleanup(func() { @@ -132,10 +134,10 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - + logger := log.TestingLogger() // Start the listener - server := abciserver.NewGRPCServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) + if err := server.Start(); err != nil { t.Fatalf("Error starting GRPC server: %v", err.Error()) } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 7c89db1ad..e64e0ed9e 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -234,15 +234,13 @@ func makeSocketClientServer(app types.Application, name string) (abciclient.Clie socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(logger.With("module", "abci-server")) + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) if err := server.Start(); err != nil { return nil, nil, err } // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(logger.With("module", "abci-client")) + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) if err := client.Start(); err != nil { if err = server.Stop(); err != nil { return nil, nil, err @@ -259,14 +257,14 @@ func makeGRPCClientServer(app types.Application, name string) (abciclient.Client logger := log.TestingLogger() gapp := 
types.NewGRPCApplication(app) - server := abciserver.NewGRPCServer(socket, gapp) - server.SetLogger(logger.With("module", "abci-server")) + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp) + if err := server.Start(); err != nil { return nil, nil, err } - client := abciclient.NewGRPCClient(socket, true) - client.SetLogger(logger.With("module", "abci-client")) + client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) + if err := client.Start(); err != nil { if err := server.Stop(); err != nil { return nil, nil, err diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 503f0b64f..6d22f43c1 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -6,6 +6,7 @@ import ( "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -22,7 +23,7 @@ type GRPCServer struct { } // NewGRPCServer returns a new gRPC ABCI server -func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service { +func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &GRPCServer{ proto: proto, @@ -30,7 +31,7 @@ func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Se listener: nil, app: app, } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad02..2a6d50fd2 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -12,17 +12,18 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) { +func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) { var s service.Service var err error switch transport { case "socket": - s = NewSocketServer(protoAddr, app) + s = NewSocketServer(logger, protoAddr, app) case "grpc": - s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app)) + s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app)) default: err = fmt.Errorf("unknown server type %s", transport) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 85539645b..b24b38e38 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net" - "os" "runtime" "github.com/tendermint/tendermint/abci/types" @@ -19,7 +18,6 @@ import ( type SocketServer struct { service.BaseService - isLoggerSet bool proto string addr string @@ -33,7 +31,7 @@ type SocketServer struct { app types.Application } -func NewSocketServer(protoAddr string, app types.Application) service.Service { +func NewSocketServer(logger tmlog.Logger, protoAddr string, app types.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &SocketServer{ proto: proto, @@ -42,15 +40,10 @@ func NewSocketServer(protoAddr string, app types.Application) service.Service { app: app, conns: make(map[int]net.Conn), } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) 
return s } -func (s *SocketServer) SetLogger(l tmlog.Logger) { - s.BaseService.SetLogger(l) - s.isLoggerSet = true -} - func (s *SocketServer) OnStart() error { ln, err := net.Listen(s.proto, s.addr) if err != nil { @@ -164,9 +157,6 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] err := fmt.Errorf("recovered from panic: %v\n%s", r, buf) - if !s.isLoggerSet { - fmt.Fprintln(os.Stderr, err) - } closeConn <- err s.appMtx.Unlock() } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 62dc6e07e..6b4750b33 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -8,6 +8,7 @@ import ( abciclientent "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/libs/log" ) func TestClientServerNoAddrPrefix(t *testing.T) { @@ -15,12 +16,14 @@ func TestClientServerNoAddrPrefix(t *testing.T) { transport := "socket" app := kvstore.NewApplication() - server, err := abciserver.NewServer(addr, transport, app) + logger := log.TestingLogger() + + server, err := abciserver.NewServer(logger, addr, transport, app) assert.NoError(t, err, "expected no error on NewServer") err = server.Start() assert.NoError(t, err, "expected no error on server.Start") - client, err := abciclientent.NewClient(addr, transport, true) + client, err := abciclientent.NewClient(logger, addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") err = client.Start() assert.NoError(t, err, "expected no error on client.Start") diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 8117fe2d4..66ed24a79 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -91,7 +91,13 @@ type BlockPool struct { // NewBlockPool returns a new BlockPool with the height equal to start. Block // requests and errors will be sent to requestsCh and errorsCh accordingly. 
-func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { +func NewBlockPool( + logger log.Logger, + start int64, + requestsCh chan<- BlockRequest, + errorsCh chan<- peerError, +) *BlockPool { + bp := &BlockPool{ peers: make(map[types.NodeID]*bpPeer), @@ -104,7 +110,7 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p errorsCh: errorsCh, lastSyncRate: 0, } - bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) + bp.BaseService = *service.NewBaseService(logger, "BlockPool", bp) return bp } diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index cbe19acbe..b53699f97 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -82,8 +82,7 @@ func TestBlockPoolBasic(t *testing.T) { peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) err := pool.Start() if err != nil { @@ -142,8 +141,7 @@ func TestBlockPoolTimeout(t *testing.T) { peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) err := pool.Start() if err != nil { t.Error(err) @@ -210,8 +208,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) - pool := NewBlockPool(1, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.TestingLogger(), 1, requestsCh, errorsCh) err := pool.Start() require.NoError(t, err) t.Cleanup(func() { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 43c3e83cd..f18ed86b7 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -127,7 +127,7 @@ func NewReactor( initialState: state, blockExec: blockExec, store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), + pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh), consReactor: consReactor, blockSync: tmsync.NewBool(blockSync), requestsCh: requestsCh, diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 78e61020c..2b567aae7 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -97,8 +97,10 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) { t.Helper() + logger := log.TestingLogger() + rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics()) + rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics()) require.NoError(t, rts.app[nodeID].Start()) blockDB := dbm.NewMemDB() diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 59ab56bbb..df11067b9 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -87,20 +87,17 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Make State blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - 
cs.SetLogger(cs.Logger) + cs := NewState(logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) // set private validator pv := privVals[i] cs.SetPrivValidator(pv) - eventBus := eventbus.NewDefault() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) err = eventBus.Start() require.NoError(t, err) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger) states[i] = cs }() diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 8b25a2af7..780ec8804 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -389,25 +389,28 @@ func subscribeToVoter(t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Mess //------------------------------------------------------------------------------- // consensus states -func newState(state sm.State, pv types.PrivValidator, app abci.Application) (*State, error) { +func newState(logger log.Logger, state sm.State, pv types.PrivValidator, app abci.Application) (*State, error) { cfg, err := config.ResetTestRoot("consensus_state_test") if err != nil { return nil, err } - return newStateWithConfig(cfg, state, pv, app), nil + + return newStateWithConfig(logger, cfg, state, pv, app), nil } func newStateWithConfig( + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, ) *State { blockStore := store.NewBlockStore(dbm.NewMemDB()) - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore) + return newStateWithConfigAndBlockStore(logger, thisConfig, state, pv, app, blockStore) } func newStateWithConfigAndBlockStore( + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, @@ -422,7 +425,7 @@ func newStateWithConfigAndBlockStore( // Make Mempool mempool := mempool.NewTxMempool( - log.TestingLogger().With("module", "mempool"), + logger.With("module", "mempool"), thisConfig.Mempool, proxyAppConnMem, 0, @@ -441,13 +444,11 @@ func newStateWithConfigAndBlockStore( panic(err) } - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore) + cs := NewState(logger.With("module", "consensus"), thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetPrivValidator(pv) - eventBus := eventbus.NewDefault() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus := eventbus.NewDefault(logger.With("module", "events")) err := eventBus.Start() if err != nil { panic(err) @@ -468,13 +469,13 @@ func loadPrivValidator(cfg *config.Config) *privval.FilePV { return privValidator } -func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub, error) { +func randState(cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub, error) { // Get State state, privVals := randGenesisState(cfg, nValidators, false, 10) vss := make([]*validatorStub, nValidators) - cs, err := newState(state, privVals[0], kvstore.NewApplication()) + cs, err := newState(logger, state, privVals[0], kvstore.NewApplication()) if err != nil { return nil, nil, err } @@ -759,9 +760,9 @@ func randConsensusState( vals := 
types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore) + l := logger.With("validator", i, "module", "consensus") + css[i] = newStateWithConfigAndBlockStore(l, thisConfig, state, privVals[i], app, blockStore) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, func() { @@ -829,9 +830,8 @@ func randConsensusNetWithPeers( app.InitChain(abci.RequestInitChain{Validators: vals}) // sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above - css[i] = newStateWithConfig(thisConfig, state, privVal, app) + css[i] = newStateWithConfig(logger.With("validator", i, "module", "consensus"), thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, genDoc, peer0Config, func() { for _, dir := range configRootDirs { diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index f06692b77..fae89f227 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -26,8 +26,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { t.Cleanup(cleanup) for i := 0; i < 4; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) + ticker := NewTimeoutTicker(states[i].Logger) states[i].SetTimeoutTicker(ticker) } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index b31e1d901..78f42f993 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -17,6 +17,7 @@ import ( "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -34,7 +35,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) @@ -57,7 +58,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() @@ -78,7 +79,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) @@ -124,11 +125,14 @@ func deliverTxsRange(cs *State, start, end int) { func 
TestMempoolTxConcurrentWithCommit(t *testing.T) { config := configSetup(t) - + logger := log.TestingLogger() state, privVals := randGenesisState(config, 1, false, 10) stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) + + cs := newStateWithConfigAndBlockStore( + logger, config, state, privVals[0], NewCounterApplication(), blockStore) + err := stateStore.Save(state) require.NoError(t, err) newBlockHeaderCh := subscribe(t, cs.eventBus, types.EventQueryNewBlockHeader) @@ -155,7 +159,7 @@ func TestMempoolRmBadTx(t *testing.T) { app := NewCounterApplication() stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore) + cs := newStateWithConfigAndBlockStore(log.TestingLogger(), config, state, privVals[0], app, blockStore) err := stateStore.Save(state) require.NoError(t, err) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 3d5168da7..8fc562b06 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -411,18 +411,15 @@ func TestReactorWithEvidence(t *testing.T) { evpool2 := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) + cs := NewState(logger.With("validator", i, "module", "consensus"), + thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) cs.SetPrivValidator(pv) - eventBus := eventbus.NewDefault() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() - require.NoError(t, err) + eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) + require.NoError(t, eventBus.Start()) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger.With("validator", i, "module", "consensus")) states[i] = cs } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 75945d3dc..563e5ca64 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -421,7 +421,7 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } - mockApp := newMockProxyApp(appHash, abciResponses) + mockApp := newMockProxyApp(h.logger, appHash, abciResponses) h.logger.Info("Replay last block using mock app") state, err = h.replayBlock(state, storeBlockHeight, mockApp) return state.AppHash, err diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 1f5e8e375..6f1e64b2a 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -147,7 +147,7 @@ func (pb *playback) replayReset(count int, newStepSub eventbus.Subscription) err } pb.cs.Wait() - newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + newCS := NewState(pb.cs.Logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool) newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() @@ -337,14 +337,14 @@ func newConsensusStateForReplay( } // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator, 
proxy.NopMetrics()) + clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) err = proxyApp.Start() if err != nil { return nil, fmt.Errorf("starting proxy app conns: %w", err) } - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(logger) if err := eventBus.Start(); err != nil { return nil, fmt.Errorf("failed to start event bus: %w", err) } @@ -358,7 +358,7 @@ func newConsensusStateForReplay( mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(csConfig, state.Copy(), blockExec, + consensusState := NewState(logger, csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 1235baccb..679aba611 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -54,12 +55,12 @@ func (emptyMempool) CloseWAL() {} // Useful because we don't want to call Commit() twice for the same block on // the real app. -func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { +func newMockProxyApp(logger log.Logger, appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ appHash: appHash, abciResponses: abciResponses, }) - cli, _ := clientCreator() + cli, _ := clientCreator(logger) err := cli.Start() if err != nil { panic(err) diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index a91d52b8a..f5f5d1633 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -64,13 +64,13 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co privValidator := loadPrivValidator(consensusReplayConfig) blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) @@ -164,13 +164,13 @@ LOOP: require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) cs := newStateWithConfigAndBlockStore( + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) // start sending transactions ctx, cancel := context.WithCancel(context.Background()) @@ -639,7 +639,7 @@ func TestMockProxyApp(t *testing.T) { err = proto.Unmarshal(bytes, loadedAbciRes) require.NoError(t, err) - mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) + mock := newMockProxyApp(logger, []byte("mock_hash"), loadedAbciRes) abciRes := new(tmstate.ABCIResponses) abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) @@ -696,6 +696,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod cfg := sim.Config + logger := log.TestingLogger() if 
testValidatorsChange { testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) require.NoError(t, err) @@ -719,9 +720,8 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) - wal, err := NewWAL(walFile) + wal, err := NewWAL(logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) err = wal.Start() require.NoError(t, err) t.Cleanup(func() { @@ -742,7 +742,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(cfg, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) + state = buildTMStateFromChain(cfg, logger, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) latestAppHash := state.AppHash // make a new client creator @@ -754,7 +754,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod if nBlocks > 0 { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) @@ -773,8 +773,8 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) - handshaker := NewHandshaker(log.TestingLogger(), stateStore, state, store, eventbus.NopEventBus{}, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } @@ -888,6 +888,7 @@ func buildAppStateFromChain( func buildTMStateFromChain( cfg *config.Config, + logger log.Logger, mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, @@ -895,14 +896,15 @@ func buildTMStateFromChain( chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) sm.State { + blockStore *mockBlockStore, +) sm.State { // run the whole chain against this client to build up the tendermint state kvstoreApp := kvstore.NewPersistentKVStoreApplication( filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() clientCreator := abciclient.NewLocalCreator(kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { panic(err) } @@ -972,7 +974,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, allHashesAreWrong: true} clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { @@ -996,7 +998,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} 
clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) t.Cleanup(func() { @@ -1257,8 +1259,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - handshaker := NewHandshaker(log.TestingLogger(), stateStore, state, store, eventbus.NopEventBus{}, genDoc) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) + logger := log.TestingLogger() + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 6ebc54086..6cef4ac8f 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -153,6 +153,7 @@ type StateOption func(*State) // NewState returns a new State. func NewState( + logger log.Logger, cfg *config.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, @@ -168,7 +169,7 @@ func NewState( txNotifier: txNotifier, peerMsgQueue: make(chan msgInfo, msgQueueSize), internalMsgQueue: make(chan msgInfo, msgQueueSize), - timeoutTicker: NewTimeoutTicker(), + timeoutTicker: NewTimeoutTicker(logger), statsMsgQueue: make(chan msgInfo, msgQueueSize), done: make(chan struct{}), doWALCatchup: true, @@ -193,7 +194,7 @@ func NewState( // NOTE: we do not call scheduleRound0 yet, we do that upon Start() - cs.BaseService = *service.NewBaseService(nil, "State", cs) + cs.BaseService = *service.NewBaseService(logger, "State", cs) for _, option := range options { option(cs) } @@ -201,12 +202,6 @@ func NewState( return cs } -// SetLogger implements Service. -func (cs *State) SetLogger(l log.Logger) { - cs.BaseService.Logger = l - cs.timeoutTicker.SetLogger(l) -} - // SetEventBus sets event bus. func (cs *State) SetEventBus(b *eventbus.EventBus) { cs.eventBus = b @@ -481,14 +476,12 @@ func (cs *State) Wait() { // OpenWAL opens a file to log all consensus messages and timeouts for // deterministic accountability. 
func (cs *State) OpenWAL(walFile string) (WAL, error) { - wal, err := NewWAL(walFile) + wal, err := NewWAL(cs.Logger.With("wal", walFile), walFile) if err != nil { cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) return nil, err } - wal.SetLogger(cs.Logger.With("wal", walFile)) - if err := wal.Start(); err != nil { cs.Logger.Error("failed to start WAL", "err", err) return nil, err diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index c97acbe43..1d0741b1f 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -58,7 +58,7 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh func TestStateProposerSelection0(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) height, round := cs1.Height, cs1.Round @@ -102,7 +102,7 @@ func TestStateProposerSelection0(t *testing.T) { func TestStateProposerSelection2(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) // test needs more work for more than 3 validators + cs1, vss, err := randState(config, log.TestingLogger(), 4) // test needs more work for more than 3 validators require.NoError(t, err) height := cs1.Height @@ -143,7 +143,7 @@ func TestStateProposerSelection2(t *testing.T) { func TestStateEnterProposeNoPrivValidator(t *testing.T) { config := configSetup(t) - cs, _, err := randState(config, 1) + cs, _, err := randState(config, log.TestingLogger(), 1) require.NoError(t, err) cs.SetPrivValidator(nil) height, round := cs.Height, cs.Round @@ -165,7 +165,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { func TestStateEnterProposeYesPrivValidator(t *testing.T) { config := configSetup(t) - cs, _, err := randState(config, 1) + cs, _, err := randState(config, log.TestingLogger(), 1) require.NoError(t, err) height, round := cs.Height, cs.Round @@ -198,7 +198,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { func TestStateBadProposal(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 2) + cs1, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) height, round := cs1.Height, cs1.Round vs2 := vss[1] @@ -259,7 +259,7 @@ func TestStateBadProposal(t *testing.T) { func TestStateOversizedBlock(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 2) + cs1, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) cs1.state.ConsensusParams.Block.MaxBytes = 2000 height, round := cs1.Height, cs1.Round @@ -323,8 +323,9 @@ func TestStateOversizedBlock(t *testing.T) { // propose, prevote, and precommit a block func TestStateFullRound1(t *testing.T) { config := configSetup(t) + logger := log.TestingLogger() - cs, vss, err := randState(config, 1) + cs, vss, err := randState(config, logger, 1) require.NoError(t, err) height, round := cs.Height, cs.Round @@ -333,8 +334,8 @@ func TestStateFullRound1(t *testing.T) { if err := cs.eventBus.Stop(); err != nil { t.Error(err) } - eventBus := eventbus.NewDefault() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + cs.SetEventBus(eventBus) if err := eventBus.Start(); err != nil { t.Error(err) @@ -367,7 +368,7 @@ func TestStateFullRound1(t *testing.T) { func TestStateFullRoundNil(t *testing.T) { config := configSetup(t) - cs, vss, err := randState(config, 1) + cs, vss, err := 
randState(config, log.TestingLogger(), 1) require.NoError(t, err) height, round := cs.Height, cs.Round @@ -388,7 +389,7 @@ func TestStateFullRoundNil(t *testing.T) { func TestStateFullRound2(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 2) + cs1, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round @@ -431,7 +432,7 @@ func TestStateFullRound2(t *testing.T) { func TestStateLockNoPOL(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 2) + cs1, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round @@ -570,7 +571,7 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - cs2, _, err := randState(config, 2) // needed so generated block is different than locked block + cs2, _, err := randState(config, log.TestingLogger(), 2) // needed so generated block is different than locked block require.NoError(t, err) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) @@ -622,8 +623,9 @@ func TestStateLockNoPOL(t *testing.T) { // the others prevote a new block hence v1 changes lock and precommits the new block with the others func TestStateLockPOLRelock(t *testing.T) { config := configSetup(t) + logger := log.TestingLogger() - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, logger, 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -668,7 +670,7 @@ func TestStateLockPOLRelock(t *testing.T) { signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal - cs2, err := newState(cs1.state, vs2, kvstore.NewApplication()) + cs2, err := newState(logger, cs1.state, vs2, kvstore.NewApplication()) require.NoError(t, err) prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) @@ -725,7 +727,7 @@ func TestStateLockPOLRelock(t *testing.T) { func TestStateLockPOLUnlock(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -819,8 +821,9 @@ func TestStateLockPOLUnlock(t *testing.T) { // prevote and now v1 can lock onto the third block and precommit that func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { config := configSetup(t) + logger := log.TestingLogger() - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, logger, 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -861,7 +864,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal - cs2, err := newState(cs1.state, vs2, kvstore.NewApplication()) + cs2, err := newState(logger, cs1.state, vs2, kvstore.NewApplication()) require.NoError(t, err) prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) if prop == nil || propBlock == nil { @@ -906,7 +909,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { signAddVotes(config, cs1, tmproto.PrecommitType, nil, 
types.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal - cs3, err := newState(cs1.state, vs3, kvstore.NewApplication()) + cs3, err := newState(logger, cs1.state, vs3, kvstore.NewApplication()) require.NoError(t, err) prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1) if prop == nil || propBlock == nil { @@ -951,7 +954,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1075,7 +1078,7 @@ func TestStateLockPOLSafety1(t *testing.T) { func TestStateLockPOLSafety2(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1175,7 +1178,7 @@ func TestStateLockPOLSafety2(t *testing.T) { func TestProposeValidBlock(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1268,7 +1271,7 @@ func TestProposeValidBlock(t *testing.T) { func TestSetValidBlockOnDelayedPrevote(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1333,7 +1336,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { func TestSetValidBlockOnDelayedProposal(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1392,7 +1395,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { func TestWaitingTimeoutOnNilPolka(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1416,7 +1419,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1455,7 +1458,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1494,7 +1497,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := 
cs1.Height, int32(1) @@ -1524,7 +1527,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) @@ -1561,7 +1564,7 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { func TestCommitFromPreviousRound(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) @@ -1618,7 +1621,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { config := configSetup(t) config.Consensus.SkipTimeoutCommit = false - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} @@ -1682,7 +1685,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { config := configSetup(t) config.Consensus.SkipTimeoutCommit = false - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] @@ -1826,7 +1829,7 @@ func TestStateSlashingPrecommits(t *testing.T) { func TestStateHalt1(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) + cs1, vss, err := randState(config, log.TestingLogger(), 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -1897,7 +1900,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { config := configSetup(t) // create dummy peer - cs, _, err := randState(config, 1) + cs, _, err := randState(config, log.TestingLogger(), 1) require.NoError(t, err) peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") require.NoError(t, err) @@ -1943,7 +1946,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { func TestStateOutputVoteStats(t *testing.T) { config := configSetup(t) - cs, vss, err := randState(config, 2) + cs, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) // create dummy peer peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") @@ -1980,7 +1983,7 @@ func TestStateOutputVoteStats(t *testing.T) { func TestSignSameVoteTwice(t *testing.T) { config := configSetup(t) - _, vss, err := randState(config, 2) + _, vss, err := randState(config, log.TestingLogger(), 2) require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index fb3571ac8..0226889c9 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -19,8 +19,6 @@ type TimeoutTicker interface { Stop() error Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer - - SetLogger(log.Logger) } // timeoutTicker wraps time.Timer, @@ -37,13 +35,13 @@ type timeoutTicker struct { } // NewTimeoutTicker returns a new TimeoutTicker. 
-func NewTimeoutTicker() TimeoutTicker { +func NewTimeoutTicker(logger log.Logger) TimeoutTicker { tt := &timeoutTicker{ timer: time.NewTimer(0), tickChan: make(chan timeoutInfo, tickTockBufferSize), tockChan: make(chan timeoutInfo, tickTockBufferSize), } - tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) + tt.BaseService = *service.NewBaseService(logger, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout return tt } diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index 0d9efb839..24fef294d 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -88,7 +88,7 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped. -func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { +func NewWAL(logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { err := tmos.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) @@ -103,7 +103,7 @@ func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) enc: NewWALEncoder(group), flushInterval: walDefaultFlushInterval, } - wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal) + wal.BaseService = *service.NewBaseService(logger, "baseWAL", wal) return wal, nil } @@ -116,11 +116,6 @@ func (wal *BaseWAL) Group() *auto.Group { return wal.group } -func (wal *BaseWAL) SetLogger(l log.Logger) { - wal.BaseService.Logger = l - wal.group.SetLogger(l) -} - func (wal *BaseWAL) OnStart() error { size, err := wal.group.Head.Size() if err != nil { diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 77c6ce574..20cf0fae2 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -66,8 +66,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics()) - proxyApp.SetLogger(logger.With("module", "proxy")) + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) } @@ -77,8 +76,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } }) - eventBus := eventbus.NewDefault() - eventBus.SetLogger(logger.With("module", "events")) + eventBus := eventbus.NewDefault(logger.With("module", "events")) if err := eventBus.Start(); err != nil { return fmt.Errorf("failed to start event bus: %w", err) } @@ -90,8 +88,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) - consensusState.SetLogger(logger) + consensusState := NewState(logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) if privValidator != nil && privValidator != (*privval.FilePV)(nil) { consensusState.SetPrivValidator(privValidator) diff --git 
a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go
index 114bb21b1..6c1feb670 100644
--- a/internal/consensus/wal_test.go
+++ b/internal/consensus/wal_test.go
@@ -25,17 +25,17 @@ const (
 func TestWALTruncate(t *testing.T) {
 	walDir := t.TempDir()
 	walFile := filepath.Join(walDir, "wal")
+	logger := log.TestingLogger()
 
 	// this magic number 4K can truncate the content when RotateFile.
 	// defaultHeadSizeLimit(10M) is hard to simulate.
 	// this magic number 1 * time.Millisecond makes RotateFile check frequently.
 	// defaultGroupCheckDuration(5s) is hard to simulate.
-	wal, err := NewWAL(walFile,
+	wal, err := NewWAL(logger, walFile,
 		autofile.GroupHeadSizeLimit(4096),
 		autofile.GroupCheckDuration(1*time.Millisecond),
 	)
 	require.NoError(t, err)
-	wal.SetLogger(log.TestingLogger())
 	err = wal.Start()
 	require.NoError(t, err)
 	t.Cleanup(func() {
@@ -105,7 +105,7 @@ func TestWALWrite(t *testing.T) {
 	walDir := t.TempDir()
 	walFile := filepath.Join(walDir, "wal")
 
-	wal, err := NewWAL(walFile)
+	wal, err := NewWAL(log.TestingLogger(), walFile)
 	require.NoError(t, err)
 	err = wal.Start()
 	require.NoError(t, err)
@@ -148,9 +148,8 @@ func TestWALSearchForEndHeight(t *testing.T) {
 	}
 	walFile := tempWALWithData(walBody)
 
-	wal, err := NewWAL(walFile)
+	wal, err := NewWAL(log.TestingLogger(), walFile)
 	require.NoError(t, err)
-	wal.SetLogger(log.TestingLogger())
 
 	h := int64(3)
 	gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
@@ -170,12 +169,11 @@ func TestWALPeriodicSync(t *testing.T) {
 	walDir := t.TempDir()
 	walFile := filepath.Join(walDir, "wal")
-	wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond))
+	wal, err := NewWAL(log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond))
 
 	require.NoError(t, err)
 	wal.SetFlushInterval(walTestFlushInterval)
-	wal.SetLogger(log.TestingLogger())
 
 	// Generate some data
 	err = WALGenerateNBlocks(t, wal.Group(), 5)
diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go
index 263267461..95c8876d4 100644
--- a/internal/eventbus/event_bus.go
+++ b/internal/eventbus/event_bus.go
@@ -27,18 +27,17 @@ type EventBus struct {
 }
 
 // NewDefault returns a new event bus with default options.
-func NewDefault() *EventBus { - pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(0)) +func NewDefault(l log.Logger) *EventBus { + logger := l.With("module", "eventbus") + pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(0), + func(s *tmpubsub.Server) { + s.Logger = logger + }) b := &EventBus{pubsub: pubsub} - b.BaseService = *service.NewBaseService(nil, "EventBus", b) + b.BaseService = *service.NewBaseService(logger, "EventBus", b) return b } -func (b *EventBus) SetLogger(l log.Logger) { - b.BaseService.SetLogger(l) - b.pubsub.SetLogger(l.With("module", "pubsub")) -} - func (b *EventBus) OnStart() error { return b.pubsub.Start() } diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index e280d2bc4..64e6f3344 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -12,13 +12,14 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) func TestEventBusPublishEventTx(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -75,7 +76,7 @@ func TestEventBusPublishEventTx(t *testing.T) { } func TestEventBusPublishEventNewBlock(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -135,7 +136,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -242,7 +243,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { } func TestEventBusPublishEventNewBlockHeader(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -299,7 +300,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } func TestEventBusPublishEventNewEvidence(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -343,7 +344,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { } func TestEventBusPublish(t *testing.T) { - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -433,7 +434,7 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes // for random* functions mrand.Seed(time.Now().Unix()) - eventBus := eventbus.NewDefault() // set buffer capacity to 0 so we are not testing cache + eventBus := eventbus.NewDefault(log.TestingLogger()) // set buffer capacity to 0 so we are not testing cache err := eventBus.Start() if err != nil { b.Error(err) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 66e9c9421..59f0d92d2 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -44,20 +44,18 @@ type Inspector struct { /// //nolint:lll func New(cfg *config.RPCConfig, bs state.BlockStore, ss 
state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { - routes := rpc.Routes(*cfg, ss, bs, es, logger) - eb := eventbus.NewDefault() - eb.SetLogger(logger.With("module", "events")) - is := indexer.NewService(indexer.ServiceArgs{ - Sinks: es, - EventBus: eb, - Logger: logger.With("module", "txindex"), - }) + eb := eventbus.NewDefault(logger.With("module", "events")) + return &Inspector{ - routes: routes, - config: cfg, - logger: logger, - eventBus: eb, - indexerService: is, + routes: rpc.Routes(*cfg, ss, bs, es, logger), + config: cfg, + logger: logger, + eventBus: eb, + indexerService: indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eb, + Logger: logger.With("module", "txindex"), + }), } } diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 23c8c378e..428204214 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -77,12 +77,12 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { app := &application{kvstore.NewApplication()} cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) cfg.Mempool.CacheSize = cacheSize - - appConnMem, err := cc() + appConnMem, err := cc(logger) require.NoError(t, err) require.NoError(t, appConnMem.Start()) @@ -91,7 +91,7 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { require.NoError(t, appConnMem.Stop()) }) - return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) + return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) } func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 4782d7717..a7efe54e3 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -146,12 +146,14 @@ func DefaultMConnConfig() MConnConfig { // NewMConnection wraps net.Conn and creates multiplex connection func NewMConnection( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, ) *MConnection { return NewMConnectionWithConfig( + logger, conn, chDescs, onReceive, @@ -161,6 +163,7 @@ func NewMConnection( // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config func NewMConnectionWithConfig( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, @@ -185,6 +188,8 @@ func NewMConnectionWithConfig( created: time.Now(), } + mconn.BaseService = *service.NewBaseService(logger, "MConnection", mconn) + // Create channels var channelsIdx = map[ChannelID]*channel{} var channels = []*channel{} @@ -197,21 +202,12 @@ func NewMConnectionWithConfig( mconn.channels = channels mconn.channelsIdx = channelsIdx - mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn) - // maxPacketMsgSize() is a bit heavy, so call just once mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() return mconn } -func (c *MConnection) SetLogger(l log.Logger) { - c.BaseService.SetLogger(l) - for _, ch := range c.channels { - ch.SetLogger(l) - } -} - // OnStart implements BaseService func (c *MConnection) OnStart() error { if err := c.BaseService.OnStart(); err != nil { @@ -670,13 +666,10 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { sendQueue: make(chan []byte, 
desc.SendQueueCapacity),
 		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
 		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
+		Logger:                  conn.Logger,
 	}
 }
 
-func (ch *channel) SetLogger(l log.Logger) {
-	ch.Logger = l
-}
-
 // Queues message to send to this channel.
 // Goroutine-safe
 // Times out (and returns false) after defaultSendTimeout
diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go
index 1ed179ad8..5c4cdb156 100644
--- a/internal/p2p/conn/connection_test.go
+++ b/internal/p2p/conn/connection_test.go
@@ -19,17 +19,18 @@ import (
 
 const maxPingPongPacketSize = 1024 // bytes
 
-func createTestMConnection(conn net.Conn) *MConnection {
-	onReceive := func(chID ChannelID, msgBytes []byte) {
-	}
-	onError := func(r interface{}) {
-	}
-	c := createMConnectionWithCallbacks(conn, onReceive, onError)
-	c.SetLogger(log.TestingLogger())
-	return c
+func createTestMConnection(logger log.Logger, conn net.Conn) *MConnection {
+	return createMConnectionWithCallbacks(logger, conn,
+		// onReceive
+		func(chID ChannelID, msgBytes []byte) {
+		},
+		// onError
+		func(r interface{}) {
+		})
 }
 
 func createMConnectionWithCallbacks(
+	logger log.Logger,
 	conn net.Conn,
 	onReceive func(chID ChannelID, msgBytes []byte),
 	onError func(r interface{}),
@@ -38,8 +39,7 @@ func createMConnectionWithCallbacks(
 	cfg.PingInterval = 90 * time.Millisecond
 	cfg.PongTimeout = 45 * time.Millisecond
 	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
-	c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
-	c.SetLogger(log.TestingLogger())
+	c := NewMConnectionWithConfig(logger, conn, chDescs, onReceive, onError, cfg)
 	return c
 }
 
@@ -47,7 +47,7 @@ func TestMConnectionSendFlushStop(t *testing.T) {
 	server, client := NetPipe()
 	t.Cleanup(closeAll(t, client, server))
 
-	clientConn := createTestMConnection(client)
+	clientConn := createTestMConnection(log.TestingLogger(), client)
 	err := clientConn.Start()
 	require.Nil(t, err)
 	t.Cleanup(stopAll(t, clientConn))
@@ -81,7 +81,7 @@ func TestMConnectionSend(t *testing.T) {
 	server, client := NetPipe()
 	t.Cleanup(closeAll(t, client, server))
 
-	mconn := createTestMConnection(client)
+	mconn := createTestMConnection(log.TestingLogger(), client)
 	err := mconn.Start()
 	require.Nil(t, err)
 	t.Cleanup(stopAll(t, mconn))
@@ -117,12 +117,13 @@ func TestMConnectionReceive(t *testing.T) {
 	onError := func(r interface{}) {
 		errorsCh <- r
 	}
-	mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
+	logger := log.TestingLogger()
+	mconn1 := createMConnectionWithCallbacks(logger, client, onReceive, onError)
 	err := mconn1.Start()
 	require.Nil(t, err)
 	t.Cleanup(stopAll(t, mconn1))
 
-	mconn2 := createTestMConnection(server)
+	mconn2 := createTestMConnection(logger, server)
 	err = mconn2.Start()
 	require.Nil(t, err)
 	t.Cleanup(stopAll(t, mconn2))
@@ -152,7 +153,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
 	onError := func(r interface{}) {
 		errorsCh <- r
 	}
-	mconn := createMConnectionWithCallbacks(client, onReceive, onError)
+	mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
 	err := mconn.Start()
 	require.Nil(t, err)
 	t.Cleanup(stopAll(t, mconn))
@@ -190,7 +191,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
 	onError := func(r interface{}) {
 		errorsCh <- r
 	}
-	mconn := createMConnectionWithCallbacks(client, onReceive, onError)
+	mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError)
 	err := mconn.Start()
 	require.Nil(t,
err) t.Cleanup(stopAll(t, mconn)) @@ -244,7 +245,7 @@ func TestMConnectionMultiplePings(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) err := mconn.Start() require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -291,7 +292,7 @@ func TestMConnectionPingPongs(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) err := mconn.Start() require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -348,7 +349,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) err := mconn.Start() require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -379,19 +380,18 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) ( {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, } - mconnClient := NewMConnection(client, chDescs, onReceive, onError) - mconnClient.SetLogger(log.TestingLogger().With("module", "client")) + logger := log.TestingLogger() + mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError) err := mconnClient.Start() require.Nil(t, err) // create server conn with 1 channel // it fires on chOnErr when there's an error - serverLogger := log.TestingLogger().With("module", "server") + serverLogger := logger.With("module", "server") onError = func(r interface{}) { chOnErr <- struct{}{} } - mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) - mconnServer.SetLogger(serverLogger) + mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) err = mconnServer.Start() require.Nil(t, err) return mconnClient, mconnServer @@ -488,7 +488,7 @@ func TestMConnectionTrySend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) - mconn := createTestMConnection(client) + mconn := createTestMConnection(log.TestingLogger(), client) err := mconn.Start() require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 3e0281c39..736f5360a 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -336,13 +336,13 @@ func (c *mConnConnection) handshake( } mconn := conn.NewMConnectionWithConfig( + c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID)), secretConn, c.channelDescs, c.onReceive, c.onError, c.mConnConfig, ) - mconn.SetLogger(c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID))) return mconn, peerInfo, secretConn.RemotePubKey(), nil } diff --git a/internal/p2p/trust/store.go b/internal/p2p/trust/store.go index 9f200b9dd..158354e14 100644 --- a/internal/p2p/trust/store.go +++ b/internal/p2p/trust/store.go @@ -11,6 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -38,14 +39,14 @@ type MetricStore struct { // NewTrustMetricStore returns a store that saves data to the DB // and uses the config when 
creating new trust metrics.
 // Use Start to initialize the trust metric store
-func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore {
+func NewTrustMetricStore(db dbm.DB, tmc MetricConfig, logger log.Logger) *MetricStore {
 	tms := &MetricStore{
 		peerMetrics: make(map[string]*Metric),
 		db:          db,
 		config:      tmc,
 	}
 
-	tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms)
+	tms.BaseService = *service.NewBaseService(logger, "MetricStore", tms)
 	return tms
 }
diff --git a/internal/p2p/trust/store_test.go b/internal/p2p/trust/store_test.go
index ecf17dc4a..d1420b1dc 100644
--- a/internal/p2p/trust/store_test.go
+++ b/internal/p2p/trust/store_test.go
@@ -16,17 +16,16 @@ import (
 
 func TestTrustMetricStoreSaveLoad(t *testing.T) {
 	dir := t.TempDir()
+	logger := log.TestingLogger()
 
 	historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir)
 	require.NoError(t, err)
 
 	// 0 peers saved
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
+	store := NewTrustMetricStore(historyDB, DefaultConfig(), logger)
 	store.saveToDB()
 	// Load the data from the file
-	store = NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
+	store = NewTrustMetricStore(historyDB, DefaultConfig(), logger)
 	err = store.Start()
 	require.NoError(t, err)
 	// Make sure we still have 0 entries
@@ -64,8 +63,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) {
 	require.NoError(t, err)
 
 	// Load the data from the DB
-	store = NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
+	store = NewTrustMetricStore(historyDB, DefaultConfig(), logger)
+
 	err = store.Start()
 	require.NoError(t, err)
 
@@ -88,9 +87,10 @@ func TestTrustMetricStoreConfig(t *testing.T) {
 		IntegralWeight: 0.5,
 	}
 
+	logger := log.TestingLogger()
 	// Create a store with custom config
-	store := NewTrustMetricStore(historyDB, config)
-	store.SetLogger(log.TestingLogger())
+	store := NewTrustMetricStore(historyDB, config, logger)
+
 	err = store.Start()
 	require.NoError(t, err)
 
@@ -108,8 +108,8 @@ func TestTrustMetricStoreLookup(t *testing.T) {
 	historyDB, err := dbm.NewDB("", "memdb", "")
 	require.NoError(t, err)
 
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
+	store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger())
+
 	err = store.Start()
 	require.NoError(t, err)
 
@@ -131,8 +131,8 @@ func TestTrustMetricStorePeerScore(t *testing.T) {
 	historyDB, err := dbm.NewDB("", "memdb", "")
 	require.NoError(t, err)
 
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
+	store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger())
+
 	err = store.Start()
 	require.NoError(t, err)
diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go
index f1ae7fe1a..4b4abe607 100644
--- a/internal/proxy/app_conn_test.go
+++ b/internal/proxy/app_conn_test.go
@@ -48,11 +48,11 @@ var SOCKET = "socket"
 
 func TestEcho(t *testing.T) {
 	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
-	clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true)
+	logger := log.TestingLogger()
+	clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true)
 
 	// Start server
-	s := server.NewSocketServer(sockPath, kvstore.NewApplication())
-	s.SetLogger(log.TestingLogger().With("module", "abci-server"))
+	s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication())
 	if err := s.Start(); err
!= nil { t.Fatalf("Error starting socket server: %v", err.Error()) } @@ -63,11 +63,11 @@ func TestEcho(t *testing.T) { }) // Start client - cli, err := clientCreator() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } @@ -96,11 +96,11 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) if err := s.Start(); err != nil { b.Fatalf("Error starting socket server: %v", err.Error()) } @@ -111,11 +111,11 @@ func BenchmarkEcho(b *testing.B) { }) // Start client - cli, err := clientCreator() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { b.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { b.Fatalf("Error starting ABCI client: %v", err.Error()) } @@ -149,11 +149,11 @@ func BenchmarkEcho(b *testing.B) { func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) if err := s.Start(); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } @@ -164,11 +164,11 @@ func TestInfo(t *testing.T) { }) // Start client - cli, err := clientCreator() + cli, err := clientCreator(logger.With("module", "abci-client")) if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) + if err := cli.Start(); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } diff --git a/internal/proxy/client.go b/internal/proxy/client.go index ddb9a928d..4e034802e 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -6,6 +6,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/app" ) @@ -15,7 +16,7 @@ import ( // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store. 
-func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io.Closer) { +func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) { switch addr { case "kvstore": return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} @@ -32,7 +33,7 @@ func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} default: mustConnect := false // loop retrying - return abciclient.NewRemoteCreator(addr, transport, mustConnect), noopCloser{} + return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{} } } diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go index 0bcc64af6..62862d66e 100644 --- a/internal/proxy/multi_app_conn.go +++ b/internal/proxy/multi_app_conn.go @@ -6,7 +6,7 @@ import ( "syscall" abciclient "github.com/tendermint/tendermint/abci/client" - tmlog "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -33,8 +33,8 @@ type AppConns interface { } // NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { - return NewMultiAppConn(clientCreator, metrics) +func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { + return NewMultiAppConn(clientCreator, logger, metrics) } // multiAppConn implements AppConns. @@ -60,12 +60,12 @@ type multiAppConn struct { } // NewMultiAppConn makes all necessary abci connections to the application. -func NewMultiAppConn(clientCreator abciclient.Creator, metrics *Metrics) AppConns { +func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { multiAppConn := &multiAppConn{ metrics: metrics, clientCreator: clientCreator, } - multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) + multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn) return multiAppConn } @@ -128,7 +128,7 @@ func (app *multiAppConn) OnStop() { } func (app *multiAppConn) killTMOnClientError() { - killFn := func(conn string, err error, logger tmlog.Logger) { + killFn := func(conn string, err error, logger log.Logger) { logger.Error( fmt.Sprintf("%s connection terminated. Did the application crash? 
Please restart tendermint", conn), "err", err) @@ -181,11 +181,12 @@ func (app *multiAppConn) stopAllClients() { } func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { - c, err := app.clientCreator() + c, err := app.clientCreator(app.Logger.With( + "module", "abci-client", + "connection", conn)) if err != nil { return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) } - c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) if err := c.Start(); err != nil { return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) } diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go index 25ed692ab..af9c30091 100644 --- a/internal/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -14,24 +14,24 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abcimocks "github.com/tendermint/tendermint/abci/client/mocks" + "github.com/tendermint/tendermint/libs/log" ) func TestAppConns_Start_Stop(t *testing.T) { quitCh := make(<-chan struct{}) clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return().Times(4) clientMock.On("Start").Return(nil).Times(4) clientMock.On("Stop").Return(nil).Times(4) clientMock.On("Quit").Return(quitCh).Times(4) creatorCallCount := 0 - creator := func() (abciclient.Client, error) { + creator := func(logger log.Logger) (abciclient.Client, error) { creatorCallCount++ return clientMock, nil } - appConns := NewAppConns(creator, NopMetrics()) + appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) err := appConns.Start() require.NoError(t, err) @@ -68,11 +68,11 @@ func TestAppConns_Failure(t *testing.T) { clientMock.On("Quit").Return(recvQuitCh) clientMock.On("Error").Return(errors.New("EOF")).Once() - creator := func() (abciclient.Client, error) { + creator := func(log.Logger) (abciclient.Client, error) { return clientMock, nil } - appConns := NewAppConns(creator, NopMetrics()) + appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) err := appConns.Start() require.NoError(t, err) diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index fb70668fd..80b1b6e58 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -38,7 +38,8 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -46,7 +47,7 @@ func TestApplyBlock(t *testing.T) { state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) block := sf.MakeBlock(state, 1, new(types.Commit)) @@ -63,7 +64,7 @@ func TestApplyBlock(t *testing.T) { func TestBeginBlockValidators(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // no need to check 
error again @@ -126,7 +127,7 @@ func TestBeginBlockValidators(t *testing.T) { func TestBeginBlockByzantineValidators(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -351,7 +352,8 @@ func TestUpdateValidators(t *testing.T) { func TestEndBlockValidatorUpdates(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -362,14 +364,14 @@ func TestEndBlockValidatorUpdates(t *testing.T) { blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), + logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore, ) - eventBus := eventbus.NewDefault() + eventBus := eventbus.NewDefault(logger) err = eventBus.Start() require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests @@ -420,7 +422,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) { func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 0cedebb00..821c0757e 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -16,6 +16,7 @@ import ( sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -31,7 +32,7 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} cc := abciclient.NewLocalCreator(app) - return proxy.NewAppConns(cc, proxy.NopMetrics()) + return proxy.NewAppConns(cc, log.NewNopLogger(), proxy.NopMetrics()) } func makeAndCommitGoodBlock( diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index 8f69f488b..d5c230e81 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -41,15 +41,6 @@ func NewService(args ServiceArgs) *Service { return is } -// NewIndexerService returns a new service instance. -// Deprecated: Use NewService instead. -func NewIndexerService(es []EventSink, eventBus *eventbus.EventBus) *Service { - return NewService(ServiceArgs{ - Sinks: es, - EventBus: eventBus, - }) -} - // publish publishes a pubsub message to the service. The service blocks until // the message has been fully processed. 
func (is *Service) publish(msg pubsub.Message) error { diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d62ebffac..a986530f0 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -38,10 +38,18 @@ var ( dbName = "postgres" ) +// NewIndexerService returns a new service instance. +func NewIndexerService(es []indexer.EventSink, eventBus *eventbus.EventBus) *indexer.Service { + return indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eventBus, + }) +} + func TestIndexerServiceIndexesBlocks(t *testing.T) { + logger := tmlog.TestingLogger() // event bus - eventBus := eventbus.NewDefault() - eventBus.SetLogger(tmlog.TestingLogger()) + eventBus := eventbus.NewDefault(logger) err := eventBus.Start() require.NoError(t, err) t.Cleanup(func() { @@ -62,8 +70,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) - service := indexer.NewIndexerService(eventSinks, eventBus) - service.SetLogger(tmlog.TestingLogger()) + service := NewIndexerService(eventSinks, eventBus) err = service.Start() require.NoError(t, err) t.Cleanup(func() { diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index edf59bac4..c1224c642 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -136,10 +136,10 @@ type Option func(*Server) // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { s := new(Server) + s.BaseService = *service.NewBaseService(nil, "PubSub", s) for _, opt := range options { opt(s) } - s.BaseService = *service.NewBaseService(nil, "PubSub", s) // The queue receives items to be published. s.queue = make(chan item, s.queueCap) diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 357264e8a..8dcf8b3d9 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -357,8 +357,10 @@ func TestUnsubscribeAll(t *testing.T) { } func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - s.SetLogger(log.TestingLogger()) + s := pubsub.NewServer(pubsub.BufferCapacity(2), + func(s *pubsub.Server) { + s.Logger = log.TestingLogger() + }) require.Equal(t, 2, s.BufferCapacity()) @@ -376,8 +378,10 @@ func TestBufferCapacity(t *testing.T) { func newTestServer(t testing.TB) *pubsub.Server { t.Helper() - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) + s := pubsub.NewServer(func(s *pubsub.Server) { + s.Logger = log.TestingLogger() + }) + require.NoError(t, s.Start()) t.Cleanup(func() { assert.NoError(t, s.Stop()) diff --git a/libs/service/service.go b/libs/service/service.go index 0af243995..88c25d804 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -48,9 +48,6 @@ type Service interface { // String representation of the service String() string - // SetLogger sets a logger. - SetLogger(log.Logger) - // Wait blocks until the service is stopped. Wait() } @@ -122,11 +119,6 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { } } -// SetLogger implements Service by setting a logger. -func (bs *BaseService) SetLogger(l log.Logger) { - bs.Logger = l -} - // Start implements Service by calling OnStart (if defined). An error will be // returned if the service is already running or stopped. To restart a // stopped service, you need to call Reset.
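The hunks above all apply one pattern: with SetLogger gone from the Service interface, a logger reaches a service only at construction time, either as an explicit constructor argument (NewTrustMetricStore, NewSocketServer, NewAppConns, eventbus.NewDefault), through the abciclient.Creator callback, or, for pubsub.Server, through a functional Option, which is why NewServer now initializes BaseService before running the options. Below is a minimal sketch of the two injection styles, using only constructor signatures that appear in this patch; the package name and the wireServices helper are illustrative, and the snippet assumes it lives inside the tendermint module, since internal/proxy is not importable from outside it.

package example

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/pubsub"
)

func wireServices() error {
	logger := log.TestingLogger()

	// Constructor injection: NewAppConns receives the logger directly, and
	// multiAppConn derives a per-connection child logger for each ABCI
	// client, so no SetLogger call is needed afterwards.
	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
	appConns := proxy.NewAppConns(cc, logger.With("module", "proxy"), proxy.NopMetrics())
	if err := appConns.Start(); err != nil {
		return err
	}

	// Option injection: NewServer initializes BaseService before applying
	// the options, so an option may safely assign s.Logger.
	s := pubsub.NewServer(func(s *pubsub.Server) {
		s.Logger = logger.With("module", "pubsub")
	})
	return s.Start()
}

Moving the BaseService assignment ahead of the option loop is what makes the Option form safe: under the old ordering, a Logger set by an option would have been overwritten when NewBaseService ran afterwards.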
diff --git a/node/node.go b/node/node.go index 3fe6ea08e..a0b77823b 100644 --- a/node/node.go +++ b/node/node.go @@ -106,7 +106,7 @@ func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, err pval = nil } - appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + appClient, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) return makeNode(cfg, pval, diff --git a/node/node_test.go b/node/node_test.go index 90a585a63..e9c2159ed 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -249,7 +249,7 @@ func TestCreateProposalBlock(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -343,7 +343,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -407,7 +407,7 @@ func TestMaxProposalBlockSize(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) err = proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests diff --git a/node/setup.go b/node/setup.go index e254571e3..297ed0265 100644 --- a/node/setup.go +++ b/node/setup.go @@ -90,17 +90,17 @@ func initDBs( // nolint:lll func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator, metrics) - proxyApp.SetLogger(logger.With("module", "proxy")) + proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), metrics) + if err := proxyApp.Start(); err != nil { return nil, fmt.Errorf("error starting proxy app connections: %v", err) } + return proxyApp, nil } func createAndStartEventBus(logger log.Logger) (*eventbus.EventBus, error) { - eventBus := eventbus.NewDefault() - eventBus.SetLogger(logger.With("module", "events")) + eventBus := eventbus.NewDefault(logger.With("module", "events")) if err := eventBus.Start(); err != nil { return nil, err } @@ -309,6 +309,7 @@ func createConsensusReactor( logger = logger.With("module", "consensus") consensusState := consensus.NewState( + logger, cfg.Consensus, state.Copy(), blockExec, @@ -317,7 +318,7 @@ func createConsensusReactor( evidencePool, consensus.StateMetrics(csMetrics), ) - consensusState.SetLogger(logger) + if privValidator != nil && cfg.Mode == config.ModeValidator { consensusState.SetPrivValidator(privValidator) } diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index b5d9debe9..7a9e67915 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -102,7 +102,7 @@ func startApp(cfg *Config) error { if err != nil { return err } - server, err := server.NewServer(cfg.Listen, cfg.Protocol, app) + server, err := server.NewServer(logger, cfg.Listen, cfg.Protocol, app) if err != nil { return err } diff 
--git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index c0f207546..406e062fd 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -16,7 +16,7 @@ var getMp func() mempool.Mempool func init() { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc() + appConnMem, _ := cc(log.NewNopLogger()) err := appConnMem.Start() if err != nil { panic(err) From 44b5d330b07103d079dfbbe610f4cdba914fdbc6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Nov 2021 16:36:20 +0000 Subject: [PATCH 169/174] build(deps): Bump github.com/tendermint/tm-db from 0.6.4 to 0.6.6 (#7287) Bumps [github.com/tendermint/tm-db](https://github.com/tendermint/tm-db) from 0.6.4 to 0.6.6.
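Because v0.6.6 is identical to v0.6.4 in package structure and API (see the release notes below), existing call sites, such as the dbm.NewDB calls in the trust-store tests earlier in this series, compile unchanged after the bump. A minimal sketch of such a call site; the openTrustHistory helper is illustrative, and the snippet assumes the goleveldb backend is compiled into the build:

package example

import (
	dbm "github.com/tendermint/tm-db"
)

// openTrustHistory opens the on-disk history store exactly as on v0.6.4;
// the bump to v0.6.6 only moves the module past the accidental v0.6.5 tag.
func openTrustHistory(dir string) (dbm.DB, error) {
	return dbm.NewDB("trusthistory", "goleveldb", dir)
}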
Release notes

Sourced from github.com/tendermint/tm-db's releases:

Release v0.6.6

This release has the same API as v0.6.4, but bypasses the accidental v0.6.5 release that exported a different API. Any existing users of v0.6.5 may continue to use that tag.

https://github.com/tendermint/tm-db/blob/v0.6.6/CHANGELOG.md

Changelog

Sourced from github.com/tendermint/tm-db's changelog:

0.6.6 (2021-11-08)

Important note: Version v0.6.5 was accidentally tagged and should be avoided. This version is identical to v0.6.4 in package structure and API, but has updated the version marker so that normal go get upgrades will not require modifying existing use of v0.6.4.
--- go.mod | 5 ++++- go.sum | 9 +++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 1c789ef0d..2725c3df9 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,9 @@ require ( github.com/adlio/schema v1.1.14 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 @@ -31,7 +34,7 @@ require ( github.com/spf13/cobra v1.2.1 github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 - github.com/tendermint/tm-db v0.6.4 + github.com/tendermint/tm-db v0.6.6 github.com/vektra/mockery/v2 v2.9.4 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b diff --git a/go.sum b/go.sum index ece67f19e..9b411330e 100644 --- a/go.sum +++ b/go.sum @@ -956,8 +956,8 @@ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhF github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ= -github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= +github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM= +github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= @@ -1017,8 +1017,8 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1260,6 +1260,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From 1c34d172406a01b94a1ea1b2917e1cea05275e46 Mon Sep 17 00:00:00 2001 From: William Banfield <4561443+williambanfield@users.noreply.github.com> Date: Tue, 16 Nov 2021 19:06:19 -0500 Subject: [PATCH 170/174] proto: rebuild the proto files from the spec repository (#7291) * proto: rebuild the proto files from the spec repository * remove proto checks --- .github/workflows/proto.yml | 23 --- Makefile | 16 -- abci/types/types.pb.go | 200 +++++-------------------- proto/tendermint/blocksync/types.pb.go | 33 +--- proto/tendermint/consensus/types.pb.go | 59 ++------ proto/tendermint/consensus/wal.pb.go | 25 +--- proto/tendermint/crypto/keys.pb.go | 5 +- proto/tendermint/crypto/keys.proto | 18 --- proto/tendermint/crypto/proof.pb.go | 25 +--- proto/tendermint/crypto/proof.proto | 41 ----- proto/tendermint/libs/bits/types.pb.go | 5 +- proto/tendermint/libs/bits/types.proto | 9 -- proto/tendermint/mempool/types.pb.go | 10 +- proto/tendermint/p2p/conn.pb.go | 25 +--- proto/tendermint/p2p/pex.pb.go | 20 +-- proto/tendermint/p2p/types.pb.go | 25 +--- proto/tendermint/privval/service.proto | 2 +- proto/tendermint/privval/types.pb.go | 55 ++----- proto/tendermint/state/types.pb.go | 25 +--- proto/tendermint/statesync/types.pb.go | 45 ++---- proto/tendermint/types/block.pb.go | 5 +- proto/tendermint/types/canonical.pb.go | 20 +-- proto/tendermint/types/events.pb.go | 5 +- proto/tendermint/types/evidence.pb.go | 26 +--- proto/tendermint/types/params.pb.go | 34 +---- proto/tendermint/types/types.pb.go | 71 +++------ proto/tendermint/types/validator.pb.go | 15 +- proto/tendermint/version/types.pb.go | 11 +- proto/tendermint/version/types.proto | 16 -- 29 files changed, 164 insertions(+), 705 deletions(-) delete mode 100644 .github/workflows/proto.yml delete mode 100644 proto/tendermint/crypto/keys.proto delete mode 100644 proto/tendermint/crypto/proof.proto delete mode 100644 proto/tendermint/libs/bits/types.proto delete mode 100644 proto/tendermint/version/types.proto diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml deleted file mode 100644 index 5188b46da..000000000 --- a/.github/workflows/proto.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Protobuf -# Protobuf runs buf (https://buf.build/) lint and check-breakage -# This workflow is only run when a .proto file has been modified -on: - workflow_dispatch: # allow running workflow manually - pull_request: - paths: - - "**.proto" -jobs: - proto-lint: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.4.0 - - name: lint - run: make proto-lint - proto-breakage: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.4.0 - - name: check-breakage - run: make proto-check-breaking-ci diff --git a/Makefile b/Makefile index 353b8a5d2..043b2031d 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,6 @@ endif LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" -HTTPS_GIT := https://github.com/tendermint/tendermint.git DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir 
/workspace tendermintdev/docker-build-proto CGO_ENABLED ?= 0 @@ -79,32 +78,17 @@ $(BUILDDIR)/: ### Protobuf ### ############################################################################### -proto-all: proto-gen proto-lint proto-check-breaking -.PHONY: proto-all - proto-gen: @docker pull -q tendermintdev/docker-build-proto @echo "Generating Protobuf files" @$(DOCKER_PROTO_BUILDER) sh ./scripts/protocgen.sh .PHONY: proto-gen -proto-lint: - @$(DOCKER_PROTO_BUILDER) buf lint --error-format=json -.PHONY: proto-lint - proto-format: @echo "Formatting Protobuf files" @$(DOCKER_PROTO_BUILDER) find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; .PHONY: proto-format -proto-check-breaking: - @$(DOCKER_PROTO_BUILDER) buf breaking --against .git#branch=master -.PHONY: proto-check-breaking - -proto-check-breaking-ci: - @$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT)#branch=master -.PHONY: proto-check-breaking-ci - ############################################################################### ### Build ABCI ### ############################################################################### diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index f424009bc..6e4b53a1d 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -7715,10 +7715,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -7800,10 +7797,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -7853,10 +7847,7 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8008,10 +7999,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8249,10 +8237,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8407,10 +8392,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8594,10 +8576,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8700,10 +8679,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } 
if (iNdEx + skippy) > l { @@ -8787,10 +8763,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8859,10 +8832,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8912,10 +8882,7 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8965,10 +8932,7 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9088,10 +9052,7 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9198,10 +9159,7 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9336,10 +9294,7 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9914,10 +9869,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9999,10 +9951,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10084,10 +10033,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10137,10 +10083,7 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10326,10 +10269,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10483,10 +10423,7 @@ func (m 
*ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10793,10 +10730,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10880,10 +10814,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11237,10 +11168,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11511,10 +11439,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11668,10 +11593,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11774,10 +11696,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11861,10 +11780,7 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11933,10 +11849,7 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12020,10 +11933,7 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12200,10 +12110,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12306,10 +12213,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12425,10 +12329,7 @@ func (m *Event) Unmarshal(dAtA 
[]byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12562,10 +12463,7 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12720,10 +12618,7 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12826,10 +12721,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12931,10 +12823,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13037,10 +12926,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13213,10 +13099,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13391,10 +13274,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index 43ad139ec..c00200322 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -68,7 +68,8 @@ func (m *BlockRequest) GetHeight() int64 { return 0 } -// NoBlockResponse informs the node that the peer does not have block at the requested height +// NoBlockResponse informs the node that the peer does not have block at the +// requested height type NoBlockResponse struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } @@ -899,10 +900,7 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -971,10 +969,7 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1060,10 +1055,7 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } 
- if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1113,10 +1105,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1204,10 +1193,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1432,10 +1418,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 67efd1c2c..4ae9abc9e 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -103,8 +103,10 @@ func (m *NewRoundStep) GetLastCommitRound() int32 { return 0 } -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// NewValidBlock is sent when a validator observes a valid block B in some round +// r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in +// the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlock struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` @@ -532,7 +534,8 @@ func (m *VoteSetMaj23) GetBlockID() types.BlockID { return types.BlockID{} } -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +// VoteSetBits is sent to communicate the bit-array of votes seen for the +// BlockID. 
type VoteSetBits struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -1932,10 +1935,7 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2112,10 +2112,7 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2198,10 +2195,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2322,10 +2316,7 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2446,10 +2437,7 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2535,10 +2523,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2664,10 +2649,7 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2807,10 +2789,7 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2983,10 +2962,7 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3351,10 +3327,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index 86ff1be01..fd80819cd 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,10 +921,7 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1064,10 +1061,7 @@ func (m *TimeoutInfo) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1136,10 +1130,7 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1329,10 +1320,7 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1451,10 +1439,7 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 8ff4c4a4f..24c6c1b1b 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,10 +687,7 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthKeys - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.proto b/proto/tendermint/crypto/keys.proto deleted file mode 100644 index d66f9fc0c..000000000 --- a/proto/tendermint/crypto/keys.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; -package tendermint.crypto; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; - -import "gogoproto/gogo.proto"; - -// PublicKey defines the keys available for use with Tendermint Validators -message PublicKey { - option (gogoproto.compare) = true; - option (gogoproto.equal) = true; - - oneof sum { - bytes ed25519 = 1; - bytes secp256k1 = 2; - bytes sr25519 = 3; - } -} diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 97350c64c..82fb943fc 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,10 +820,7 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -943,10 +940,7 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1092,10 +1086,7 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1245,10 +1236,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1332,10 +1320,7 @@ func (m 
*ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthProof - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto deleted file mode 100644 index 975df7685..000000000 --- a/proto/tendermint/crypto/proof.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; -package tendermint.crypto; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; - -import "gogoproto/gogo.proto"; - -message Proof { - int64 total = 1; - int64 index = 2; - bytes leaf_hash = 3; - repeated bytes aunts = 4; -} - -message ValueOp { - // Encoded in ProofOp.Key. - bytes key = 1; - - // To encode in ProofOp.Data - Proof proof = 2; -} - -message DominoOp { - string key = 1; - string input = 2; - string output = 3; -} - -// ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data -// for example neighbouring node hash -message ProofOp { - string type = 1; - bytes key = 2; - bytes data = 3; -} - -// ProofOps is Merkle proof defined by the list of ProofOps -message ProofOps { - repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index ad87f854f..c0ebcb976 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,10 +307,7 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.proto b/proto/tendermint/libs/bits/types.proto deleted file mode 100644 index 3111d113a..000000000 --- a/proto/tendermint/libs/bits/types.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; -package tendermint.libs.bits; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits"; - -message BitArray { - int64 bits = 1; - repeated uint64 elems = 2; -} diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 3487652bc..11e259551 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,10 +370,7 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -458,10 +455,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 7c26d3fcd..47a3bb0cd 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,10 +723,7 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -776,10 +773,7 @@ func (m *PacketPong) Unmarshal(dAtA 
[]byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -902,10 +896,7 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1060,10 +1051,7 @@ func (m *Packet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1180,10 +1168,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthConn - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 25d636e43..15ccce15e 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -587,10 +587,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -640,10 +637,7 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -727,10 +721,7 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -850,10 +841,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index a0e647ee7..bffa6884f 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,10 +917,7 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1230,10 +1227,7 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1347,10 +1341,7 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1502,10 +1493,7 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1678,10 +1666,7 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index 63e9afca7..2c699e1cd 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index da30f7527..56b35e727 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,10 +1708,7 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1915,10 +1909,7 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2036,10 +2027,7 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2158,10 +2146,7 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2279,10 +2264,7 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2401,10 +2383,7 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2454,10 +2433,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2507,10 +2483,7 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2840,10 +2813,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2960,10 +2930,7 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index d94724fff..85f38cada 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -1069,10 +1069,7 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1177,10 +1174,7 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1282,10 +1276,7 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1400,10 +1391,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1869,10 +1857,7 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 93e844730..5541c2803 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,10 +1740,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1793,10 +1790,7 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1971,10 +1965,7 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2081,10 +2072,7 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err 
!= nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2245,10 +2233,7 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2317,10 +2302,7 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2406,10 +2388,7 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2478,10 +2457,7 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2583,10 +2559,7 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index aacb90fab..f2077aad8 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -389,10 +389,7 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthBlock - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index 38b17ddb1..709831043 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -775,10 +775,7 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -881,10 +878,7 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1093,10 +1087,7 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1286,10 +1277,7 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthCanonical - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git 
a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index 1c49aef64..a9aa26a79 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,10 +285,7 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 3d9e8f2c5..052fb0e6b 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -112,7 +112,8 @@ func (*Evidence) XXX_OneofWrappers() []interface{} { } } -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting +// votes. type DuplicateVoteEvidence struct { VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` @@ -189,7 +190,8 @@ func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { return time.Time{} } -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +// LightClientAttackEvidence contains evidence of a set of validators attempting +// to mislead a light client. type LightClientAttackEvidence struct { ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` @@ -825,10 +827,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1021,10 +1020,7 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1215,10 +1211,7 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { @@ -1302,10 +1295,7 @@ func (m *EvidenceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEvidence } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index a295bca9e..3bdf4cb6f 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -167,8 +167,8 @@ type EvidenceParams struct { // mechanism for handling [Nothing-At-Stake // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). 
MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. + // This sets the maximum size of total evidence in bytes that can be committed + // in a single block. and should fall comfortably under the max block bytes. // Default is 1048576 or 1MB MaxBytes int64 `protobuf:"varint,3,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` } @@ -1123,10 +1123,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1214,10 +1211,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1338,10 +1332,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1423,10 +1414,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1495,10 +1483,7 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1586,10 +1571,7 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 27a936097..653497c56 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -565,7 +565,8 @@ func (m *Vote) GetSignature() []byte { return nil } -// Commit contains the evidence that a block was committed by a set of validators. +// Commit contains the evidence that a block was committed by a set of +// validators. type Commit struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -967,7 +968,8 @@ func (m *BlockMeta) GetNumTxs() int64 { return 0 } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +// TxProof represents a Merkle proof of the presence of a transaction in the +// Merkle tree. 
type TxProof struct { RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2267,10 +2269,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2406,10 +2405,7 @@ func (m *Part) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2526,10 +2522,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3035,10 +3028,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3120,10 +3110,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3383,10 +3370,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3541,10 +3525,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3714,10 +3695,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3943,10 +3921,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4068,10 +4043,7 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4193,10 +4165,7 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4350,10 +4319,7 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes 
} if (iNdEx + skippy) > l { @@ -4507,10 +4473,7 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 2c3468b83..23b30ed3c 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,10 +583,7 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -741,10 +738,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -849,10 +843,7 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthValidator - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 9aeb3ae1a..76a94fd3c 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -23,9 +23,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. +// Consensus captures the consensus rules for processing a block in the +// blockchain, including all blockchain data structures and the rules of the +// application's state transition machine. type Consensus struct { Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"` App uint64 `protobuf:"varint,2,opt,name=app,proto3" json:"app,omitempty"` @@ -265,10 +265,7 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto deleted file mode 100644 index 3c4e4cc53..000000000 --- a/proto/tendermint/version/types.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package tendermint.version; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; - -import "gogoproto/gogo.proto"; - -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. 
-message Consensus { - option (gogoproto.equal) = true; - - uint64 block = 1; - uint64 app = 2; -} From 6ab62fe7b66f284e9bfc0add4dca02b493ee8b03 Mon Sep 17 00:00:00 2001 From: Sam Kleinman Date: Thu, 18 Nov 2021 17:56:21 -0500 Subject: [PATCH 171/174] service: remove stop method and use contexts (#7292) --- abci/client/grpc_client.go | 2 +- abci/client/mocks/client.go | 78 +- abci/client/socket_client.go | 26 +- abci/client/socket_client_test.go | 76 +- abci/cmd/abci-cli/abci-cli.go | 66 +- abci/example/example_test.go | 45 +- abci/example/kvstore/kvstore_test.go | 88 +- abci/server/grpc_server.go | 3 +- abci/server/socket_server.go | 70 +- abci/tests/client_server_test.go | 15 +- abci/tests/server/client.go | 10 +- cmd/tendermint/commands/light.go | 8 +- cmd/tendermint/commands/replay.go | 5 +- cmd/tendermint/commands/run_node.go | 23 +- config/db.go | 4 +- docs/architecture/adr-006-trust-metric.md | 2 +- internal/blocksync/pool.go | 15 +- internal/blocksync/pool_test.go | 35 +- internal/blocksync/reactor.go | 25 +- internal/blocksync/reactor_test.go | 47 +- internal/consensus/byzantine_test.go | 21 +- internal/consensus/common_test.go | 95 ++- internal/consensus/invalid_test.go | 28 +- internal/consensus/mempool_test.go | 53 +- internal/consensus/reactor.go | 11 +- internal/consensus/reactor_test.go | 173 ++-- internal/consensus/replay.go | 11 +- internal/consensus/replay_file.go | 15 +- internal/consensus/replay_stubs.go | 9 +- internal/consensus/replay_test.go | 235 +++--- internal/consensus/state.go | 34 +- internal/consensus/state_test.go | 720 +++++++++-------- internal/consensus/ticker.go | 6 +- .../consensus/types/height_vote_set_test.go | 21 +- internal/consensus/wal.go | 21 +- internal/consensus/wal_generator.go | 30 +- internal/consensus/wal_test.go | 47 +- internal/eventbus/event_bus.go | 9 +- internal/eventbus/event_bus_test.go | 150 ++-- internal/evidence/reactor.go | 3 +- internal/evidence/reactor_test.go | 37 +- internal/inspect/inspect.go | 20 +- internal/inspect/inspect_test.go | 20 +- internal/libs/autofile/cmd/logjack.go | 23 +- internal/libs/autofile/group.go | 3 +- internal/mempool/mempool_bench_test.go | 5 +- internal/mempool/mempool_test.go | 70 +- internal/mempool/reactor.go | 2 +- internal/mempool/reactor_test.go | 49 +- internal/p2p/address_test.go | 7 + internal/p2p/conn/connection.go | 5 +- internal/p2p/conn/connection_test.go | 87 +- internal/p2p/p2p_test.go | 3 - internal/p2p/p2ptest/network.go | 13 +- internal/p2p/p2ptest/require.go | 8 +- internal/p2p/peermanager_test.go | 46 +- internal/p2p/pex/reactor.go | 3 +- internal/p2p/pex/reactor_test.go | 88 +- internal/p2p/router.go | 2 +- internal/p2p/router_test.go | 86 +- internal/p2p/transport_mconn.go | 2 +- internal/p2p/transport_mconn_test.go | 13 + internal/p2p/transport_test.go | 95 ++- internal/p2p/trust/metric.go | 5 +- internal/p2p/trust/metric_test.go | 18 +- internal/p2p/trust/store.go | 15 +- internal/p2p/trust/store_test.go | 33 +- internal/proxy/app_conn_test.go | 41 +- internal/proxy/multi_app_conn.go | 56 +- internal/proxy/multi_app_conn_test.go | 41 +- internal/state/execution.go | 2 +- internal/state/execution_test.go | 36 +- internal/state/indexer/indexer_service.go | 2 +- .../state/indexer/indexer_service_test.go | 21 +- internal/state/indexer/sink/psql/psql_test.go | 7 +- internal/state/state_test.go | 4 +- internal/state/validation_test.go | 24 +- internal/statesync/dispatcher_test.go | 8 +- internal/statesync/reactor.go | 49 +- internal/statesync/reactor_test.go | 117 ++- 
internal/statesync/syncer.go | 2 +- internal/statesync/syncer_test.go | 133 ++- libs/events/event_cache_test.go | 6 +- libs/events/events.go | 4 +- libs/events/events_test.go | 198 ++--- libs/pubsub/example_test.go | 5 +- libs/pubsub/pubsub.go | 2 +- libs/pubsub/pubsub_test.go | 92 ++- libs/service/service.go | 99 +-- libs/service/service_test.go | 25 +- libs/strings/string.go | 4 + light/proxy/proxy.go | 12 +- light/rpc/client.go | 4 +- node/node.go | 138 ++-- node/node_test.go | 117 +-- node/public.go | 17 +- node/setup.go | 22 +- privval/grpc/util.go | 2 +- privval/signer_client.go | 4 +- privval/signer_client_test.go | 557 ++++++------- privval/signer_listener_endpoint.go | 3 +- privval/signer_listener_endpoint_test.go | 41 +- privval/signer_server.go | 16 +- rpc/client/http/ws.go | 2 +- rpc/client/interface.go | 2 +- rpc/client/main_test.go | 1 - rpc/client/mocks/client.go | 756 ++++++++++++++++++ rpc/client/rpc_test.go | 2 +- rpc/test/helpers.go | 19 +- test/e2e/node/main.go | 44 +- test/fuzz/mempool/checktx.go | 2 +- .../internal/test_harness.go | 4 +- .../internal/test_harness_test.go | 33 +- tools/tm-signer-harness/main.go | 10 +- types/block_test.go | 5 +- 115 files changed, 3613 insertions(+), 2271 deletions(-) create mode 100644 rpc/client/mocks/client.go diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index f4cd5f3e9..ef88736ab 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -63,7 +63,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func (cli *grpcClient) OnStart() error { +func (cli *grpcClient) OnStart(ctx context.Context) error { // This processes asynchronous request/response messages and dispatches // them to callbacks. go func() { diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 664646e61..f0d82a50e 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -7,8 +7,6 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" - log "github.com/tendermint/tendermint/libs/log" - mock "github.com/stretchr/testify/mock" types "github.com/tendermint/tendermint/abci/types" @@ -636,39 +634,6 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS return r0, r1 } -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - // QueryAsync provides a mock function with given fields: _a0, _a1 func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) @@ -731,51 +696,18 @@ func (_m *Client) Quit() <-chan struct{} { return r0 } -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - // 
SetResponseCallback provides a mock function with given fields: _a0 func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 00e981123..8dfee0c8d 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -66,7 +66,7 @@ func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { // OnStart implements Service by connecting to the server and spawning reading // and writing goroutines. -func (cli *socketClient) OnStart() error { +func (cli *socketClient) OnStart(ctx context.Context) error { var ( err error conn net.Conn @@ -85,8 +85,8 @@ func (cli *socketClient) OnStart() error { } cli.conn = conn - go cli.sendRequestsRoutine(conn) - go cli.recvResponseRoutine(conn) + go cli.sendRequestsRoutine(ctx, conn) + go cli.recvResponseRoutine(ctx, conn) return nil } @@ -114,17 +114,25 @@ func (cli *socketClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() + defer cli.mtx.Unlock() cli.resCb = resCb - cli.mtx.Unlock() } //---------------------------------------- -func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { +func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) { bw := bufio.NewWriter(conn) for { select { + case <-ctx.Done(): + return + case <-cli.Quit(): + return case reqres := <-cli.reqQueue: + if ctx.Err() != nil { + return + } + if reqres.C.Err() != nil { cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) continue @@ -139,16 +147,16 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { cli.stopForError(fmt.Errorf("flush buffer: %w", err)) return } - - case <-cli.Quit(): - return } } } -func (cli *socketClient) recvResponseRoutine(conn io.Reader) { +func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) { r := bufio.NewReader(conn) for { + if ctx.Err() != nil { + return + } var res = &types.Response{} err := types.ReadMessage(r, res) if err != nil { diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 0e2fdffa3..a3469ddd1 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -18,30 +18,21 @@ import ( "github.com/tendermint/tendermint/libs/service" ) -var ctx = context.Background() - func TestProperSyncCalls(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := slowApp{} logger := log.TestingLogger() - s, c := setupClientServer(t, logger, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) + _, c := setupClientServer(ctx, t, logger, app) resp := 
make(chan error, 1) go func() { // This is BeginBlockSync unrolled.... reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) assert.NoError(t, err) - err = c.FlushSync(context.Background()) + err = c.FlushSync(ctx) assert.NoError(t, err) res := reqres.Response.GetBeginBlock() assert.NotNil(t, res) @@ -57,52 +48,8 @@ func TestProperSyncCalls(t *testing.T) { } } -func TestHangingSyncCalls(t *testing.T) { - app := slowApp{} - logger := log.TestingLogger() - - s, c := setupClientServer(t, logger, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Log(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Log(err) - } - }) - - resp := make(chan error, 1) - go func() { - // Start BeginBlock and flush it - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) - assert.NoError(t, err) - flush, err := c.FlushAsync(ctx) - assert.NoError(t, err) - // wait 20 ms for all events to travel socket, but - // no response yet from server - time.Sleep(20 * time.Millisecond) - // kill the server, so the connections break - err = s.Stop() - assert.NoError(t, err) - - // wait for the response from BeginBlock - reqres.Wait() - flush.Wait() - resp <- c.Error() - }() - - select { - case <-time.After(time.Second): - require.Fail(t, "No response arrived") - case err, ok := <-resp: - require.True(t, ok, "Must not close channel") - assert.Error(t, err, "We should get EOF error") - } -} - func setupClientServer( + ctx context.Context, t *testing.T, logger log.Logger, app types.Application, @@ -115,12 +62,15 @@ func setupClientServer( s, err := server.NewServer(logger, addr, "socket", app) require.NoError(t, err) - err = s.Start() - require.NoError(t, err) + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) c := abciclient.NewSocketClient(logger, addr, true) - err = c.Start() - require.NoError(t, err) + require.NoError(t, c.Start(ctx)) + t.Cleanup(c.Wait) + + require.True(t, s.IsRunning()) + require.True(t, c.IsRunning()) return s, c } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 3ffc7dbfa..783c41dbb 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,18 +2,18 @@ package main import ( "bufio" - "context" "encoding/hex" "errors" "fmt" "io" "os" + "os/signal" "strings" + "syscall" "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" @@ -29,8 +29,6 @@ import ( var ( client abciclient.Client logger log.Logger - - ctx = context.Background() ) // flags @@ -71,7 +69,8 @@ var RootCmd = &cobra.Command{ if err != nil { return err } - if err := client.Start(); err != nil { + + if err := client.Start(cmd.Context()); err != nil { return err } } @@ -291,23 +290,24 @@ func compose(fs []func() error) error { } func cmdTest(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() return compose( []func() error{ - func() error { return servertest.InitChain(client) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, 
code.CodeTypeBadNonce, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, + func() error { return servertest.InitChain(ctx, client) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, func() error { - return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) + return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, }) } @@ -442,13 +442,15 @@ func cmdEcho(cmd *cobra.Command, args []string) error { if len(args) > 0 { msg = args[0] } - res, err := client.EchoSync(ctx, msg) + res, err := client.EchoSync(cmd.Context(), msg) if err != nil { return err } + printResponse(cmd, args, response{ Data: []byte(res.Message), }) + return nil } @@ -458,7 +460,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) + res, err := client.InfoSync(cmd.Context(), types.RequestInfo{Version: version}) if err != nil { return err } @@ -483,7 +485,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, err := client.DeliverTxSync(cmd.Context(), types.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -509,7 +511,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTxSync(cmd.Context(), types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -524,7 +526,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.CommitSync(ctx) + res, err := client.CommitSync(cmd.Context()) if err != nil { return err } @@ -549,7 +551,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(ctx, types.RequestQuery{ + resQuery, err := client.QuerySync(cmd.Context(), types.RequestQuery{ Data: queryBytes, Path: flagPath, 
Height: int64(flagHeight), @@ -590,20 +592,16 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { return err } - if err := srv.Start(); err != nil { + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + if err := srv.Start(ctx); err != nil { return err } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - if err := srv.Stop(); err != nil { - logger.Error("Error while stopping server", "err", err) - } - }) - // Run forever. - select {} + <-ctx.Done() + return nil } //-------------------------------------------------------------------------------- diff --git a/abci/example/example_test.go b/abci/example/example_test.go index c984fa2fb..80d5a3130 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -29,21 +29,29 @@ func init() { } func TestKVStore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fmt.Println("### Testing KVStore") - testStream(t, kvstore.NewApplication()) + testStream(ctx, t, kvstore.NewApplication()) } func TestBaseApp(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() fmt.Println("### Testing BaseApp") - testStream(t, types.NewBaseApplication()) + testStream(ctx, t, types.NewBaseApplication()) } func TestGRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fmt.Println("### Testing GRPC") - testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) + testGRPCSync(ctx, t, types.NewGRPCApplication(types.NewBaseApplication())) } -func testStream(t *testing.T, app types.Application) { +func testStream(ctx context.Context, t *testing.T, app types.Application) { t.Helper() const numDeliverTxs = 20000 @@ -53,25 +61,16 @@ func testStream(t *testing.T, app types.Application) { logger := log.TestingLogger() // Start the listener server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) - - err := server.Start() + t.Cleanup(server.Wait) + err := server.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) // Connect to the socket client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false) + t.Cleanup(client.Wait) - err = client.Start() + err = client.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) done := make(chan struct{}) counter := 0 @@ -100,8 +99,6 @@ func testStream(t *testing.T, app types.Application) { } }) - ctx := context.Background() - // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request @@ -129,7 +126,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { +func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationServer) { numDeliverTxs := 2000 socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) @@ -138,15 +135,11 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { // Start the listener server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) - if err := server.Start(); err != nil { + if err := server.Start(ctx); err != nil { t.Fatalf("Error starting GRPC server: %v", err.Error()) } - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) 
- } - }) + t.Cleanup(func() { server.Wait() }) // Connect to the socket conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index e64e0ed9e..664e628b0 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -24,8 +24,6 @@ const ( testValue = "def" ) -var ctx = context.Background() - func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) @@ -229,101 +227,103 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { +func makeSocketClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) - if err := server.Start(); err != nil { + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } // Connect to the socket client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) - if err := client.Start(); err != nil { - if err = server.Stop(); err != nil { - return nil, nil, err - } + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { +func makeGRPCClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() gapp := types.NewGRPCApplication(app) server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp) - if err := server.Start(); err != nil { + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) - if err := client.Start(); err != nil { - if err := server.Stop(); err != nil { - return nil, nil, err - } + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } func TestClientServer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.TestingLogger() + // set up socket app kvstore := NewApplication() - client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") + client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket") require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); server.Wait() }) + t.Cleanup(func() { cancel(); client.Wait() }) - runClientTests(t, client) + runClientTests(ctx, t, client) // set up grpc app kvstore = NewApplication() - gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc") + gclient, gserver, err := 
makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc") require.NoError(t, err) - t.Cleanup(func() { - if err := gserver.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := gclient.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); gserver.Wait() }) + t.Cleanup(func() { cancel(); gclient.Wait() }) - runClientTests(t, gclient) + runClientTests(ctx, t, gclient) } -func runClientTests(t *testing.T, client abciclient.Client) { +func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) { // run some tests.... key := testKey value := key tx := []byte(key) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) } -func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) { +func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 6d22f43c1..78da22cdb 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -1,6 +1,7 @@ package server import ( + "context" "net" "google.golang.org/grpc" @@ -36,7 +37,7 @@ func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicatio } // OnStart starts the gRPC service. -func (s *GRPCServer) OnStart() error { +func (s *GRPCServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index b24b38e38..29d912671 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -2,6 +2,7 @@ package server import ( "bufio" + "context" "fmt" "io" "net" @@ -44,14 +45,14 @@ func NewSocketServer(logger tmlog.Logger, protoAddr string, app types.Applicatio return s } -func (s *SocketServer) OnStart() error { +func (s *SocketServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } s.listener = ln - go s.acceptConnectionsRoutine() + go s.acceptConnectionsRoutine(ctx) return nil } @@ -63,6 +64,7 @@ func (s *SocketServer) OnStop() { s.connsMtx.Lock() defer s.connsMtx.Unlock() + for id, conn := range s.conns { delete(s.conns, id) if err := conn.Close(); err != nil { @@ -96,8 +98,13 @@ func (s *SocketServer) rmConn(connID int) error { return conn.Close() } -func (s *SocketServer) acceptConnectionsRoutine() { +func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) { for { + if ctx.Err() != nil { + return + + } + // Accept a connection s.Logger.Info("Waiting for new connection...") conn, err := s.listener.Accept() @@ -117,35 +124,46 @@ func (s *SocketServer) acceptConnectionsRoutine() { responses := make(chan *types.Response, 1000) // A channel to buffer responses // Read requests from conn and deal with them - go s.handleRequests(closeConn, conn, responses) + go s.handleRequests(ctx, closeConn, conn, responses) // Pull responses from 'responses' and write them to conn. 
-	go s.handleResponses(closeConn, conn, responses)
+	go s.handleResponses(ctx, closeConn, conn, responses)
 
 	// Wait until signal to close connection
-	go s.waitForClose(closeConn, connID)
+	go s.waitForClose(ctx, closeConn, connID)
 	}
 }
 
-func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
-	err := <-closeConn
-	switch {
-	case err == io.EOF:
-		s.Logger.Error("Connection was closed by client")
-	case err != nil:
-		s.Logger.Error("Connection error", "err", err)
-	default:
-		// never happens
-		s.Logger.Error("Connection was closed")
-	}
+func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) {
+	defer func() {
+		// Close the connection
+		if err := s.rmConn(connID); err != nil {
+			s.Logger.Error("Error closing connection", "err", err)
+		}
+	}()
 
-	// Close the connection
-	if err := s.rmConn(connID); err != nil {
-		s.Logger.Error("Error closing connection", "err", err)
+	select {
+	case <-ctx.Done():
+		return
+	case err := <-closeConn:
+		switch {
+		case err == io.EOF:
+			s.Logger.Error("Connection was closed by client")
+		case err != nil:
+			s.Logger.Error("Connection error", "err", err)
+		default:
+			// never happens
+			s.Logger.Error("Connection was closed")
+		}
 	}
 }
 
 // Read requests from conn and deal with them
-func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
+func (s *SocketServer) handleRequests(
+	ctx context.Context,
+	closeConn chan error,
+	conn io.Reader,
+	responses chan<- *types.Response,
+) {
 	var count int
 	var bufReader = bufio.NewReader(conn)
@@ -163,6 +181,9 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
 	}()
 
 	for {
+		if ctx.Err() != nil {
+			return
+		}
 		var req = &types.Request{}
 		err := types.ReadMessage(bufReader, req)
@@ -229,7 +250,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 }
 
 // Pull responses from 'responses' and write them to conn.
-func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { +func (s *SocketServer) handleResponses( + ctx context.Context, + closeConn chan error, + conn io.Writer, + responses <-chan *types.Response, +) { bw := bufio.NewWriter(conn) for res := range responses { if err := types.WriteMessage(res, bw); err != nil { diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 6b4750b33..2dfa68c63 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -12,19 +13,23 @@ import ( ) func TestClientServerNoAddrPrefix(t *testing.T) { - addr := "localhost:26658" - transport := "socket" - app := kvstore.NewApplication() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const ( + addr = "localhost:26658" + transport = "socket" + ) + app := kvstore.NewApplication() logger := log.TestingLogger() server, err := abciserver.NewServer(logger, addr, transport, app) assert.NoError(t, err, "expected no error on NewServer") - err = server.Start() + err = server.Start(ctx) assert.NoError(t, err, "expected no error on server.Start") client, err := abciclientent.NewClient(logger, addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") - err = client.Start() + err = client.Start(ctx) assert.NoError(t, err, "expected no error on client.Start") } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 23adbe80d..5062083f0 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -12,9 +12,7 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" ) -var ctx = context.Background() - -func InitChain(client abciclient.Client) error { +func InitChain(ctx context.Context, client abciclient.Client) error { total := 10 vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { @@ -34,7 +32,7 @@ func InitChain(client abciclient.Client) error { return nil } -func Commit(client abciclient.Client, hashExp []byte) error { +func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { res, err := client.CommitSync(ctx) data := res.Data if err != nil { @@ -51,7 +49,7 @@ func Commit(client abciclient.Client, hashExp []byte) error { return nil } -func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { +func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { @@ -70,7 +68,7 @@ func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp return nil } -func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { +func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index f4c9a21da..0e1894ccf 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -6,8 +6,10 @@ import ( "fmt" "net/http" "os" + "os/signal" "path/filepath" "strings" + "syscall" "time" "github.com/spf13/cobra" @@ -191,8 +193,12 @@ func 
runProxy(cmd *cobra.Command, args []string) error { p.Listener.Close() }) + // this might be redundant to the above, eventually. + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + logger.Info("Starting proxy...", "laddr", listenAddr) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index 558208ab3..2cd4c966a 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -10,8 +10,7 @@ var ReplayCmd = &cobra.Command{ Use: "replay", Short: "Replay messages from WAL", RunE: func(cmd *cobra.Command, args []string) error { - return consensus.RunReplayFile(logger, config.BaseConfig, config.Consensus, false) - + return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, false) }, } @@ -21,6 +20,6 @@ var ReplayConsoleCmd = &cobra.Command{ Use: "replay-console", Short: "Replay messages from WAL in a console", RunE: func(cmd *cobra.Command, args []string) error { - return consensus.RunReplayFile(logger, config.BaseConfig, config.Consensus, true) + return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, true) }, } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index d5a3de04e..feffbc2d0 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -6,11 +6,12 @@ import ( "fmt" "io" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" - tmos "github.com/tendermint/tendermint/libs/os" ) var ( @@ -103,28 +104,22 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command { return err } - n, err := nodeProvider(config, logger) + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + n, err := nodeProvider(ctx, config, logger) if err != nil { return fmt.Errorf("failed to create node: %w", err) } - if err := n.Start(); err != nil { + if err := n.Start(ctx); err != nil { return fmt.Errorf("failed to start node: %w", err) } logger.Info("started node", "node", n.String()) - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - if n.IsRunning() { - if err := n.Stop(); err != nil { - logger.Error("unable to stop the node", "error", err) - } - } - }) - - // Run forever. - select {} + <-ctx.Done() + return nil }, } diff --git a/config/db.go b/config/db.go index 8f489a87a..f508354e0 100644 --- a/config/db.go +++ b/config/db.go @@ -1,6 +1,8 @@ package config import ( + "context" + dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" @@ -8,7 +10,7 @@ import ( ) // ServiceProvider takes a config and a logger and returns a ready to go Node. -type ServiceProvider func(*Config, log.Logger) (service.Service, error) +type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error) // DBContext specifies config information for loading a new DB. 
type DBContext struct { diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md index 6fa77a609..608978207 100644 --- a/docs/architecture/adr-006-trust-metric.md +++ b/docs/architecture/adr-006-trust-metric.md @@ -178,7 +178,7 @@ type TrustMetricStore struct { } // OnStart implements Service -func (tms *TrustMetricStore) OnStart() error {} +func (tms *TrustMetricStore) OnStart(context.Context) error { return nil } // OnStop implements Service func (tms *TrustMetricStore) OnStop() {} diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index 66ed24a79..6f06c9883 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "errors" "fmt" "math" @@ -116,15 +117,15 @@ func NewBlockPool( // OnStart implements service.Service by spawning requesters routine and recording // pool's start time. -func (pool *BlockPool) OnStart() error { +func (pool *BlockPool) OnStart(ctx context.Context) error { pool.lastAdvance = time.Now() pool.lastHundredBlockTimeStamp = pool.lastAdvance - go pool.makeRequestersRoutine() + go pool.makeRequestersRoutine(ctx) return nil } // spawns requesters as needed -func (pool *BlockPool) makeRequestersRoutine() { +func (pool *BlockPool) makeRequestersRoutine(ctx context.Context) { for { if !pool.IsRunning() { break @@ -144,7 +145,7 @@ func (pool *BlockPool) makeRequestersRoutine() { pool.removeTimedoutPeers() default: // request for more blocks. - pool.makeNextRequester() + pool.makeNextRequester(ctx) } } } @@ -397,7 +398,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { return nil } -func (pool *BlockPool) makeNextRequester() { +func (pool *BlockPool) makeNextRequester(ctx context.Context) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -411,7 +412,7 @@ func (pool *BlockPool) makeNextRequester() { pool.requesters[nextHeight] = request atomic.AddInt32(&pool.numPending, 1) - err := request.Start() + err := request.Start(ctx) if err != nil { request.Logger.Error("Error starting request", "err", err) } @@ -570,7 +571,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester { return bpr } -func (bpr *bpRequester) OnStart() error { +func (bpr *bpRequester) OnStart(ctx context.Context) error { go bpr.requestRoutine() return nil } diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index b53699f97..0718fee16 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "fmt" mrand "math/rand" "testing" @@ -78,22 +79,20 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { } func TestBlockPoolBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) - err := pool.Start() - if err != nil { + if err := pool.Start(ctx); err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) peers.start() defer peers.stop() @@ -137,20 +136,19 @@ func TestBlockPoolBasic(t *testing.T) { } func TestBlockPoolTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan 
peerError, 1000) requestsCh := make(chan BlockRequest, 1000) pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) - err := pool.Start() + err := pool.Start(ctx) if err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) for _, peer := range peers { t.Logf("Peer %v", peer.id) @@ -199,6 +197,9 @@ func TestBlockPoolTimeout(t *testing.T) { } func TestBlockPoolRemovePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peers := make(testPeers, 10) for i := 0; i < 10; i++ { peerID := types.NodeID(fmt.Sprintf("%d", i+1)) @@ -209,13 +210,9 @@ func TestBlockPoolRemovePeer(t *testing.T) { errorsCh := make(chan peerError) pool := NewBlockPool(log.TestingLogger(), 1, requestsCh, errorsCh) - err := pool.Start() + err := pool.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) // add peers for peerID, peer := range peers { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index f18ed86b7..a6845b719 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "fmt" "runtime/debug" "sync" @@ -49,7 +50,7 @@ func GetChannelDescriptor() *p2p.ChannelDescriptor { type consensusReactor interface { // For when we switch from block sync reactor to the consensus // machine. - SwitchToConsensus(state sm.State, skipWAL bool) + SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) } type peerError struct { @@ -151,9 +152,9 @@ func NewReactor( // // If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if r.blockSync.IsSet() { - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } r.poolWG.Add(1) @@ -362,12 +363,12 @@ func (r *Reactor) processPeerUpdates() { // SwitchToBlockSync is called by the state sync reactor when switching to fast // sync. -func (r *Reactor) SwitchToBlockSync(state sm.State) error { +func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { r.blockSync.Set() r.initialState = state r.pool.height = state.LastBlockHeight + 1 - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } @@ -423,6 +424,17 @@ func (r *Reactor) requestRoutine() { } } +func (r *Reactor) stopCtx() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-r.closeCh + cancel() + }() + + return ctx +} + // poolRoutine handles messages from the poolReactor telling the reactor what to // do. 
// @@ -441,6 +453,7 @@ func (r *Reactor) poolRoutine(stateSynced bool) { lastRate = 0.0 didProcessCh = make(chan struct{}, 1) + ctx = r.stopCtx() ) defer trySyncTicker.Stop() @@ -488,7 +501,7 @@ FOR_LOOP: r.blockSync.UnSet() if r.consReactor != nil { - r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) + r.consReactor.SwitchToConsensus(ctx, state, blocksSynced > 0 || stateSynced) } break FOR_LOOP diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 2b567aae7..5792d9e78 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "os" "testing" "time" @@ -41,6 +42,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -49,13 +51,16 @@ func setup( ) *reactorTestSuite { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + numNodes := len(maxBlockHeights) require.True(t, numNodes >= 1, "must specify at least one block height (nodes)") rts := &reactorTestSuite{ logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]proxy.AppConns, numNodes), @@ -70,17 +75,19 @@ func setup( i := 0 for nodeID := range rts.network.Nodes { - rts.addNode(t, nodeID, genDoc, privVal, maxBlockHeights[i]) + rts.addNode(ctx, t, nodeID, genDoc, privVal, maxBlockHeights[i]) i++ } t.Cleanup(func() { + cancel() for _, nodeID := range rts.nodes { rts.peerUpdates[nodeID].Close() if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.NoError(t, rts.app[nodeID].Stop()) + rts.reactors[nodeID].Wait() + rts.app[nodeID].Wait() + require.False(t, rts.reactors[nodeID].IsRunning()) } } @@ -89,7 +96,9 @@ func setup( return rts } -func (rts *reactorTestSuite) addNode(t *testing.T, +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -101,7 +110,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, rts.nodes = append(rts.nodes, nodeID) rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics()) - require.NoError(t, rts.app[nodeID].Start()) + require.NoError(t, rts.app[nodeID].Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() @@ -170,7 +179,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, consensus.NopMetrics()) require.NoError(t, err) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } @@ -184,6 +193,9 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -191,7 +203,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) { genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(64) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, 
genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -220,6 +232,9 @@ func TestReactor_AbruptDisconnect(t *testing.T) { } func TestReactor_SyncTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -227,7 +242,7 @@ func TestReactor_SyncTime(t *testing.T) { genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(101) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) rts.start(t) @@ -244,6 +259,9 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("block_sync_reactor_test") require.NoError(t, err) @@ -252,7 +270,7 @@ func TestReactor_NoBlockResponse(t *testing.T) { genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(65) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -293,6 +311,9 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -300,7 +321,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { maxBlockHeight := int64(48) genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) @@ -333,11 +354,11 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // XXX: This causes a potential race condition. // See: https://github.com/tendermint/tendermint/issues/6005 otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30) - newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{ + newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), }) - rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) + rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) // add a fake peer just so we do not wait for the consensus ticker to timeout rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index df11067b9..d9e5e46b8 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -31,6 +31,9 @@ import ( // Byzantine node sends two different prevotes (nil and blockID) to the same // validator. 
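// The whole test now hangs off one cancelable context: the event buses,
// reactor network, signing calls, and evidence subscriptions below are all
// torn down by the deferred cancel.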
func TestByzantinePrevoteEquivocation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) nValidators := 4 @@ -93,7 +96,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { cs.SetPrivValidator(pv) eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) - err = eventBus.Start() + err = eventBus.Start(ctx) require.NoError(t, err) cs.SetEventBus(eventBus) @@ -103,7 +106,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { }() } - rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock var bzNodeID types.NodeID @@ -211,7 +214,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID) p := proposal.ToProto() - if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil { + if err := lazyNodeState.privValidator.SignProposal(ctx, lazyNodeState.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue @@ -229,15 +232,13 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // Evidence should be submitted and committed at the third height but // we will check the first six just in case evidenceFromEachValidator := make([]types.Evidence, nValidators) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup i := 0 for _, sub := range rts.subs { @@ -246,6 +247,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { go func(j int, s eventbus.Subscription) { defer wg.Done() for { + if ctx.Err() != nil { + return + } + msg, err := s.Next(ctx) if !assert.NoError(t, err) { cancel() @@ -265,7 +270,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { wg.Wait() - pubkey, err := bzNodeState.privValidator.GetPubKey(context.Background()) + pubkey, err := bzNodeState.privValidator.GetPubKey(ctx) require.NoError(t, err) for idx, ev := range evidenceFromEachValidator { @@ -311,7 +316,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // eventBus.SetLogger(logger.With("module", "events", "validator", i)) // var err error - // blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + // blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock) // require.NoError(t, err) // conR := NewReactor(states[i], true) // so we don't start the consensus states diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 780ec8804..bcbbe7c88 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -106,12 +106,14 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida } func (vs *validatorStub) signVote( + ctx context.Context, cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader) (*types.Vote, error) { + header types.PartSetHeader, +) (*types.Vote, error) { - pubKey, err := vs.PrivValidator.GetPubKey(context.Background()) + pubKey, err := vs.PrivValidator.GetPubKey(ctx) if err != nil 
{ return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -126,7 +128,7 @@ func (vs *validatorStub) signVote( BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() - if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), v); err != nil { + if err := vs.PrivValidator.SignVote(ctx, cfg.ChainID(), v); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } @@ -144,13 +146,15 @@ func (vs *validatorStub) signVote( // Sign vote for type/hash/header func signVote( + ctx context.Context, vs *validatorStub, cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader) *types.Vote { + header types.PartSetHeader, +) *types.Vote { - v, err := vs.signVote(cfg, voteType, hash, header) + v, err := vs.signVote(ctx, cfg, voteType, hash, header) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } @@ -161,6 +165,7 @@ func signVote( } func signVotes( + ctx context.Context, cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, @@ -168,7 +173,7 @@ func signVotes( vss ...*validatorStub) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = signVote(vs, cfg, voteType, hash, header) + votes[i] = signVote(ctx, vs, cfg, voteType, hash, header) } return votes } @@ -192,11 +197,11 @@ func (vss ValidatorStubsByPower) Len() int { } func (vss ValidatorStubsByPower) Less(i, j int) bool { - vssi, err := vss[i].GetPubKey(context.Background()) + vssi, err := vss[i].GetPubKey(context.TODO()) if err != nil { panic(err) } - vssj, err := vss[j].GetPubKey(context.Background()) + vssj, err := vss[j].GetPubKey(context.TODO()) if err != nil { panic(err) } @@ -218,13 +223,14 @@ func (vss ValidatorStubsByPower) Swap(i, j int) { //------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *State, height int64, round int32) { +func startTestRound(ctx context.Context, cs *State, height int64, round int32) { cs.enterNewRound(height, round) - cs.startRoutines(0) + cs.startRoutines(ctx, 0) } // Create proposal block from cs1 but sign it with vs. func decideProposal( + ctx context.Context, cs1 *State, vs *validatorStub, height int64, @@ -243,7 +249,7 @@ func decideProposal( polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal = types.NewProposal(height, round, polRound, propBlockID) p := proposal.ToProto() - if err := vs.SignProposal(context.Background(), chainID, p); err != nil { + if err := vs.SignProposal(ctx, chainID, p); err != nil { panic(err) } @@ -259,6 +265,7 @@ func addVotes(to *State, votes ...*types.Vote) { } func signAddVotes( + ctx context.Context, cfg *config.Config, to *State, voteType tmproto.SignedMsgType, @@ -266,13 +273,19 @@ func signAddVotes( header types.PartSetHeader, vss ...*validatorStub, ) { - votes := signVotes(cfg, voteType, hash, header, vss...) - addVotes(to, votes...) + addVotes(to, signVotes(ctx, cfg, voteType, hash, header, vss...)...) 
} -func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { +func validatePrevote( + ctx context.Context, + t *testing.T, + cs *State, + round int32, + privVal *validatorStub, + blockHash []byte, +) { prevotes := cs.Votes.Prevotes(round) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pubKey.Address() var vote *types.Vote @@ -290,9 +303,9 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu } } -func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { +func validateLastPrecommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { votes := cs.LastCommit - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() var vote *types.Vote @@ -305,6 +318,7 @@ func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, bloc } func validatePrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -314,7 +328,7 @@ func validatePrecommit( lockedBlockHash []byte, ) { precommits := cs.Votes.Precommits(thisRound) - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() var vote *types.Vote @@ -353,6 +367,7 @@ func validatePrecommit( } func validatePrevoteAndPrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -362,18 +377,18 @@ func validatePrevoteAndPrecommit( lockedBlockHash []byte, ) { // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) + validatePrevote(ctx, t, cs, thisRound, privVal, votedBlockHash) // verify precommit cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() + defer cs.mtx.Unlock() + validatePrecommit(ctx, t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) } -func subscribeToVoter(t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { +func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { t.Helper() ch := make(chan tmpubsub.Message, 1) - if err := cs.eventBus.Observe(context.Background(), func(msg tmpubsub.Message) error { + if err := cs.eventBus.Observe(ctx, func(msg tmpubsub.Message) error { vote := msg.Data().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { @@ -389,27 +404,34 @@ func subscribeToVoter(t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Mess //------------------------------------------------------------------------------- // consensus states -func newState(logger log.Logger, state sm.State, pv types.PrivValidator, app abci.Application) (*State, error) { +func newState( + ctx context.Context, + logger log.Logger, + state sm.State, + pv types.PrivValidator, + app abci.Application, +) (*State, error) { cfg, err := config.ResetTestRoot("consensus_state_test") if err != nil { return nil, err } - return newStateWithConfig(logger, cfg, state, pv, app), nil + return newStateWithConfig(ctx, logger, cfg, state, pv, app), nil } func newStateWithConfig( + ctx context.Context, logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, ) *State { - blockStore := store.NewBlockStore(dbm.NewMemDB()) - return newStateWithConfigAndBlockStore(logger, thisConfig, 
state, pv, app, blockStore) + return newStateWithConfigAndBlockStore(ctx, logger, thisConfig, state, pv, app, store.NewBlockStore(dbm.NewMemDB())) } func newStateWithConfigAndBlockStore( + ctx context.Context, logger log.Logger, thisConfig *config.Config, state sm.State, @@ -449,7 +471,7 @@ func newStateWithConfigAndBlockStore( cs.SetPrivValidator(pv) eventBus := eventbus.NewDefault(logger.With("module", "events")) - err := eventBus.Start() + err := eventBus.Start(ctx) if err != nil { panic(err) } @@ -469,13 +491,18 @@ func loadPrivValidator(cfg *config.Config) *privval.FilePV { return privValidator } -func randState(cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub, error) { +func randState( + ctx context.Context, + cfg *config.Config, + logger log.Logger, + nValidators int, +) (*State, []*validatorStub, error) { // Get State state, privVals := randGenesisState(cfg, nValidators, false, 10) vss := make([]*validatorStub, nValidators) - cs, err := newState(logger, state, privVals[0], kvstore.NewApplication()) + cs, err := newState(ctx, logger, state, privVals[0], kvstore.NewApplication()) if err != nil { return nil, nil, err } @@ -719,6 +746,7 @@ func consensusLogger() log.Logger { } func randConsensusState( + ctx context.Context, t *testing.T, cfg *config.Config, nValidators int, @@ -761,7 +789,7 @@ func randConsensusState( app.InitChain(abci.RequestInitChain{Validators: vals}) l := logger.With("validator", i, "module", "consensus") - css[i] = newStateWithConfigAndBlockStore(l, thisConfig, state, privVals[i], app, blockStore) + css[i] = newStateWithConfigAndBlockStore(ctx, l, thisConfig, state, privVals[i], app, blockStore) css[i].SetTimeoutTicker(tickerFunc()) } @@ -777,6 +805,7 @@ func randConsensusState( // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers( + ctx context.Context, cfg *config.Config, nValidators, nPeers int, @@ -830,7 +859,7 @@ func randConsensusNetWithPeers( app.InitChain(abci.RequestInitChain{Validators: vals}) // sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above - css[i] = newStateWithConfig(logger.With("validator", i, "module", "consensus"), thisConfig, state, privVal, app) + css[i] = newStateWithConfig(ctx, logger.With("validator", i, "module", "consensus"), thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) } return css, genDoc, peer0Config, func() { @@ -870,7 +899,7 @@ type mockTicker struct { fired bool } -func (m *mockTicker) Start() error { +func (m *mockTicker) Start(context.Context) error { return nil } diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index fae89f227..fc872a9fa 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -17,10 +17,13 @@ import ( ) func TestReactorInvalidPrecommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := randConsensusState(ctx, t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) @@ -30,11 +33,11 @@ func TestReactorInvalidPrecommit(t *testing.T) { states[i].SetTimeoutTicker(ticker) } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - 
reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // this val sends a random precommit at each height @@ -48,7 +51,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { byzState.mtx.Lock() privVal := byzState.privValidator byzState.doPrevote = func(height int64, round int32) { - invalidDoPrevoteFunc(t, height, round, byzState, byzReactor, privVal) + invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal) } byzState.mtx.Unlock() @@ -56,8 +59,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { // // TODO: Make this tighter by ensuring the halt happens by block 2. var wg sync.WaitGroup - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + for i := 0; i < 10; i++ { for _, sub := range rts.subs { wg.Add(1) @@ -75,7 +77,15 @@ func TestReactorInvalidPrecommit(t *testing.T) { wg.Wait() } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) { +func invalidDoPrevoteFunc( + ctx context.Context, + t *testing.T, + height int64, + round int32, + cs *State, + r *Reactor, + pv types.PrivValidator, +) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -84,7 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r cs.mtx.Lock() cs.privValidator = pv - pubKey, err := cs.privValidator.GetPubKey(context.Background()) + pubKey, err := cs.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() @@ -105,7 +115,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r } p := precommit.ToProto() - err = cs.privValidator.SignVote(context.Background(), cs.state.ChainID, p) + err = cs.privValidator.SignVote(ctx, cs.state.ChainID, p) require.NoError(t, err) precommit.Signature = p.Signature diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 78f42f993..e24cb1160 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -27,6 +27,9 @@ func assertMempool(txn txNotifier) mempool.Mempool { } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseConfig := configSetup(t) config, err := ResetConfig("consensus_mempool_txs_available_test") @@ -35,15 +38,15 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) + cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, height, round) ensureNewEventOnChannel(newBlockCh) // first block gets committed ensureNoNewEventOnChannel(newBlockCh) - deliverTxsRange(cs, 0, 1) + deliverTxsRange(ctx, cs, 0, 1) ensureNewEventOnChannel(newBlockCh) // commit txs ensureNewEventOnChannel(newBlockCh) // commit updated app hash ensureNoNewEventOnChannel(newBlockCh) @@ -51,6 +54,8 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { baseConfig := configSetup(t) + ctx, 
cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	config, err := ResetConfig("consensus_mempool_txs_available_test")
 	require.NoError(t, err)
@@ -58,12 +63,12 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 	config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
 
 	state, privVals := randGenesisState(baseConfig, 1, false, 10)
-	cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
+	cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
 
 	assertMempool(cs.txNotifier).EnableTxsAvailable()
 
-	newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock)
-	startTestRound(cs, cs.Height, cs.Round)
+	newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
+	startTestRound(ctx, cs, cs.Height, cs.Round)
 
 	ensureNewEventOnChannel(newBlockCh)   // first block gets committed
 	ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ...
@@ -72,6 +77,8 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 
 func TestMempoolProgressInHigherRound(t *testing.T) {
 	baseConfig := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	config, err := ResetConfig("consensus_mempool_txs_available_test")
 	require.NoError(t, err)
@@ -79,12 +86,12 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
 	config.Consensus.CreateEmptyBlocks = false
 
 	state, privVals := randGenesisState(baseConfig, 1, false, 10)
-	cs := newStateWithConfig(log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
+	cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
 	assertMempool(cs.txNotifier).EnableTxsAvailable()
 
 	height, round := cs.Height, cs.Round
-	newBlockCh := subscribe(t, cs.eventBus, types.EventQueryNewBlock)
-	newRoundCh := subscribe(t, cs.eventBus, types.EventQueryNewRound)
-	timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose)
+	newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
+	newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound)
+	timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose)
 	cs.setProposal = func(proposal *types.Proposal) error {
 		if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and
@@ -94,7 +101,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
 		}
 		return cs.defaultSetProposal(proposal)
 	}
-	startTestRound(cs, height, round)
+	startTestRound(ctx, cs, height, round)
 	ensureNewRound(newRoundCh, height, round) // first round at first height
 	ensureNewEventOnChannel(newBlockCh) // first block gets committed
@@ -103,7 +110,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
 	round = 0
 
 	ensureNewRound(newRoundCh, height, round) // first round at next height
-	deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
+	deliverTxsRange(ctx, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round
 	ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
 
 	round++ // moving to the next round
@@ -111,12 +118,12 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
 	ensureNewEventOnChannel(newBlockCh) // now we can commit the block
 }
 
-func deliverTxsRange(cs *State, start, end int) {
+func deliverTxsRange(ctx context.Context, cs *State, start, end int) {
 	// Deliver some txs.
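	// Each tx is the 8-byte big-endian encoding of its index, submitted to
	// the mempool through CheckTx with the caller's context.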
for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) + err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) if err != nil { panic(fmt.Sprintf("Error after CheckTx: %v", err)) } @@ -124,6 +131,9 @@ func deliverTxsRange(cs *State, start, end int) { } func TestMempoolTxConcurrentWithCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) logger := log.TestingLogger() state, privVals := randGenesisState(config, 1, false, 10) @@ -131,16 +141,17 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + ctx, logger, config, state, privVals[0], NewCounterApplication(), blockStore) err := stateStore.Save(state) require.NoError(t, err) - newBlockHeaderCh := subscribe(t, cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 - go deliverTxsRange(cs, 0, int(numTxs)) + go deliverTxsRange(ctx, cs, 0, int(numTxs)) - startTestRound(cs, cs.Height, cs.Round) + startTestRound(ctx, cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { select { case msg := <-newBlockHeaderCh: @@ -154,12 +165,14 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { func TestMempoolRmBadTx(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() state, privVals := randGenesisState(config, 1, false, 10) app := NewCounterApplication() stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(log.TestingLogger(), config, state, privVals[0], app, blockStore) + cs := newStateWithConfigAndBlockStore(ctx, log.TestingLogger(), config, state, privVals[0], app, blockStore) err := stateStore.Save(state) require.NoError(t, err) @@ -179,7 +192,7 @@ func TestMempoolRmBadTx(t *testing.T) { // Try to send the tx through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, func(r *abci.Response) { + err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.Response) { if r.GetCheckTx().Code != code.CodeTypeBadNonce { t.Errorf("expected checktx to return bad nonce, got %v", r) return diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index a28f54bf9..03865d13d 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "fmt" "runtime/debug" "time" @@ -85,7 +86,7 @@ type ReactorOption func(*Reactor) // NOTE: Temporary interface for switching to block sync, we should get rid of v0. // See: https://github.com/tendermint/tendermint/issues/4595 type BlockSyncReactor interface { - SwitchToBlockSync(sm.State) error + SwitchToBlockSync(context.Context, sm.State) error GetMaxPeerBlockHeight() int64 @@ -174,7 +175,7 @@ func NewReactor( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
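// The context passed to OnStart below also bounds the consensus state
// machine, which is now started via r.state.Start(ctx). A minimal
// caller-side sketch of the resulting lifecycle (hypothetical harness code):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := reactor.Start(ctx); err != nil {
//		return err
//	}
//	<-ctx.Done() // shutdown is driven by canceling ctx, not by calling Stop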
-func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { r.Logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) // start routine that computes peer statistics for evaluating peer quality @@ -186,7 +187,7 @@ func (r *Reactor) OnStart() error { r.subscribeToBroadcastEvents() if !r.WaitSync() { - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { return err } } @@ -264,7 +265,7 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { // SwitchToConsensus switches from block-sync mode to consensus mode. It resets // the state, turns off block-sync, and starts the consensus state-machine. -func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { +func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { r.Logger.Info("switching to consensus") // we have no votes, so reconstruct LastCommit from SeenCommit @@ -287,7 +288,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.state.doWALCatchup = false } - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { panic(fmt.Sprintf(`failed to start consensus state: %v conS: diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 8fc562b06..de6465f23 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -59,11 +59,17 @@ func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { } } -func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSuite { +func setup( + ctx context.Context, + t *testing.T, + numNodes int, + states []*State, + size int, +) *reactorTestSuite { t.Helper() rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), subs: make(map[types.NodeID]eventbus.Subscription, numNodes), @@ -75,7 +81,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size)) rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size)) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) // Canceled during cleanup (see below). 
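	// Each node's reactor and peer-update subscription below are started with
	// this ctx; the t.Cleanup hook cancels it and then runs leaktest.Check.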
i := 0 @@ -89,7 +95,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.dataChannels[nodeID], rts.voteChannels[nodeID], rts.voteSetBitsChannels[nodeID], - node.MakePeerUpdates(t), + node.MakePeerUpdates(ctx, t), true, ) @@ -119,7 +125,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu require.NoError(t, state.blockExec.Store().Save(state.state)) } - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) i++ @@ -131,14 +137,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts.network.Start(t) t.Cleanup(func() { - for nodeID, r := range rts.reactors { - require.NoError(t, rts.states[nodeID].eventBus.Stop()) - require.NoError(t, r.Stop()) - require.False(t, r.IsRunning()) - } - - leaktest.Check(t) cancel() + leaktest.Check(t) }) return rts @@ -162,6 +162,7 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { } func waitForAndValidateBlock( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, @@ -169,8 +170,9 @@ func waitForAndValidateBlock( states []*State, txs ...[]byte, ) { + t.Helper() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(bctx) defer cancel() fn := func(j int) { msg, err := blocksSubs[j].Next(ctx) @@ -183,7 +185,7 @@ func waitForAndValidateBlock( require.NoError(t, validateBlock(newBlock, activeVals)) for _, tx := range txs { - require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) + require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{})) } } @@ -200,6 +202,7 @@ func waitForAndValidateBlock( } func waitForAndValidateBlockWithTx( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, @@ -207,8 +210,9 @@ func waitForAndValidateBlockWithTx( states []*State, txs ...[]byte, ) { + t.Helper() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(bctx) defer cancel() fn := func(j int) { ntxs := 0 @@ -249,15 +253,17 @@ func waitForAndValidateBlockWithTx( } func waitForBlockWithUpdatedValsAndValidateIt( + bctx context.Context, t *testing.T, n int, updatedVals map[string]struct{}, blocksSubs []eventbus.Subscription, css []*State, ) { - - ctx, cancel := context.WithCancel(context.Background()) + t.Helper() + ctx, cancel := context.WithCancel(bctx) defer cancel() + fn := func(j int) { var newBlock *types.Block @@ -299,23 +305,24 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he } func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := randConsensusState(ctx, t, cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -351,6 +358,9 @@ func TestReactorBasic(t *testing.T) { } func 
TestReactorWithEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) n := 4 @@ -416,7 +426,7 @@ func TestReactorWithEvidence(t *testing.T) { cs.SetPrivValidator(pv) eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) - require.NoError(t, eventBus.Start()) + require.NoError(t, eventBus.Start(ctx)) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) @@ -424,15 +434,13 @@ func TestReactorWithEvidence(t *testing.T) { states[i] = cs } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -456,10 +464,14 @@ func TestReactorWithEvidence(t *testing.T) { } func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState( + ctx, t, cfg, n, @@ -473,26 +485,24 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // send a tx require.NoError( t, assertMempool(states[3].txNotifier).CheckTx( - context.Background(), + ctx, []byte{1, 2, 3}, nil, mempool.TxInfo{}, ), ) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -511,23 +521,24 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } func TestReactorRecordsVotesAndBlockParts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := randConsensusState(ctx, t, cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -575,10 +586,14 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorVotingPowerChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) n := 4 states, cleanup := randConsensusState( + ctx, t, cfg, n, @@ -589,25 +604,23 @@ func TestReactorVotingPowerChange(t *testing.T) { t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := 
reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < n; i++ { - pubKey, err := states[i].privValidator.GetPubKey(context.Background()) + pubKey, err := states[i].privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() activeVals[string(addr)] = struct{}{} } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -629,7 +642,7 @@ func TestReactorVotingPowerChange(t *testing.T) { blocksSubs = append(blocksSubs, sub) } - val1PubKey, err := states[0].privValidator.GetPubKey(context.Background()) + val1PubKey, err := states[0].privValidator.GetPubKey(ctx) require.NoError(t, err) val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey) @@ -638,10 +651,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) previousTotalVotingPower := states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -653,10 +666,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, states[0].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -667,10 +680,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, 
activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -681,11 +694,15 @@ func TestReactorVotingPowerChange(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := configSetup(t) nPeers := 7 nVals := 4 states, _, _, cleanup := randConsensusNetWithPeers( + ctx, cfg, nVals, nPeers, @@ -695,11 +712,11 @@ func TestReactorValidatorSetChanges(t *testing.T) { ) t.Cleanup(cleanup) - rts := setup(t, nPeers, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nPeers, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators @@ -711,8 +728,6 @@ func TestReactorValidatorSetChanges(t *testing.T) { activeVals[string(pubKey.Address())] = struct{}{} } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() var wg sync.WaitGroup for _, sub := range rts.subs { wg.Add(1) @@ -729,7 +744,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Wait() - newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) @@ -745,24 +760,24 @@ func TestReactorValidatorSetChanges(t *testing.T) { // wait till everyone makes block 2 // ensure the commit includes all validators // send newValTx to change vals in block 3 - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 3. // it includes the commit for block 2, which is by the original validator set - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 4. 
// it includes the commit for block 3, which is by the original validator set - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) // the commits for block 4 should be with the updated validator set activeVals[string(newValidatorPubKey1.Address())] = struct{}{} // wait till everyone makes block 5 // it includes the commit for block 4, which should have the updated validator set - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) - updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) @@ -771,10 +786,10 @@ func TestReactorValidatorSetChanges(t *testing.T) { updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) previousTotalVotingPower := states[nVals].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) require.NotEqualf( t, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -782,7 +797,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { previousTotalVotingPower, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), ) - newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) @@ -790,7 +805,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background()) + newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) @@ -798,24 +813,24 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) 
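	// from here on commits must come from the enlarged validator set, so mark
	// the two new validators active before waiting for the updated-vals block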
activeVals[string(newValidatorPubKey2.Address())] = struct{}{} activeVals[string(newValidatorPubKey3.Address())] = struct{}{} - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) delete(activeVals, string(newValidatorPubKey2.Address())) delete(activeVals, string(newValidatorPubKey3.Address())) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 563e5ca64..f40389f2b 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -237,10 +237,10 @@ func (h *Handshaker) NBlocks() int { } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. - res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) + res, err := proxyApp.Query().InfoSync(ctx, proxy.RequestInfo) if err != nil { return fmt.Errorf("error calling Info: %v", err) } @@ -264,7 +264,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } @@ -281,6 +281,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // matches the current state. // Returns the final AppHash or an error. 
func (h *Handshaker) ReplayBlocks( + ctx context.Context, state sm.State, appHash []byte, appBlockHeight int64, @@ -315,7 +316,7 @@ func (h *Handshaker) ReplayBlocks( Validators: nextVals, AppStateBytes: h.genDoc.AppState, } - res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) + res, err := proxyApp.Consensus().InitChainSync(ctx, req) if err != nil { return nil, err } @@ -421,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } - mockApp := newMockProxyApp(h.logger, appHash, abciResponses) + mockApp := newMockProxyApp(ctx, h.logger, appHash, abciResponses) h.logger.Info("Replay last block using mock app") state, err = h.replayBlock(state, storeBlockHeight, mockApp) return state.AppHash, err diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 6f1e64b2a..1de0ffa0e 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -32,17 +32,18 @@ const ( // replay the wal file func RunReplayFile( + ctx context.Context, logger log.Logger, cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool, ) error { - consensusState, err := newConsensusStateForReplay(cfg, logger, csConfig) + consensusState, err := newConsensusStateForReplay(ctx, cfg, logger, csConfig) if err != nil { return err } - if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { + if err := consensusState.ReplayFile(ctx, csConfig.WalFile(), console); err != nil { return fmt.Errorf("consensus replay: %w", err) } @@ -50,7 +51,7 @@ func RunReplayFile( } // Replay msgs in file or start the console -func (cs *State) ReplayFile(file string, console bool) error { +func (cs *State) ReplayFile(ctx context.Context, file string, console bool) error { if cs.IsRunning() { return errors.New("cs is already running, cannot replay") @@ -63,7 +64,6 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected - ctx := context.Background() newStepSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: subscriber, Query: types.EventQueryNewRoundStep, @@ -307,6 +307,7 @@ func (pb *playback) replayConsoleLoop() (int, error) { // convenience for replay mode func newConsensusStateForReplay( + ctx context.Context, cfg config.BaseConfig, logger log.Logger, csConfig *config.ConsensusConfig, @@ -339,19 +340,19 @@ func newConsensusStateForReplay( // Create proxyAppConn connection (consensus, mempool, query) clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) - err = proxyApp.Start() + err = proxyApp.Start(ctx) if err != nil { return nil, fmt.Errorf("starting proxy app conns: %w", err) } eventBus := eventbus.NewDefault(logger) - if err := eventBus.Start(); err != nil { + if err := eventBus.Start(ctx); err != nil { return nil, fmt.Errorf("failed to start event bus: %w", err) } handshaker := NewHandshaker(logger, stateStore, state, blockStore, eventBus, gdoc) - if err = handshaker.Handshake(proxyApp); err != nil { + if err = handshaker.Handshake(ctx, proxyApp); err != nil { return nil, err } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 679aba611..8672f8e1e 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -55,13 +55,18 @@ func (emptyMempool) CloseWAL() {} // Useful because we don't want to call Commit() twice for the same block on // 
the real app. -func newMockProxyApp(logger log.Logger, appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { +func newMockProxyApp( + ctx context.Context, + logger log.Logger, + appHash []byte, + abciResponses *tmstate.ABCIResponses, +) proxy.AppConnConsensus { clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ appHash: appHash, abciResponses: abciResponses, }) cli, _ := clientCreator(logger) - err := cli.Start() + err := cli.Start(ctx) if err != nil { panic(err) } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index f5f5d1633..bf1927742 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -56,7 +56,7 @@ import ( // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. -func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config, +func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) @@ -64,6 +64,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co privValidator := loadPrivValidator(consensusReplayConfig) blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + ctx, logger, consensusReplayConfig, state, @@ -75,7 +76,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) defer func() { if err := cs.Stop(); err != nil { @@ -87,12 +88,12 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Co // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. - newBlockSub, err := cs.eventBus.SubscribeWithArgs(context.Background(), pubsub.SubscribeArgs{ + newBlockSub, err := cs.eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: testSubscriber, Query: types.EventQueryNewBlock, }) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + ctx, cancel := context.WithTimeout(ctx, 120*time.Second) defer cancel() _, err = newBlockSub.Next(ctx) if errors.Is(err, context.DeadlineExceeded) { @@ -109,7 +110,7 @@ func sendTxs(ctx context.Context, cs *State) { return default: tx := []byte{byte(i)} - if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { + if err := assertMempool(cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}); err != nil { panic(err) } i++ @@ -119,6 +120,9 @@ func sendTxs(ctx context.Context, cs *State) { // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
func TestWALCrash(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testCases := []struct { name string initFn func(dbm.DB, *State, context.Context) @@ -139,12 +143,12 @@ func TestWALCrash(t *testing.T) { t.Run(tc.name, func(t *testing.T) { consensusReplayConfig, err := ResetConfig(tc.name) require.NoError(t, err) - crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) + crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) } } -func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config, +func crashWALandCheckLiveness(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -164,6 +168,7 @@ LOOP: require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) cs := newStateWithConfigAndBlockStore( + ctx, logger, consensusReplayConfig, state, @@ -173,7 +178,7 @@ LOOP: ) // start sending transactions - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) initFn(stateDB, cs, ctx) // clean up WAL file from the previous iteration @@ -181,7 +186,7 @@ LOOP: os.Remove(walFile) // set crashing WAL - csWal, err := cs.OpenWAL(walFile) + csWal, err := cs.OpenWAL(ctx, walFile) require.NoError(t, err) crashingWal.next = csWal @@ -190,7 +195,7 @@ LOOP: cs.wal = crashingWal // start consensus state - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) i++ @@ -200,7 +205,7 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -286,9 +291,9 @@ func (w *crashingWAL) SearchForEndHeight( return w.next.SearchForEndHeight(height, options) } -func (w *crashingWAL) Start() error { return w.next.Start() } -func (w *crashingWAL) Stop() error { return w.next.Stop() } -func (w *crashingWAL) Wait() { w.next.Wait() } +func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) } +func (w *crashingWAL) Stop() error { return w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ type simulatorTestSuite struct { @@ -316,7 +321,7 @@ const ( var modes = []uint{0, 1, 2, 3} // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay -func setupSimulator(t *testing.T) *simulatorTestSuite { +func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { t.Helper() cfg := configSetup(t) @@ -329,6 +334,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { nVals := 4 css, genDoc, cfg, cleanup := randConsensusNetWithPeers( + ctx, cfg, nVals, nPeers, @@ -341,8 +347,8 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(t, css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(t, css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, 
css[0].eventBus, types.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { @@ -351,13 +357,13 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height, round := css[0].Height, css[0].Round // start the machine - startTestRound(css[0], height, round) + startTestRound(ctx, css[0], height, round) incrementHeight(vss...) ensureNewRound(newRoundCh, height, 0) ensureNewProposal(proposalCh, height, round) rs := css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) @@ -366,12 +372,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 2 height++ incrementHeight(vss...) - newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) @@ -379,7 +385,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal := types.NewProposal(vss[1].Height, round, -1, blockID) p := proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -390,7 +396,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) @@ -398,12 +404,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 3 height++ incrementHeight(vss...) 
- updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -411,7 +417,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[2].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -422,7 +428,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) @@ -430,19 +436,19 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // HEIGHT 4 height++ incrementHeight(vss...) - newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) - newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background()) + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -453,10 +459,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { valIndexFn := func(cssIdx int) int { for i, vs := range newVss { - vsPubKey, err := vs.GetPubKey(context.Background()) + vsPubKey, err := vs.GetPubKey(ctx) require.NoError(t, err) - cssPubKey, err := css[cssIdx].privValidator.GetPubKey(context.Background()) + cssPubKey, err := css[cssIdx].privValidator.GetPubKey(ctx) require.NoError(t, err) if vsPubKey.Equals(cssPubKey) { @@ -470,7 +476,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal = types.NewProposal(vss[3].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[3].SignProposal(context.Background(), 
cfg.ChainID(), p); err != nil { + if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -482,7 +488,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { ensureNewProposal(proposalCh, height, round) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{}) assert.Nil(t, err) rs = css[0].GetRoundState() @@ -490,7 +496,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } @@ -511,7 +517,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } @@ -521,7 +527,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -533,7 +539,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -548,7 +554,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], + signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } @@ -569,55 +575,70 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, false) + testHandshakeReplay(ctx, t, sim, 0, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, true) + testHandshakeReplay(ctx, t, sim, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, false) + testHandshakeReplay(ctx, t, sim, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, true) + testHandshakeReplay(ctx, t, sim, 2, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := 
setupSimulator(ctx, t)

 	for _, m := range modes {
-		testHandshakeReplay(t, sim, numBlocks-1, m, false)
+		testHandshakeReplay(ctx, t, sim, numBlocks-1, m, false)
 	}
 	for _, m := range modes {
-		testHandshakeReplay(t, sim, numBlocks-1, m, true)
+		testHandshakeReplay(ctx, t, sim, numBlocks-1, m, true)
 	}
 }

 // Sync from caught up
 func TestHandshakeReplayNone(t *testing.T) {
-	sim := setupSimulator(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	sim := setupSimulator(ctx, t)

 	for _, m := range modes {
-		testHandshakeReplay(t, sim, numBlocks, m, false)
+		testHandshakeReplay(ctx, t, sim, numBlocks, m, false)
 	}
 	for _, m := range modes {
-		testHandshakeReplay(t, sim, numBlocks, m, true)
+		testHandshakeReplay(ctx, t, sim, numBlocks, m, true)
 	}
 }

 // Test mockProxyApp should not panic when the app returns ABCIResponses with some empty ResponseDeliverTx
 func TestMockProxyApp(t *testing.T) {
-	sim := setupSimulator(t) // setup config and simulator
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	sim := setupSimulator(ctx, t) // setup config and simulator
 	cfg := sim.Config
 	assert.NotNil(t, cfg)
@@ -639,7 +660,7 @@ func TestMockProxyApp(t *testing.T) {
 		err = proto.Unmarshal(bytes, loadedAbciRes)
 		require.NoError(t, err)

-		mock := newMockProxyApp(logger, []byte("mock_hash"), loadedAbciRes)
+		mock := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes)

 		abciRes := new(tmstate.ABCIResponses)
 		abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
@@ -663,7 +684,7 @@ func TestMockProxyApp(t *testing.T) {
 		mock.SetResponseCallback(proxyCb)

 		someTx := []byte("tx")
-		_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
+		_, err = mock.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: someTx})
 		assert.NoError(t, err)
 	})
 	assert.True(t, validTxs == 1)
@@ -687,12 +708,23 @@ func tempWALWithData(data []byte) string {

 // Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks -func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) { +func testHandshakeReplay( + ctx context.Context, + t *testing.T, + sim *simulatorTestSuite, + nBlocks int, + mode uint, + testValidatorsChange bool, +) { var chain []*types.Block var commits []*types.Commit var store *mockBlockStore var stateDB dbm.DB var genesisState sm.State + var cancel context.CancelFunc + + ctx, cancel = context.WithCancel(ctx) + t.Cleanup(cancel) cfg := sim.Config @@ -712,7 +744,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() - walBody, err := WALWithNBlocks(t, numBlocks) + walBody, err := WALWithNBlocks(ctx, t, numBlocks) require.NoError(t, err) walFile := tempWALWithData(walBody) cfg.Consensus.SetWalFile(walFile) @@ -722,16 +754,12 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod wal, err := NewWAL(logger, walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); wal.Wait() }) chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion) @@ -742,7 +770,19 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(cfg, logger, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) + state = buildTMStateFromChain( + ctx, + cfg, + logger, + sim.Mempool, + sim.Evpool, + stateStore, + state, + chain, + nBlocks, + mode, + store, + ) latestAppHash := state.AppHash // make a new client creator @@ -759,7 +799,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) - buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) + buildAppStateFromChain(ctx, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) } // Prune block store if requested @@ -775,17 +815,13 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(ctx); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) - err := handshaker.Handshake(proxyApp) + err := handshaker.Handshake(ctx, proxyApp) if expectError { require.Error(t, err) return @@ -794,7 +830,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } // get the latest app hash from the app - res, 
err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""}) + res, err := proxyApp.Query().InfoSync(ctx, abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -838,6 +874,7 @@ func applyBlock(stateStore sm.Store, } func buildAppStateFromChain( + ctx context.Context, proxyApp proxy.AppConns, stateStore sm.Store, mempool mempool.Mempool, @@ -846,16 +883,16 @@ func buildAppStateFromChain( chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) { + blockStore *mockBlockStore, +) { // start a new app without handshake, play nBlocks blocks - if err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(ctx); err != nil { panic(err) } - defer proxyApp.Stop() //nolint:errcheck // ignore state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -887,6 +924,7 @@ func buildAppStateFromChain( } func buildTMStateFromChain( + ctx context.Context, cfg *config.Config, logger log.Logger, mempool mempool.Mempool, @@ -905,14 +943,13 @@ func buildTMStateFromChain( clientCreator := abciclient.NewLocalCreator(kvstoreApp) proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(ctx); err != nil { panic(err) } - defer proxyApp.Stop() //nolint:errcheck state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -949,13 +986,17 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x01 // - 0x02 // - 0x03 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := ResetConfig("handshake_test_") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) @@ -975,17 +1016,13 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { app := &badApp{numBlocks: 3, allHashesAreWrong: true} clientCreator := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) - err := proxyApp.Start() + err := proxyApp.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) - if err = h.Handshake(proxyApp); err != nil { + if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } }) @@ -999,17 +1036,13 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} clientCreator := 
abciclient.NewLocalCreator(app)
 	proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
-	err := proxyApp.Start()
+	err := proxyApp.Start(ctx)
 	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := proxyApp.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
+	t.Cleanup(func() { cancel(); proxyApp.Wait() })

 	assert.Panics(t, func() {
 		h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
-		if err = h.Handshake(proxyApp); err != nil {
+		if err = h.Handshake(ctx, proxyApp); err != nil {
 			t.Log(err)
 		}
 	})
@@ -1237,6 +1270,9 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {

 // Test handshake/init chain
 func TestHandshakeUpdatesValidators(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	val, _ := factory.RandValidator(true, 10)
 	vals := types.NewValidatorSet([]*types.Validator{val})
 	app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
@@ -1248,7 +1284,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {

 	privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
 	require.NoError(t, err)
-	pubKey, err := privVal.GetPubKey(context.Background())
+	pubKey, err := privVal.GetPubKey(ctx)
 	require.NoError(t, err)
 	stateDB, state, store := stateAndStore(cfg, pubKey, 0x0)
 	stateStore := sm.NewStore(stateDB)
@@ -1262,12 +1298,11 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
 	logger := log.TestingLogger()
 	handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
 	proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
-	if err := proxyApp.Start(); err != nil {
+	if err := proxyApp.Start(ctx); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
-	t.Cleanup(func() { require.NoError(t, proxyApp.Stop()) })

-	if err := handshaker.Handshake(proxyApp); err != nil {
+	if err := handshaker.Handshake(ctx, proxyApp); err != nil {
 		t.Fatalf("Error on abci handshake: %v", err)
 	}

 	// reload the state, check the validator set was updated
diff --git a/internal/consensus/state.go b/internal/consensus/state.go
index 6cef4ac8f..8cf1a7f9f 100644
--- a/internal/consensus/state.go
+++ b/internal/consensus/state.go
@@ -329,11 +329,11 @@ func (cs *State) LoadCommit(height int64) *types.Commit {

 // OnStart loads the latest state via the WAL, and starts the timeout and
 // receive routines.
-func (cs *State) OnStart() error {
+func (cs *State) OnStart(ctx context.Context) error {
 	// We may set the WAL in testing before calling Start, so only OpenWAL if it's
 	// still the nilWAL.
 	if _, ok := cs.wal.(nilWAL); ok {
-		if err := cs.loadWalFile(); err != nil {
+		if err := cs.loadWalFile(ctx); err != nil {
 			return err
 		}
 	}
@@ -384,13 +384,13 @@ func (cs *State) OnStart() error {
 			cs.Logger.Info("successful WAL repair")

 			// reload WAL file
-			if err := cs.loadWalFile(); err != nil {
+			if err := cs.loadWalFile(ctx); err != nil {
 				return err
 			}
 		}
 	}

-	if err := cs.evsw.Start(); err != nil {
+	if err := cs.evsw.Start(ctx); err != nil {
 		return err
 	}

@@ -399,7 +399,7 @@ func (cs *State) OnStart() error {
 	// NOTE: we will get a build up of garbage go routines
 	// firing on the tockChan until the receiveRoutine is started
 	// to deal with them (by that point, at most one will be valid)
-	if err := cs.timeoutTicker.Start(); err != nil {
+	if err := cs.timeoutTicker.Start(ctx); err != nil {
 		return err
 	}

@@ -420,8 +420,8 @@ func (cs *State) OnStart() error {

 // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
 // receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
-func (cs *State) startRoutines(maxSteps int) {
-	err := cs.timeoutTicker.Start()
+func (cs *State) startRoutines(ctx context.Context, maxSteps int) {
+	err := cs.timeoutTicker.Start(ctx)
 	if err != nil {
 		cs.Logger.Error("failed to start timeout ticker", "err", err)
 		return
@@ -431,8 +431,8 @@ func (cs *State) startRoutines(maxSteps int) {
 }

 // loadWalFile loads WAL data from file. It overwrites cs.wal.
-func (cs *State) loadWalFile() error {
-	wal, err := cs.OpenWAL(cs.config.WalFile())
+func (cs *State) loadWalFile(ctx context.Context) error {
+	wal, err := cs.OpenWAL(ctx, cs.config.WalFile())
 	if err != nil {
 		cs.Logger.Error("failed to load state WAL", "err", err)
 		return err
@@ -457,11 +457,15 @@ func (cs *State) OnStop() {
 	close(cs.onStopCh)

 	if err := cs.evsw.Stop(); err != nil {
-		cs.Logger.Error("failed trying to stop eventSwitch", "error", err)
+		if !errors.Is(err, service.ErrAlreadyStopped) {
+			cs.Logger.Error("failed trying to stop eventSwitch", "error", err)
+		}
 	}

 	if err := cs.timeoutTicker.Stop(); err != nil {
-		cs.Logger.Error("failed trying to stop timeoutTicket", "error", err)
+		if !errors.Is(err, service.ErrAlreadyStopped) {
+			cs.Logger.Error("failed trying to stop timeoutTicker", "error", err)
+		}
 	}

 	// WAL is stopped in receiveRoutine.
 }
@@ -475,14 +479,14 @@ func (cs *State) Wait() {

 // OpenWAL opens a file to log all consensus messages and timeouts for
 // deterministic accountability.
-func (cs *State) OpenWAL(walFile string) (WAL, error) { +func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) { wal, err := NewWAL(cs.Logger.With("wal", walFile), walFile) if err != nil { cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) return nil, err } - if err := wal.Start(); err != nil { + if err := wal.Start(ctx); err != nil { cs.Logger.Error("failed to start WAL", "err", err) return nil, err } @@ -762,7 +766,9 @@ func (cs *State) receiveRoutine(maxSteps int) { // close wal now that we're done writing to it if err := cs.wal.Stop(); err != nil { - cs.Logger.Error("failed trying to stop WAL", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.Logger.Error("failed trying to stop WAL", "error", err) + } } cs.wal.Wait() diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 1d0741b1f..9216577d5 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -56,24 +56,26 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh // ProposeSuite func TestStateProposerSelection0(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) - cs1, vss, err := randState(config, log.TestingLogger(), 4) + cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4) require.NoError(t, err) height, round := cs1.Height, cs1.Round - newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) - proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // Wait for new round so proposer is set. ensureNewRound(newRoundCh, height, round) // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() - pv, err := cs1.privValidator.GetPubKey(context.Background()) + pv, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() if !bytes.Equal(prop.Address, address) { @@ -84,13 +86,21 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(config, cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes( + ctx, + config, + cs1, + tmproto.PrecommitType, + rs.ProposalBlock.Hash(), + rs.ProposalBlockParts.Header(), + vss[1:]..., + ) // Wait for new round so next validator is set. 
ensureNewRound(newRoundCh, height+1, 0)

 	prop = cs1.GetRoundState().Validators.GetProposer()
-	pv1, err := vss[1].GetPubKey(context.Background())
+	pv1, err := vss[1].GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
 	if !bytes.Equal(prop.Address, addr) {
@@ -102,25 +112,28 @@ func TestStateProposerSelection0(t *testing.T) {
 func TestStateProposerSelection2(t *testing.T) {
 	config := configSetup(t)

-	cs1, vss, err := randState(config, log.TestingLogger(), 4) // test needs more work for more than 3 validators
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4) // test needs more work for more than 3 validators
 	require.NoError(t, err)
 	height := cs1.Height

-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)

 	// this time we jump in at round 2
 	incrementRound(vss[1:]...)
 	incrementRound(vss[1:]...)

 	var round int32 = 2
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)

 	ensureNewRound(newRoundCh, height, round) // wait for the new round

 	// everyone just votes nil. we get a new proposer each round
 	for i := int32(0); int(i) < len(vss); i++ {
 		prop := cs1.GetRoundState().Validators.GetProposer()
-		pvk, err := vss[int(i+round)%len(vss)].GetPubKey(context.Background())
+		pvk, err := vss[int(i+round)%len(vss)].GetPubKey(ctx)
 		require.NoError(t, err)
 		addr := pvk.Address()
 		correctProposer := addr
@@ -132,7 +145,7 @@ func TestStateProposerSelection2(t *testing.T) {
 	}

 	rs := cs1.GetRoundState()
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
 	ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round
 	incrementRound(vss[1:]...)
 }
@@ -142,16 +155,18 @@ func TestStateProposerSelection2(t *testing.T) {

 // a non-validator should time out into the prevote round
 func TestStateEnterProposeNoPrivValidator(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

-	cs, _, err := randState(config, log.TestingLogger(), 1)
+	cs, _, err := randState(ctx, config, log.TestingLogger(), 1)
 	require.NoError(t, err)
 	cs.SetPrivValidator(nil)
 	height, round := cs.Height, cs.Round

 	// Listen for propose timeout event
-	timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose)
+	timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose)

-	startTestRound(cs, height, round)
+	startTestRound(ctx, cs, height, round)

 	// if we're not a validator, EnterPropose should time out
 	ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
@@ -164,18 +179,20 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) {

 // a validator should not time out of the prevote round (TODO: unless the block is really big!)
func TestStateEnterProposeYesPrivValidator(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _, err := randState(config, log.TestingLogger(), 1) + cs, _, err := randState(ctx, config, log.TestingLogger(), 1) require.NoError(t, err) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(t, cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(t, cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) cs.enterNewRound(height, round) - cs.startRoutines(3) + cs.startRoutines(ctx, 3) ensureNewProposal(proposalCh, height, round) @@ -197,16 +214,18 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { func TestStateBadProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, log.TestingLogger(), 2) + cs1, vss, err := randState(ctx, config, log.TestingLogger(), 2) require.NoError(t, err) height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) @@ -225,7 +244,7 @@ func TestStateBadProposal(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(vs2.Height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vs2.SignProposal(ctx, config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } @@ -237,29 +256,31 @@ func TestStateBadProposal(t *testing.T) { } // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // wait for proposal ensureProposal(proposalCh, height, round, blockID) // wait for prevote ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) // wait for precommit ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } func TestStateOversizedBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, log.TestingLogger(), 2) + cs1, vss, err := randState(ctx, config, log.TestingLogger(), 2) require.NoError(t, err) cs1.state.ConsensusParams.Block.MaxBytes = 2000 height, round 
:= cs1.Height, cs1.Round @@ -267,8 +288,8 @@ func TestStateOversizedBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} @@ -282,7 +303,7 @@ func TestStateOversizedBlock(t *testing.T) { blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(height, round, -1, blockID) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vs2.SignProposal(ctx, config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -298,7 +319,7 @@ func TestStateOversizedBlock(t *testing.T) { } // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) @@ -309,12 +330,12 @@ func TestStateOversizedBlock(t *testing.T) { // and then should send nil prevote and precommit regardless of whether other validators prevote and // precommit on it ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -324,8 +345,10 @@ func TestStateOversizedBlock(t *testing.T) { func TestStateFullRound1(t *testing.T) { config := configSetup(t) logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, logger, 1) + cs, vss, err := randState(ctx, config, logger, 1) require.NoError(t, err) height, round := cs.Height, cs.Round @@ -337,16 +360,16 @@ func TestStateFullRound1(t *testing.T) { eventBus := eventbus.NewDefault(logger.With("module", "events")) cs.SetEventBus(eventBus) - if err := eventBus.Start(); err != nil { + if err := eventBus.Start(ctx); err != nil { t.Error(err) } - voteCh := subscribe(t, cs.eventBus, types.EventQueryVote) - propCh := subscribe(t, cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(t, cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) + propCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) 
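The Start calls in the hunks above (eventBus.Start(ctx), and earlier proxyApp.Start(ctx) and wal.Start(ctx)) all follow the same service-lifecycle shape, paired with the hedged errors.Is(err, service.ErrAlreadyStopped) checks added in state.go. A rough sketch of that shape, under the assumption that the service package exports a sentinel like ErrAlreadyStopped (the Service type here is illustrative, not the actual implementation):

	package main

	import (
		"context"
		"errors"
		"fmt"
	)

	// ErrAlreadyStopped is a stand-in for the sentinel the service package exports.
	var ErrAlreadyStopped = errors.New("already stopped")

	type Service struct{ stopped bool }

	// Start now takes the caller's context; a real service would launch its
	// run loop here and exit when ctx is cancelled.
	func (s *Service) Start(ctx context.Context) error {
		return ctx.Err() // nil unless the context is already cancelled
	}

	// Stop reports ErrAlreadyStopped on a double-stop instead of failing loudly.
	func (s *Service) Stop() error {
		if s.stopped {
			return ErrAlreadyStopped
		}
		s.stopped = true
		return nil
	}

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		s := &Service{}
		if err := s.Start(ctx); err != nil {
			fmt.Println("start:", err)
		}

		_ = s.Stop() // normal shutdown
		// Mirrors the hedged logging added in this patch: a second Stop (for
		// example, after cancellation already tore the service down) is benign.
		if err := s.Stop(); err != nil && !errors.Is(err, ErrAlreadyStopped) {
			fmt.Println("failed trying to stop service:", err)
		}
	}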
ensureNewRound(newRoundCh, height, round) @@ -354,51 +377,55 @@ func TestStateFullRound1(t *testing.T) { propBlockHash := cs.GetRoundState().ProposalBlock.Hash() ensurePrevote(voteCh, height, round) // wait for prevote - validatePrevote(t, cs, round, vss[0], propBlockHash) + validatePrevote(ctx, t, cs, round, vss[0], propBlockHash) ensurePrecommit(voteCh, height, round) // wait for precommit // we're going to roll right into new height ensureNewRound(newRoundCh, height+1, 0) - validateLastPrecommit(t, cs, vss[0], propBlockHash) + validateLastPrecommit(ctx, t, cs, vss[0], propBlockHash) } // nil is proposed, so prevote and precommit nil func TestStateFullRoundNil(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, log.TestingLogger(), 1) + cs, vss, err := randState(ctx, config, log.TestingLogger(), 1) require.NoError(t, err) height, round := cs.Height, cs.Round - voteCh := subscribe(t, cs.eventBus, types.EventQueryVote) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) - cs.startRoutines(4) + cs.startRoutines(ctx, 4) ensurePrevote(voteCh, height, round) // prevote ensurePrecommit(voteCh, height, round) // precommit // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) + validatePrevoteAndPrecommit(ctx, t, cs, round, -1, vss[0], nil, nil) } // run through propose, prevote, precommit commit with two validators // where the first validator has to wait for votes from the second func TestStateFullRound2(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, log.TestingLogger(), 2) + cs1, vss, err := randState(ctx, config, log.TestingLogger(), 2) require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) ensurePrevote(voteCh, height, round) // prevote @@ -407,17 +434,17 @@ func TestStateFullRound2(t *testing.T) { propBlockHash, propPartSetHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propPartSetHeader, vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propPartSetHeader, vs2) ensurePrevote(voteCh, height, round) // prevote ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) + validatePrecommit(ctx, t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propPartSetHeader, vs2) + signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlockHash, propPartSetHeader, vs2) ensurePrecommit(voteCh, height, round) // wait to finish commit, propose in next height @@ -431,19 +458,21 @@ func TestStateFullRound2(t *testing.T) { // two vals take turns proposing. 
val1 locks on first one, precommits nil on everything else func TestStateLockNoPOL(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, log.TestingLogger(), 2) + cs1, vss, err := randState(ctx, config, log.TestingLogger(), 2) require.NoError(t, err) vs2 := vss[1] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(t, cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 @@ -451,7 +480,7 @@ func TestStateLockNoPOL(t *testing.T) { // start round and wait for prevote cs1.enterNewRound(height, round) - cs1.startRoutines(0) + cs1.startRoutines(ctx, 0) ensureNewRound(newRoundCh, height, round) @@ -464,19 +493,19 @@ func TestStateLockNoPOL(t *testing.T) { // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, thePartSetHeader, vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, theBlockHash, thePartSetHeader, vs2) ensurePrevote(voteCh, height, round) // prevote ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) hash[0] = (hash[0] + 1) % 255 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, thePartSetHeader, vs2) + signAddVotes(ctx, config, cs1, tmproto.PrecommitType, hash, thePartSetHeader, vs2) ensurePrecommit(voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) @@ -506,10 +535,10 @@ func TestStateLockNoPOL(t *testing.T) { // wait to finish prevote ensurePrevote(voteCh, height, round) // we should have prevoted our locked block - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) // now we're going to enter prevote again, but with invalid args @@ -519,10 +548,10 @@ func TestStateLockNoPOL(t *testing.T) { ensurePrecommit(voteCh, height, round) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, round, 0, 
vss[0], nil, theBlockHash) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(ctx, config, cs1, tmproto.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) ensurePrecommit(voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args @@ -550,17 +579,18 @@ func TestStateLockNoPOL(t *testing.T) { } ensurePrevote(voteCh, height, round) // prevote - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash()) - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal signAddVotes( + ctx, config, cs1, tmproto.PrecommitType, @@ -571,10 +601,11 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - cs2, _, err := randState(config, log.TestingLogger(), 2) // needed so generated block is different than locked block + // needed so generated block is different than locked block + cs2, _, err := randState(ctx, config, log.TestingLogger(), 2) require.NoError(t, err) // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) + prop, propBlock := decideProposal(ctx, cs2, vs2, vs2.Height, vs2.Round+1) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with vs2") } @@ -597,17 +628,18 @@ func TestStateLockNoPOL(t *testing.T) { ensureNewProposal(proposalCh, height, round) ensurePrevote(voteCh, height, round) // prevote // prevote for locked block (not proposal) - validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) + validatePrevote(ctx, t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) // prevote for proposed block - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVotes( + ctx, config, cs1, tmproto.PrecommitType, @@ -625,21 +657,24 @@ func TestStateLockPOLRelock(t *testing.T) { config := configSetup(t) logger := log.TestingLogger() - cs1, vss, err := randState(config, logger, 4) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss, err := randState(ctx, config, logger, 4) require.NoError(t, err) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round 
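The subscribe helpers invoked just below now take the test context first, so a subscription cannot outlive its test. A simplified sketch of what a context-scoped subscription amounts to (a plain channel pipeline, not the actual eventbus API):

	package main

	import (
		"context"
		"fmt"
	)

	// subscribe forwards events until ctx is cancelled; closing the output
	// channel on cancellation is what lets tests tear down without leaks.
	func subscribe(ctx context.Context, events <-chan string) <-chan string {
		out := make(chan string)
		go func() {
			defer close(out)
			for {
				select {
				case <-ctx.Done():
					return
				case ev, ok := <-events:
					if !ok {
						return
					}
					select {
					case out <- ev:
					case <-ctx.Done():
						return
					}
				}
			}
		}()
		return out
	}

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		events := make(chan string, 1)
		events <- "NewRound"

		sub := subscribe(ctx, events)
		fmt.Println(<-sub) // NewRound
		cancel()           // cancelling the test context closes the subscription
		_, open := <-sub
		fmt.Println("subscription open after cancel:", open) // false
	}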
 	partSize := types.BlockPartSizeBytes
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader)
 
 	// everything done from perspective of cs1
 
@@ -650,7 +685,7 @@ func TestStateLockPOLRelock(t *testing.T) {
 	*/
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -660,20 +695,20 @@ func TestStateLockPOLRelock(t *testing.T) {
 
 	ensurePrevote(voteCh, height, round) // prevote
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round) // our precommit
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
 
 	// add precommits from the rest
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	// before we timeout to the new round set the new proposal
-	cs2, err := newState(logger, cs1.state, vs2, kvstore.NewApplication())
+	cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication())
 	require.NoError(t, err)
 
-	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
+	prop, propBlock := decideProposal(ctx, cs2, vs2, vs2.Height, vs2.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
 	}
@@ -707,17 +742,17 @@ func TestStateLockPOLRelock(t *testing.T) {
 
 	// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], theBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], theBlockHash)
 
 	// now let's add prevotes from everyone else for the new block
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have unlocked and locked on the new block, sending a precommit for this new block
-	validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
 
 	// more precommits, creating a majority on the new block, and this is then committed
-	signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3)
 
 	ensureNewBlockHeader(newBlockCh, height, propBlockHash)
 	ensureNewRound(newRoundCh, height+1, 0)
@@ -726,22 +761,24 @@ func TestStateLockPOLRelock(t *testing.T) {
 
 // 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
 func TestStateLockPOLUnlock(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	unlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryUnlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// everything done from perspective of cs1
 
@@ -751,7 +788,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
 	*/
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -760,20 +797,20 @@ func TestStateLockPOLUnlock(t *testing.T) {
 	theBlockParts := rs.ProposalBlockParts.Header()
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], theBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], theBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
 
 	// add precommits from the rest
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
-	signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
 
 	// before we time out into new round, set next proposal block
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+	prop, propBlock := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round+1)
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	// timeout to new round
@@ -799,9 +836,9 @@ func TestStateLockPOLUnlock(t *testing.T) {
 
 	// go to prevote, prevote for locked block (not proposal)
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], lockedBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], lockedBlockHash)
 
 	// now let's add prevotes from everyone else for nil (a polka!)
-	signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	// the polka makes us unlock and precommit nil
 	ensureNewUnlock(unlockCh, height, round)
@@ -809,9 +846,9 @@ func TestStateLockPOLUnlock(t *testing.T) {
 
 	// we should have unlocked and committed nil
 	// NOTE: since we don't relock on nil, the lock round is -1
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3)
 	ensureNewRound(newRoundCh, height, round+1)
 }
@@ -822,21 +859,23 @@ func TestStateLockPOLUnlock(t *testing.T) {
 func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 	config := configSetup(t)
 	logger := log.TestingLogger()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, logger, 4)
+	cs1, vss, err := randState(ctx, config, logger, 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
 
 	// everything done from perspective of cs1
 
 	/*
@@ -844,7 +883,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 	*/
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -854,19 +893,19 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 
 	ensurePrevote(voteCh, height, round) // prevote
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, firstBlockHash, firstBlockParts, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, firstBlockHash, firstBlockParts, vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round) // our precommit
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash)
 
 	// add precommits from the rest
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	// before we timeout to the new round set the new proposal
-	cs2, err := newState(logger, cs1.state, vs2, kvstore.NewApplication())
+	cs2, err := newState(ctx, logger, cs1.state, vs2, kvstore.NewApplication())
 	require.NoError(t, err)
 
-	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
+	prop, propBlock := decideProposal(ctx, cs2, vs2, vs2.Height, vs2.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
 	}
@@ -892,26 +931,26 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 
 	// go to prevote, node should prevote for locked block (not the new proposal) - this is relocking
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], firstBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], firstBlockHash)
 
 	// now let's add prevotes from everyone else for the new block
-	signAddVotes(config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have unlocked and locked on the new block, sending a precommit for this new block
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
 	if err := cs1.SetProposalAndBlock(prop, propBlock, secondBlockParts, "some peer"); err != nil {
 		t.Fatal(err)
 	}
 
 	// more precommits, creating a majority on the new block, and this is then committed
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	// before we timeout to the new round set the new proposal
-	cs3, err := newState(logger, cs1.state, vs3, kvstore.NewApplication())
+	cs3, err := newState(ctx, logger, cs1.state, vs3, kvstore.NewApplication())
 	require.NoError(t, err)
 
-	prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1)
+	prop, propBlock = decideProposal(ctx, cs3, vs3, vs3.Height, vs3.Round+1)
 	if prop == nil || propBlock == nil {
 		t.Fatal("Failed to create proposal block with vs2")
 	}
@@ -938,13 +977,13 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 
 	ensurePrevote(voteCh, height, round)
 	// we are no longer locked to the first block so we should be able to prevote
-	validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], thirdPropBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// we have a majority, now vs1 can change lock to the third block
-	validatePrecommit(t, cs1, round, round, vss[0], thirdPropBlockHash, thirdPropBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], thirdPropBlockHash, thirdPropBlockHash)
 }
 
 // 4 vals
@@ -953,25 +992,27 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
 // then we see the polka from round 1 but shouldn't unlock
 func TestStateLockPOLSafety1(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, cs1.Height, round)
+	startTestRound(ctx, cs1, cs1.Height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -979,17 +1020,17 @@ func TestStateLockPOLSafety1(t *testing.T) {
 	propBlock := rs.ProposalBlock
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlock.Hash())
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlock.Hash())
 
 	// the others sign a polka but we don't see it
-	prevotes := signVotes(config, tmproto.PrevoteType,
+	prevotes := signVotes(ctx, config, tmproto.PrevoteType,
 		propBlock.Hash(), propBlock.MakePartSet(partSize).Header(),
 		vs2, vs3, vs4)
 
 	t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))
 
 	// we do see them precommit nil
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	// cs1 precommit nil
 	ensurePrecommit(voteCh, height, round)
@@ -997,7 +1038,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
 
 	t.Log("### ONTO ROUND 1")
 
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+	prop, propBlock := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round+1)
 	propBlockHash := propBlock.Hash()
 	propBlockParts := propBlock.MakePartSet(partSize)
@@ -1026,16 +1067,16 @@ func TestStateLockPOLSafety1(t *testing.T) {
 
 	// go to prevote, prevote for proposal block
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash)
 
 	// now we see the others prevote for it, so we should lock on it
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have precommitted
-	validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
@@ -1055,9 +1096,9 @@ func TestStateLockPOLSafety1(t *testing.T) {
 
 	// finish prevote
 	ensurePrevote(voteCh, height, round)
 	// we should prevote what we're locked on
-	validatePrevote(t, cs1, round, vss[0], propBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash)
 
-	newStepCh := subscribe(t, cs1.eventBus, types.EventQueryNewRoundStep)
+	newStepCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRoundStep)
 	// before prevotes from the previous round are added
 	// add prevotes from the earlier round
@@ -1077,35 +1118,37 @@ func TestStateLockPOLSafety1(t *testing.T) {
 // don't see P0, lock on P1 at R1, don't unlock using P0 at R2
 func TestStateLockPOLSafety2(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	unlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryUnlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// the block for R0: gets polkad but we miss it
 	// (even though we signed it, shhh)
-	_, propBlock0 := decideProposal(cs1, vss[0], height, round)
+	_, propBlock0 := decideProposal(ctx, cs1, vss[0], height, round)
 	propBlockHash0 := propBlock0.Hash()
 	propBlockParts0 := propBlock0.MakePartSet(partSize)
 	propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()}
 
 	// the others sign a polka but we don't see it
-	prevotes := signVotes(config, tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)
+	prevotes := signVotes(ctx, config, tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)
 
 	// the block for round 1
-	prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+	prop1, propBlock1 := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round+1)
 	propBlockHash1 := propBlock1.Hash()
 	propBlockParts1 := propBlock1.MakePartSet(partSize)
@@ -1114,7 +1157,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
 	round++ // moving to the next round
 	t.Log("### ONTO Round 1")
 	// jump in at round 1
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
 		t.Fatal(err)
 	}
 	ensureNewProposal(proposalCh, height, round)
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash1)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash1)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1)
 
 	// add precommits from the rest
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
-	signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3)
 
 	incrementRound(vs2, vs3, vs4)
@@ -1144,7 +1187,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
 	// in round 2 we see the polkad block from round 0
 	newProp := types.NewProposal(height, round, 0, propBlockID0)
 	p := newProp.ToProto()
-	if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil {
+	if err := vs3.SignProposal(ctx, config.ChainID(), p); err != nil {
 		t.Fatal(err)
 	}
@@ -1166,7 +1209,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
 	ensureNoNewUnlock(unlockCh)
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash1)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash1)
 }
@@ -1177,26 +1220,28 @@ func TestStateLockPOLSafety2(t *testing.T) {
 // P0 proposes B0 at R3.
 func TestProposeValidBlock(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	unlockCh := subscribe(t, cs1.eventBus, types.EventQueryUnlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	unlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryUnlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, cs1.Height, round)
+	startTestRound(ctx, cs1, cs1.Height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	ensureNewProposal(proposalCh, height, round)
@@ -1205,16 +1250,18 @@ func TestProposeValidBlock(t *testing.T) {
 	propBlockHash := propBlock.Hash()
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash)
 
 	// the others sign a polka
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType,
+		propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2,
+		vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have precommitted
-	validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
@@ -1229,20 +1276,20 @@ func TestProposeValidBlock(t *testing.T) {
 	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	ensureNewUnlock(unlockCh, height, round)
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have precommitted
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
 	incrementRound(vs2, vs3, vs4)
 	incrementRound(vs2, vs3, vs4)
 
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	round += 2 // moving to the next round
@@ -1270,25 +1317,27 @@ func TestProposeValidBlock(t *testing.T) {
 // P0 misses locking B but sets valid block to B after receiving a delayed prevote.
 func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, cs1.Height, round)
+	startTestRound(ctx, cs1, cs1.Height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	ensureNewProposal(proposalCh, height, round)
@@ -1298,19 +1347,19 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], propBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], propBlockHash)
 
 	// vs2 sends prevote for propBlock
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2)
 
 	// vs3 sends prevote nil
-	signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
 
 	ensurePrecommit(voteCh, height, round)
 	// we should have precommitted
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
 	rs = cs1.GetRoundState()
@@ -1319,7 +1368,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 	assert.True(t, rs.ValidRound == -1)
 
 	// vs4 sends (delayed) prevote for propBlock
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4)
 
 	ensureNewValidBlock(validBlockCh, height, round)
@@ -1335,47 +1384,49 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
 // receiving delayed Block Proposal.
 func TestSetValidBlockOnDelayedProposal(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
 	partSize := types.BlockPartSizeBytes
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
 
 	round++ // move to round in which P0 is not proposer
 	incrementRound(vs2, vs3, vs4)
 
-	startTestRound(cs1, cs1.Height, round)
+	startTestRound(ctx, cs1, cs1.Height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], nil)
+	validatePrevote(ctx, t, cs1, round, vss[0], nil)
 
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+	prop, propBlock := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round+1)
 	propBlockHash := propBlock.Hash()
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	// vs2, vs3 and vs4 send prevote for propBlock
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
 	ensureNewValidBlock(validBlockCh, height, round)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
 
 	ensurePrecommit(voteCh, height, round)
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
 	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
 		t.Fatal(err)
@@ -1395,19 +1446,22 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
 func TestWaitingTimeoutOnNilPolka(t *testing.T) {
 	config := configSetup(t)
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
 
 	// start round
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
 	ensureNewRound(newRoundCh, height, round+1)
@@ -1418,27 +1472,29 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
 // P0 waits for timeoutPropose in the next round before entering prevote
 func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	ensurePrevote(voteCh, height, round)
 
 	incrementRound(vss[1:]...)
-	signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	round++ // moving to the next round
 	ensureNewRound(newRoundCh, height, round)
@@ -1449,7 +1505,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds())
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], nil)
+	validatePrevote(ctx, t, cs1, round, vss[0], nil)
 }
 
 // 4 vals, 3 Precommits for nil from the higher round.
@@ -1457,33 +1513,35 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
 // P0 jumps to higher round, precommit and start precommit wait
 func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	ensurePrevote(voteCh, height, round)
 
 	incrementRound(vss[1:]...)
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	round++ // moving to the next round
 	ensureNewRound(newRoundCh, height, round)
 
 	ensurePrecommit(voteCh, height, round)
-	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil)
 
 	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
@@ -1496,38 +1554,42 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
 // P0 waits for timeoutPropose to expire before sending prevote.
 func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
 
-	timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round in which P0 is not proposer
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	incrementRound(vss[1:]...)
-	signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
 	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], nil)
+	validatePrevote(ctx, t, cs1, round, vss[0], nil)
 }
 
 // What we want:
 // P0 emits NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet
 func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
@@ -1536,19 +1598,19 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
 
 	partSize := types.BlockPartSizeBytes
 
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock)
 
-	_, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round)
+	_, propBlock := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round)
 	propBlockHash := propBlock.Hash()
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	// start round in which P0 is not proposer
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	// vs2, vs3 and vs4 send precommit for propBlock
-	signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
 	ensureNewValidBlock(validBlockCh, height, round)
 
 	rs := cs1.GetRoundState()
@@ -1563,28 +1625,30 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
 // After receiving block, it executes block and moves to the next height.
 func TestCommitFromPreviousRound(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, int32(1)
 
 	partSize := types.BlockPartSizeBytes
 
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	validBlockCh := subscribe(t, cs1.eventBus, types.EventQueryValidBlock)
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
 
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round)
+	prop, propBlock := decideProposal(ctx, cs1, vs2, vs2.Height, vs2.Round)
 	propBlockHash := propBlock.Hash()
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	// start round in which P0 is not proposer
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 
 	// vs2, vs3 and vs4 send precommit for propBlock for the previous round
-	signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
 
 	ensureNewValidBlock(validBlockCh, height, round)
@@ -1619,28 +1683,30 @@ func (n *fakeTxNotifier) Notify() {
 // start of the next round
 func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	config.Consensus.SkipTimeoutCommit = false
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}
 
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutProposeCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutPropose)
-	precommitTimeoutCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose)
+	precommitTimeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
 
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	newBlockHeader := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -1649,17 +1715,17 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 	theBlockParts := rs.ProposalBlockParts.Header()
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], theBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], theBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
 
 	// add precommits
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
-	signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
 
 	// wait till timeout occurs
 	ensurePrecommitTimeout(precommitTimeoutCh)
@@ -1667,7 +1733,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 	ensureNewRound(newRoundCh, height, round+1)
 
 	// majority is now reached
-	signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4)
 
 	ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
@@ -1683,9 +1749,11 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 
 func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	config.Consensus.SkipTimeoutCommit = false
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@@ -1693,17 +1761,17 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
 
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	newBlockHeader := subscribe(t, cs1.eventBus, types.EventQueryNewBlockHeader)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -1712,21 +1780,21 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	theBlockParts := rs.ProposalBlockParts.Header()
 
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], theBlockHash)
+	validatePrevote(ctx, t, cs1, round, vss[0], theBlockHash)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
-	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
 
 	// add precommits
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
-	signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
-	signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4)
 
 	ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
 
-	prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
+	prop, propBlock := decideProposal(ctx, cs1, vs2, height+1, 0)
 	propBlockParts := propBlock.MakePartSet(partSize)
 
 	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
@@ -1751,10 +1819,10 @@ func TestStateSlashingPrevotes(t *testing.T) {
 	vs2 := vss[1]
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	voteCh := subscribeToVoter(t, cs1, cs1.privValidator.GetAddress())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	voteCh := subscribeToVoter(ctx, t, cs1, cs1.privValidator.GetAddress())
 
 	// start round and wait for propose and prevote
 	startTestRound(cs1, cs1.Height, 0)
@@ -1786,10 +1854,10 @@ func TestStateSlashingPrecommits(t *testing.T) {
 	vs2 := vss[1]
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	voteCh := subscribeToVoter(t, cs1, cs1.privValidator.GetAddress())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	voteCh := subscribeToVoter(ctx, t, cs1, cs1.privValidator.GetAddress())
 
 	// start round and wait for propose and prevote
 	startTestRound(cs1, cs1.Height, 0)
@@ -1828,24 +1896,26 @@ func TestStateSlashingPrecommits(t *testing.T) {
 // we receive a final precommit after going into next round, but others might have gone to commit already!
 func TestStateHalt1(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs1, vss, err := randState(config, log.TestingLogger(), 4)
+	cs1, vss, err := randState(ctx, config, log.TestingLogger(), 4)
 	require.NoError(t, err)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
 	height, round := cs1.Height, cs1.Round
 	partSize := types.BlockPartSizeBytes
 
-	proposalCh := subscribe(t, cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutWaitCh := subscribe(t, cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(t, cs1.eventBus, types.EventQueryNewRound)
-	newBlockCh := subscribe(t, cs1.eventBus, types.EventQueryNewBlock)
-	pv1, err := cs1.privValidator.GetPubKey(context.Background())
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
+	timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock)
+	pv1, err := cs1.privValidator.GetPubKey(ctx)
 	require.NoError(t, err)
 	addr := pv1.Address()
-	voteCh := subscribeToVoter(t, cs1, addr)
+	voteCh := subscribeToVoter(ctx, t, cs1, addr)
 
 	// start round and wait for propose and prevote
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 	ensureNewRound(newRoundCh, height, round)
 	ensureNewProposal(proposalCh, height, round)
@@ -1855,17 +1925,17 @@ func TestStateHalt1(t *testing.T) {
 
 	ensurePrevote(voteCh, height, round)
 
-	signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(ctx, config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4)
 
 	ensurePrecommit(voteCh, height, round)
 	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash())
+	validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash())
 
 	// add precommits from the rest
-	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didn't receive proposal
-	signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3)
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didn't receive proposal
+	signAddVotes(ctx, config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3)
 	// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
-	precommit4 := signVote(vs4, config, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header())
+	precommit4 := signVote(ctx, vs4, config, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header())
 
 	incrementRound(vs2, vs3, vs4)
@@ -1885,7 +1955,7 @@ func TestStateHalt1(t *testing.T) {
 
 	// go to prevote, prevote for locked block
 	ensurePrevote(voteCh, height, round)
-	validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash())
+	validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash())
 
 	// now we receive the precommit from the previous round
 	addVotes(cs1, precommit4)
@@ -1898,9 +1968,11 @@ func TestStateHalt1(t *testing.T) {
 
 func TestStateOutputsBlockPartsStats(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	// create dummy peer
-	cs, _, err := randState(config, log.TestingLogger(), 1)
+	cs, _, err := randState(ctx, config, log.TestingLogger(), 1)
 	require.NoError(t, err)
 	peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
 	require.NoError(t, err)
@@ -1945,8 +2017,10 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
 
 func TestStateOutputVoteStats(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	cs, vss, err := randState(config, log.TestingLogger(), 2)
+	cs, vss, err := randState(ctx, config, log.TestingLogger(), 2)
 	require.NoError(t, err)
 	// create dummy peer
 	peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
@@ -1954,7 +2028,7 @@ func TestStateOutputVoteStats(t *testing.T) {
 
 	randBytes := tmrand.Bytes(tmhash.Size)
 
-	vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
+	vote := signVote(ctx, vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
 
 	voteMessage := &VoteMessage{vote}
 	cs.handleMsg(msgInfo{voteMessage, peerID})
@@ -1968,7 +2042,7 @@ func TestStateOutputVoteStats(t *testing.T) {
 
 	// sending the vote for the bigger height
 	incrementHeight(vss[1])
-	vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
+	vote = signVote(ctx, vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
 
 	cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID})
@@ -1982,20 +2056,26 @@ func TestStateOutputVoteStats(t *testing.T) {
 
 func TestSignSameVoteTwice(t *testing.T) {
 	config := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	_, vss, err := randState(config, log.TestingLogger(), 2)
+	_, vss, err := randState(ctx, config, log.TestingLogger(), 2)
 	require.NoError(t, err)
 
 	randBytes := tmrand.Bytes(tmhash.Size)
 
-	vote := signVote(vss[1],
+	vote := signVote(
+		ctx,
+		vss[1],
 		config,
 		tmproto.PrecommitType,
 		randBytes,
 		types.PartSetHeader{Total: 10, Hash: randBytes},
 	)
 
-	vote2 := signVote(vss[1],
+	vote2 := signVote(
+		ctx,
+		vss[1],
 		config,
 		tmproto.PrecommitType,
 		randBytes,
@@ -2006,9 +2086,14 @@ func TestSignSameVoteTwice(t *testing.T) {
 }
 
 // subscribe subscribes test client to the given query and returns a channel with cap = 1.
-func subscribe(t *testing.T, eventBus *eventbus.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
+func subscribe(
+	ctx context.Context,
+	t *testing.T,
+	eventBus *eventbus.EventBus,
+	q tmpubsub.Query,
+) <-chan tmpubsub.Message {
 	t.Helper()
-	sub, err := eventBus.SubscribeWithArgs(context.Background(), tmpubsub.SubscribeArgs{
+	sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
 		ClientID: testSubscriber,
 		Query:    q,
 	})
@@ -2018,8 +2103,11 @@ func subscribe(t *testing.T, eventBus *eventbus.EventBus, q tmpubsub.Query) <-ch
 	ch := make(chan tmpubsub.Message)
 	go func() {
 		for {
-			next, err := sub.Next(context.Background())
+			next, err := sub.Next(ctx)
 			if err != nil {
+				if ctx.Err() != nil {
+					return
+				}
 				t.Errorf("Subscription for %v unexpectedly terminated: %v", q, err)
 				return
 			}
diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go
index 0226889c9..e8583932d 100644
--- a/internal/consensus/ticker.go
+++ b/internal/consensus/ticker.go
@@ -1,6 +1,7 @@
 package consensus
 
 import (
+	"context"
 	"time"
 
 	"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +16,7 @@ var (
 // conditional on the height/round/step in the timeoutInfo.
 // The timeoutInfo.Duration may be non-positive.
 type TimeoutTicker interface {
-	Start() error
+	Start(context.Context) error
 	Stop() error
 	Chan() <-chan timeoutInfo       // on which to receive a timeout
 	ScheduleTimeout(ti timeoutInfo) // reset the timer
@@ -47,8 +48,7 @@ func NewTimeoutTicker(logger log.Logger) TimeoutTicker {
 }
 
 // OnStart implements service.Service. It starts the timeout routine.
-func (t *timeoutTicker) OnStart() error {
-
+func (t *timeoutTicker) OnStart(gctx context.Context) error {
 	go t.timeoutRoutine()
 
 	return nil
diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go
index cafb1ed1a..94c36ee3e 100644
--- a/internal/consensus/types/height_vote_set_test.go
+++ b/internal/consensus/types/height_vote_set_test.go
@@ -29,23 +29,26 @@ func TestMain(m *testing.M) {
 }
 
 func TestPeerCatchupRounds(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	valSet, privVals := factory.RandValidatorSet(10, 1)
 
 	hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet)
 
-	vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
+	vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals)
 	added, err := hvs.AddVote(vote999_0, "peer1")
 	if !added || err != nil {
 		t.Error("Expected to successfully add vote from peer", added, err)
 	}
 
-	vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals)
+	vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals)
 	added, err = hvs.AddVote(vote1000_0, "peer1")
 	if !added || err != nil {
 		t.Error("Expected to successfully add vote from peer", added, err)
 	}
 
-	vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals)
+	vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals)
 	added, err = hvs.AddVote(vote1001_0, "peer1")
 	if err != ErrGotVoteFromUnwantedRound {
 		t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
@@ -61,9 +64,15 @@ func TestPeerCatchupRounds(t *testing.T) {
 
 }
 
-func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {
+func makeVoteHR(
+	ctx context.Context,
+	t *testing.T,
+	height int64,
+	valIndex, round int32,
+	privVals []types.PrivValidator,
+) *types.Vote {
 	privVal := privVals[valIndex]
-	pubKey, err := privVal.GetPubKey(context.Background())
+	pubKey, err := privVal.GetPubKey(ctx)
 	if err != nil {
 		panic(err)
 	}
@@ -82,7 +91,7 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty
valIndex, round int32, privVals []ty chainID := cfg.ChainID() v := vote.ToProto() - err = privVal.SignVote(context.Background(), chainID, v) + err = privVal.SignVote(ctx, chainID, v) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index 24fef294d..13f29a202 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/binary" "errors" "fmt" @@ -63,7 +64,7 @@ type WAL interface { SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) // service methods - Start() error + Start(context.Context) error Stop() error Wait() } @@ -116,7 +117,7 @@ func (wal *BaseWAL) Group() *auto.Group { return wal.group } -func (wal *BaseWAL) OnStart() error { +func (wal *BaseWAL) OnStart(ctx context.Context) error { size, err := wal.group.Head.Size() if err != nil { return err @@ -125,7 +126,7 @@ func (wal *BaseWAL) OnStart() error { return err } } - err = wal.group.Start() + err = wal.group.Start(ctx) if err != nil { return err } @@ -159,10 +160,14 @@ func (wal *BaseWAL) FlushAndSync() error { func (wal *BaseWAL) OnStop() { wal.flushTicker.Stop() if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("error on flush data to disk", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.Logger.Error("error on flush data to disk", "error", err) + } } if err := wal.group.Stop(); err != nil { - wal.Logger.Error("error trying to stop wal", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.Logger.Error("error trying to stop wal", "error", err) + } } wal.group.Close() } @@ -423,6 +428,6 @@ func (nilWAL) FlushAndSync() error { return nil } func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (nilWAL) Start() error { return nil } -func (nilWAL) Stop() error { return nil } -func (nilWAL) Wait() {} +func (nilWAL) Start(context.Context) error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 20cf0fae2..35a539d64 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -3,6 +3,7 @@ package consensus import ( "bufio" "bytes" + "context" "fmt" "io" mrand "math/rand" @@ -30,7 +31,7 @@ import ( // persistent kvstore application and special consensus wal instance // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. 
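// Illustrative call under the new signature (a sketch assuming a test-scoped
// context and a started WAL group; not part of the diff):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := WALGenerateNBlocks(ctx, t, wal.Group(), 6); err != nil {
//		t.Fatal(err)
//	}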
-func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { +func WALGenerateNBlocks(ctx context.Context, t *testing.T, wr io.Writer, numBlocks int) (err error) { cfg := getConfig(t) app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) @@ -67,24 +68,15 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(ctx); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) eventBus := eventbus.NewDefault(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { + if err := eventBus.Start(ctx); err != nil { return fmt.Errorf("failed to start event bus: %w", err) } - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) @@ -105,7 +97,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { consensusState.wal = wal - if err := consensusState.Start(); err != nil { + if err := consensusState.Start(ctx); err != nil { return fmt.Errorf("failed to start consensus state: %w", err) } @@ -124,11 +116,11 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } // WALWithNBlocks returns a WAL content with numBlocks. -func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { +func WALWithNBlocks(ctx context.Context, t *testing.T, numBlocks int) (data []byte, err error) { var b bytes.Buffer wr := bufio.NewWriter(&b) - if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil { + if err := WALGenerateNBlocks(ctx, t, wr, numBlocks); err != nil { return []byte{}, err } @@ -227,6 +219,6 @@ func (w *byteBufferWAL) SearchForEndHeight( return nil, false, nil } -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} +func (w *byteBufferWAL) Start(context.Context) error { return nil } +func (w *byteBufferWAL) Stop() error { return nil } +func (w *byteBufferWAL) Wait() {} diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index 6c1feb670..c0290fcf8 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "path/filepath" "testing" @@ -18,15 +19,16 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -const ( - walTestFlushInterval = time.Duration(100) * time.Millisecond -) +const walTestFlushInterval = 100 * time.Millisecond func TestWALTruncate(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // this magic number 4K can truncate the content when RotateFile. // defaultHeadSizeLimit(10M) is hard to simulate. // this magic number 1 * time.Millisecond make RotateFile check frequently. 
@@ -36,21 +38,14 @@ func TestWALTruncate(t *testing.T) { autofile.GroupCheckDuration(1*time.Millisecond), ) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. at this // time, RotateFile is called, truncate content exist in each file. - err = WALGenerateNBlocks(t, wal.Group(), 60) + err = WALGenerateNBlocks(ctx, t, wal.Group(), 60) require.NoError(t, err) time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run @@ -105,18 +100,14 @@ func TestWALWrite(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wal, err := NewWAL(log.TestingLogger(), walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -142,7 +133,10 @@ func TestWALWrite(t *testing.T) { } func TestWALSearchForEndHeight(t *testing.T) { - walBody, err := WALWithNBlocks(t, 6) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + walBody, err := WALWithNBlocks(ctx, t, 6) if err != nil { t.Fatal(err) } @@ -171,18 +165,21 @@ func TestWALPeriodicSync(t *testing.T) { walFile := filepath.Join(walDir, "wal") wal, err := NewWAL(log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) // Generate some data - err = WALGenerateNBlocks(t, wal.Group(), 5) + err = WALGenerateNBlocks(ctx, t, wal.Group(), 5) require.NoError(t, err) // We should have data in the buffer now assert.NotZero(t, wal.Group().Buffered()) - require.NoError(t, wal.Start()) + require.NoError(t, wal.Start(ctx)) t.Cleanup(func() { if err := wal.Stop(); err != nil { t.Error(err) diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go index 95c8876d4..4b28f6fcd 100644 --- a/internal/eventbus/event_bus.go +++ b/internal/eventbus/event_bus.go @@ -2,6 +2,7 @@ package eventbus import ( "context" + "errors" "fmt" "strings" @@ -38,13 +39,15 @@ func NewDefault(l log.Logger) *EventBus { return b } -func (b *EventBus) OnStart() error { - return b.pubsub.Start() +func (b *EventBus) OnStart(ctx context.Context) error { + return b.pubsub.Start(ctx) } func (b *EventBus) OnStop() { if err := b.pubsub.Stop(); err != nil { - b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) + } } } diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index 64e6f3344..3e9069718 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -19,14 +19,12 @@ import ( ) func TestEventBusPublishEventTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + 
defer cancel()
+
 	eventBus := eventbus.NewDefault(log.TestingLogger())
-	err := eventBus.Start()
+	err := eventBus.Start(ctx)
 	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
 
 	tx := types.Tx("foo")
 	result := abci.ResponseDeliverTx{
@@ -37,7 +35,6 @@ func TestEventBusPublishEventTx(t *testing.T) {
 	}
 
 	// PublishEventTx adds 3 composite keys, so the query below should work
-	ctx := context.Background()
 	query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash())
 	txsSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
 		ClientID: "test",
@@ -76,14 +73,11 @@ func TestEventBusPublishEventTx(t *testing.T) {
 }
 
 func TestEventBusPublishEventNewBlock(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	eventBus := eventbus.NewDefault(log.TestingLogger())
-	err := eventBus.Start()
+	err := eventBus.Start(ctx)
 	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
 
 	block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{})
 	blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}
@@ -99,7 +93,6 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
 	}
 
 	// PublishEventNewBlock adds the tm.event compositeKey, so the query below should work
-	ctx := context.Background()
 	query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2"
 	blocksSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
 		ClientID: "test",
@@ -136,14 +129,11 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
 }
 
 func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	eventBus := eventbus.NewDefault(log.TestingLogger())
-	err := eventBus.Start()
+	err := eventBus.Start(ctx)
 	require.NoError(t, err)
-	t.Cleanup(func() {
-		if err := eventBus.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
 
 	tx := types.Tx("foo")
 	result := abci.ResponseDeliverTx{
@@ -203,54 +193,65 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) {
 	}
 
 	for i, tc := range testCases {
-		ctx := context.Background()
-		sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
-			ClientID: fmt.Sprintf("client-%d", i),
-			Query:    tmquery.MustParse(tc.query),
-		})
-		require.NoError(t, err)
+		var name string
 
-		gotResult := make(chan bool, 1)
-		go func() {
-			defer close(gotResult)
-			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
-			defer cancel()
-			msg, err := sub.Next(ctx)
-			if err == nil {
-				data := msg.Data().(types.EventDataTx)
-				assert.Equal(t, int64(1), data.Height)
-				assert.Equal(t, uint32(0), data.Index)
-				assert.EqualValues(t, tx, data.Tx)
-				assert.Equal(t, result, data.Result)
-				gotResult <- true
-			}
-		}()
-
-		assert.NoError(t, eventBus.PublishEventTx(types.EventDataTx{
-			TxResult: abci.TxResult{
-				Height: 1,
-				Index:  0,
-				Tx:     tx,
-				Result: result,
-			},
-		}))
-
-		if got := <-gotResult; got != tc.expectResults {
-			require.Failf(t, "Wrong transaction result",
-				"got a tx: %v, wanted a tx: %v", got, tc.expectResults)
+		if tc.expectResults {
+			name = fmt.Sprintf("ExpectedResultsCase%d", i)
+		} else {
+			name = fmt.Sprintf("NoResultsCase%d", i)
 		}
+
+		t.Run(name, func(t *testing.T) {
+
+			sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
+				ClientID: fmt.Sprintf("client-%d", i),
+				Query:    tmquery.MustParse(tc.query),
+			})
+			require.NoError(t, err)
+
+			gotResult :=
make(chan bool, 1) + go func() { + defer close(gotResult) + tctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := sub.Next(tctx) + if err == nil { + data := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.EqualValues(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + gotResult <- true + } + }() + + assert.NoError(t, eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + })) + + require.NoError(t, ctx.Err(), "context should not have been canceled") + + if got := <-gotResult; got != tc.expectResults { + require.Failf(t, "Wrong transaction result", + "got a tx: %v, wanted a tx: %v", got, tc.expectResults) + } + }) + } } func TestEventBusPublishEventNewBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) - err := eventBus.Start() + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) resultBeginBlock := abci.ResponseBeginBlock{ @@ -265,7 +266,6 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work - ctx := context.Background() query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" headersSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: "test", @@ -300,18 +300,15 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } func TestEventBusPublishEventNewEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) - err := eventBus.Start() + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) ev := types.NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") - ctx := context.Background() const query = `tm.event='NewEvidence'` evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: "test", @@ -344,18 +341,15 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) { } func TestEventBusPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) - err := eventBus.Start() + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) const numEventsExpected = 14 - ctx := context.Background() sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ ClientID: "test", Query: tmquery.Empty{}, @@ -434,8 +428,11 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes // for random* functions mrand.Seed(time.Now().Unix()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) // set buffer capacity to 0 so we are not testing cache - err := eventBus.Start() + err := eventBus.Start(ctx) if err != nil { b.Error(err) } @@ -445,7 +442,6 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes } }) - ctx := context.Background() q := types.EventQueryNewBlock for i := 0; i < numClients; i++ { diff --git 
a/internal/evidence/reactor.go b/internal/evidence/reactor.go index c2f25bd36..4e37e1d17 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -1,6 +1,7 @@ package evidence import ( + "context" "fmt" "runtime/debug" "sync" @@ -81,7 +82,7 @@ func NewReactor( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. No error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { go r.processEvidenceCh() go r.processPeerUpdates() diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index d1ae55803..764450cd6 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -1,6 +1,7 @@ package evidence_test import ( + "context" "encoding/hex" "math/rand" "sync" @@ -44,7 +45,7 @@ type reactorTestSuite struct { numStateStores int } -func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { +func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { t.Helper() pID := make([]byte, 16) @@ -55,7 +56,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts := &reactorTestSuite{ numStateStores: numStateStores, logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}), reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), pools: make(map[types.NodeID]*evidence.Pool, numStateStores), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), @@ -93,7 +94,7 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts.peerUpdates[nodeID], rts.pools[nodeID]) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) idx++ @@ -233,13 +234,16 @@ func createEvidenceList( } func TestReactorMultiDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val := types.NewMockPV() height := int64(numEvidence) + 10 stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 20) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 20) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -281,7 +285,10 @@ func TestReactorBroadcastEvidence(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - rts := setup(t, stateDBs, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, stateDBs, 0) rts.start(t) // Create a series of fixtures where each suite contains a reactor and @@ -335,7 +342,10 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height1) stateDB2 := initializeValidatorState(t, val, height2) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) rts.start(t) primary := rts.nodes[0] @@ -368,7 +378,10 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 
:= initializeValidatorState(t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -405,7 +418,10 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { stateDB1 := initializeValidatorState(t, val, height) stateDB2 := initializeValidatorState(t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -460,7 +476,10 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { stateDBs[i] = initializeValidatorState(t, val, height) } - rts := setup(t, stateDBs, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, stateDBs, 0) rts.start(t) evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 59f0d92d2..3bc744e4c 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -85,26 +85,18 @@ func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { // Run starts the Inspector servers and blocks until the servers shut down. The passed // in context is used to control the lifecycle of the servers. func (ins *Inspector) Run(ctx context.Context) error { - err := ins.eventBus.Start() + err := ins.eventBus.Start(ctx) if err != nil { return fmt.Errorf("error starting event bus: %s", err) } - defer func() { - err := ins.eventBus.Stop() - if err != nil { - ins.logger.Error("event bus stopped with error", "err", err) - } - }() - err = ins.indexerService.Start() + defer ins.eventBus.Wait() + + err = ins.indexerService.Start(ctx) if err != nil { return fmt.Errorf("error starting indexer service: %s", err) } - defer func() { - err := ins.indexerService.Stop() - if err != nil { - ins.logger.Error("indexer service stopped with error", "err", err) - } - }() + defer ins.indexerService.Wait() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) } diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 5527bf2ac..15a555ab0 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -101,7 +101,7 @@ func TestBlock(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - resultBlock, err := cli.Block(context.Background(), &testHeight) + resultBlock, err := cli.Block(ctx, &testHeight) require.NoError(t, err) require.Equal(t, testBlock.Height, resultBlock.Block.Height) require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) @@ -153,7 +153,7 @@ func TestTxSearch(t *testing.T) { require.NoError(t, err) var page = 1 - resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + resultTxSearch, err := cli.TxSearch(ctx, testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) @@ -199,7 +199,7 @@ func TestTx(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Tx(context.Background(), testHash, false) + res, err := cli.Tx(ctx, testHash, false) 
require.NoError(t, err) require.Equal(t, types.Tx(testTx), res.Tx) @@ -247,7 +247,7 @@ func TestConsensusParams(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - params, err := cli.ConsensusParams(context.Background(), &testHeight) + params, err := cli.ConsensusParams(ctx, &testHeight) require.NoError(t, err) require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) @@ -300,7 +300,7 @@ func TestBlockResults(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockResults(context.Background(), &testHeight) + res, err := cli.BlockResults(ctx, &testHeight) require.NoError(t, err) require.Equal(t, res.TotalGasUsed, testGasUsed) @@ -348,7 +348,7 @@ func TestCommit(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Commit(context.Background(), &testHeight) + res, err := cli.Commit(ctx, &testHeight) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, res.SignedHeader.Commit.Round, testRound) @@ -402,7 +402,7 @@ func TestBlockByHash(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockByHash(context.Background(), testHash) + res, err := cli.BlockByHash(ctx, testHash) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, []byte(res.BlockID.Hash), testHash) @@ -455,7 +455,7 @@ func TestBlockchain(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockchainInfo(context.Background(), 0, 100) + res, err := cli.BlockchainInfo(ctx, 0, 100) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) @@ -511,7 +511,7 @@ func TestValidators(t *testing.T) { testPage := 1 testPerPage := 100 - res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + res, err := cli.Validators(ctx, &testHeight, &testPage, &testPerPage) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testVotingPower, res.Validators[0].VotingPower) @@ -571,7 +571,7 @@ func TestBlockSearch(t *testing.T) { testPage := 1 testPerPage := 100 testOrderBy := "desc" - res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + res, err := cli.BlockSearch(ctx, testQuery, &testPage, &testPerPage, testOrderBy) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) diff --git a/internal/libs/autofile/cmd/logjack.go b/internal/libs/autofile/cmd/logjack.go index 1aa8b6a11..0f412a366 100644 --- a/internal/libs/autofile/cmd/logjack.go +++ b/internal/libs/autofile/cmd/logjack.go @@ -1,15 +1,17 @@ package main import ( + "context" "flag" "fmt" "io" "os" + "os/signal" "strconv" "strings" + "syscall" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmos "github.com/tendermint/tendermint/libs/os" ) const Version = "0.0.1" @@ -32,21 +34,10 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo return } -type fmtLogger struct{} - -func (fmtLogger) Info(msg string, keyvals ...interface{}) { - strs := make([]string, len(keyvals)) - for i, kv := range keyvals { - strs[i] = 
fmt.Sprintf("%v", kv) - } - fmt.Printf("%s %s\n", msg, strings.Join(strs, ",")) -} - func main() { - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(fmtLogger{}, func() { - fmt.Println("logjack shutting down") - }) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM) + defer cancel() + defer func() { fmt.Println("logjack shutting down") }() // Read options headPath, chopSize, limitSize, version := parseFlags() @@ -62,7 +53,7 @@ func main() { os.Exit(1) } - if err = group.Start(); err != nil { + if err = group.Start(ctx); err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) os.Exit(1) } diff --git a/internal/libs/autofile/group.go b/internal/libs/autofile/group.go index 23f27c59b..0e208d8e9 100644 --- a/internal/libs/autofile/group.go +++ b/internal/libs/autofile/group.go @@ -2,6 +2,7 @@ package autofile import ( "bufio" + "context" "errors" "fmt" "io" @@ -135,7 +136,7 @@ func GroupTotalSizeLimit(limit int64) func(*Group) { // OnStart implements service.Service by starting the goroutine that checks file // and group limits. -func (g *Group) OnStart() error { +func (g *Group) OnStart(ctx context.Context) error { g.ticker = time.NewTicker(g.groupCheckDuration) go g.processTicks() return nil diff --git a/internal/mempool/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go index 8a7938fd5..ed4740011 100644 --- a/internal/mempool/mempool_bench_test.go +++ b/internal/mempool/mempool_bench_test.go @@ -11,7 +11,10 @@ import ( ) func BenchmarkTxMempool_CheckTx(b *testing.B) { - txmp := setup(b, 10000) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, b, 10000) rng := rand.New(rand.NewSource(time.Now().UnixNano())) b.ResetTimer() diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 428204214..f06ee18d9 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -72,9 +72,12 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { } } -func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { +func setup(ctx context.Context, t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + app := &application{kvstore.NewApplication()} cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() @@ -84,11 +87,12 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { cfg.Mempool.CacheSize = cacheSize appConnMem, err := cc(logger) require.NoError(t, err) - require.NoError(t, appConnMem.Start()) + require.NoError(t, appConnMem.Start(ctx)) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) - require.NoError(t, appConnMem.Stop()) + cancel() + appConnMem.Wait() }) return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) 
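// Sketch of the resulting test pattern (illustrative): each test owns a
// context, and cancellation, via the t.Cleanup registered above, replaces the
// explicit appConnMem.Stop() call:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	txmp := setup(ctx, t, 100)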
@@ -128,7 +132,10 @@ func convertTex(in []testTx) types.Txs { } func TestTxMempool_TxsAvailable(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txmp.EnableTxsAvailable() ensureNoTxFire := func() { @@ -182,7 +189,10 @@ func TestTxMempool_TxsAvailable(t *testing.T) { } func TestTxMempool_Size(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -206,7 +216,10 @@ func TestTxMempool_Size(t *testing.T) { } func TestTxMempool_Flush(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) txs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -231,7 +244,10 @@ func TestTxMempool_Flush(t *testing.T) { } func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -281,7 +297,10 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { } func TestTxMempool_ReapMaxTxs(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) tTxs := checkTxs(t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -330,7 +349,10 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { } func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes+1) @@ -347,7 +369,10 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { } func TestTxMempool_CheckTxSamePeer(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -362,7 +387,10 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) { } func TestTxMempool_CheckTxSameSender(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -384,7 +412,10 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { } func TestTxMempool_ConcurrentTxs(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) rng := rand.New(rand.NewSource(time.Now().UnixNano())) checkTxDone := make(chan struct{}) @@ -448,7 +479,10 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { - txmp := setup(t, 500) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 500) txmp.height = 100 txmp.config.TTLNumBlocks = 10 @@ -498,6 +532,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } func 
TestTxMempool_CheckTxPostCheckError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { name string err error @@ -514,10 +551,13 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { for _, tc := range cases { testCase := tc t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { return testCase.err } - txmp := setup(t, 0, WithPostCheck(postCheckFn)) + txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn)) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes-1) _, err := rng.Read(tx) @@ -532,7 +572,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { } require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError) } - require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) }) } } diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 6d86b3712..2ddb44e7a 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -112,7 +112,7 @@ func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 68753cc69..4456424b5 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -36,7 +36,7 @@ type reactorTestSuite struct { nodes []types.NodeID } -func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { +func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) @@ -45,7 +45,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), reactors: make(map[types.NodeID]*Reactor, numNodes), mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), mempools: make(map[types.NodeID]*TxMempool, numNodes), @@ -60,7 +60,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() - mempool := setup(t, 0) + mempool := setup(ctx, t, 0) rts.mempools[nodeID] = mempool rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) @@ -78,7 +78,7 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { rts.nodes = append(rts.nodes, nodeID) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } @@ -147,8 +147,11 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...ty } func TestReactorBroadcastDoesNotPanic(t *testing.T) { - numNodes := 2 - rts := setupReactors(t, numNodes, 0) 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numNodes = 2 + rts := setupReactors(ctx, t, numNodes, 0) observePanic := func(r interface{}) { t.Fatal("panic detected in reactor") @@ -192,8 +195,10 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { func TestReactorBroadcastTxs(t *testing.T) { numTxs := 1000 numNodes := 10 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setupReactors(t, numNodes, 0) + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondaries := rts.nodes[1:] @@ -215,7 +220,10 @@ func TestReactorConcurrency(t *testing.T) { numTxs := 5 numNodes := 2 - rts := setupReactors(t, numNodes, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -273,7 +281,10 @@ func TestReactorNoBroadcastToSender(t *testing.T) { numTxs := 1000 numNodes := 2 - rts := setupReactors(t, numNodes, uint(numTxs)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, uint(numTxs)) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -296,7 +307,10 @@ func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 cfg := config.TestConfig() - rts := setupReactors(t, numNodes, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -305,7 +319,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // second reactor. tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) err := rts.reactors[primary].mempool.CheckTx( - context.Background(), + ctx, tx1, nil, TxInfo{ @@ -321,7 +335,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { // broadcast a tx, which is beyond the max size and ensure it's not sent tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, TxInfo{SenderID: UnknownPeerID}) + err = rts.mempools[primary].CheckTx(ctx, tx2, nil, TxInfo{SenderID: UnknownPeerID}) require.Error(t, err) rts.assertMempoolChannelsDrained(t) @@ -330,7 +344,11 @@ func TestReactor_MaxTxBytes(t *testing.T) { func TestDontExhaustMaxActiveIDs(t *testing.T) { // we're creating a single node network, but not starting the // network. 
- rts := setupReactors(t, 1, MaxActiveIDs+1) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, 1, MaxActiveIDs+1) nodeID := rts.nodes[0] @@ -395,7 +413,10 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { t.Skip("skipping test in short mode") } - rts := setupReactors(t, 2, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, 2, 0) primary := rts.nodes[0] secondary := rts.nodes[1] diff --git a/internal/p2p/address_test.go b/internal/p2p/address_test.go index 2745faf73..d5f9e498e 100644 --- a/internal/p2p/address_test.go +++ b/internal/p2p/address_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "net" "strings" "testing" @@ -204,6 +205,9 @@ func TestParseNodeAddress(t *testing.T) { func TestNodeAddress_Resolve(t *testing.T) { id := types.NodeID("00112233445566778899aabbccddeeff00112233") + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + testcases := []struct { address p2p.NodeAddress expect p2p.Endpoint @@ -275,6 +279,9 @@ func TestNodeAddress_Resolve(t *testing.T) { for _, tc := range testcases { tc := tc t.Run(tc.address.String(), func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + endpoints, err := tc.address.Resolve(ctx) if !tc.ok { require.Error(t, err) diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index a7efe54e3..9fb330286 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -2,6 +2,7 @@ package conn import ( "bufio" + "context" "errors" "fmt" "io" @@ -209,8 +210,8 @@ func NewMConnectionWithConfig( } // OnStart implements BaseService -func (c *MConnection) OnStart() error { - if err := c.BaseService.OnStart(); err != nil { +func (c *MConnection) OnStart(ctx context.Context) error { + if err := c.BaseService.OnStart(ctx); err != nil { return err } c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle) diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 5c4cdb156..dc198d8bd 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -1,6 +1,7 @@ package conn import ( + "context" "encoding/hex" "net" "testing" @@ -47,8 +48,11 @@ func TestMConnectionSendFlushStop(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + clientConn := createTestMConnection(log.TestingLogger(), client) - err := clientConn.Start() + err := clientConn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, clientConn)) @@ -81,8 +85,11 @@ func TestMConnectionSend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createTestMConnection(log.TestingLogger(), client) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -118,13 +125,17 @@ func TestMConnectionReceive(t *testing.T) { errorsCh <- r } logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn1 := createMConnectionWithCallbacks(logger, client, onReceive, onError) - err := mconn1.Start() + err := mconn1.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn1)) mconn2 := createTestMConnection(logger, server) - err = mconn2.Start() + err = mconn2.Start(ctx) require.Nil(t, err) 
t.Cleanup(stopAll(t, mconn2)) @@ -153,8 +164,12 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -191,8 +206,11 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -245,8 +263,11 @@ func TestMConnectionMultiplePings(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -292,8 +313,12 @@ func TestMConnectionPingPongs(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -349,8 +374,11 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { onError := func(r interface{}) { errorsCh <- r } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -369,7 +397,11 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { } } -func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { +func newClientAndServerConnsForReadErrors( + ctx context.Context, + t *testing.T, + chOnErr chan struct{}, +) (*MConnection, *MConnection) { server, client := NetPipe() onReceive := func(chID ChannelID, msgBytes []byte) {} @@ -381,8 +413,9 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) ( {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, } logger := log.TestingLogger() + mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError) - err := mconnClient.Start() + err := mconnClient.Start(ctx) require.Nil(t, err) // create server conn with 1 channel @@ -391,8 +424,9 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) ( onError = func(r interface{}) { chOnErr <- struct{}{} } + mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) - err = mconnServer.Start() + err = mconnServer.Start(ctx) require.Nil(t, err) return mconnClient, mconnServer } @@ -408,8 +442,11 @@ func expectSend(ch chan struct{}) bool { } func TestMConnectionReadErrorBadEncoding(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) 
client := mconnClient.conn @@ -421,8 +458,11 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) msg := []byte("Ant-Man") @@ -440,7 +480,10 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { @@ -474,8 +517,11 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { } func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) // send msg with unknown msg type @@ -487,9 +533,11 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionTrySend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() mconn := createTestMConnection(log.TestingLogger(), client) - err := mconn.Start() + err := mconn.Start(ctx) require.Nil(t, err) t.Cleanup(stopAll(t, mconn)) @@ -535,7 +583,10 @@ func TestMConnectionChannelOverflow(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) t.Cleanup(stopAll(t, mconnClient, mconnServer)) mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 642114a1d..d8657b774 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -1,8 +1,6 @@ package p2p_test import ( - "context" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" @@ -13,7 +11,6 @@ import ( // Common setup for P2P tests. var ( - ctx = context.Background() chID = p2p.ChannelID(1) chDesc = &p2p.ChannelDescriptor{ ID: chID, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 0d92b2619..6ee253b3c 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -47,7 +47,7 @@ func (opts *NetworkOptions) setDefaults() { // MakeNetwork creates a test network with the given number of nodes and // connects them to each other. 
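// Sketch of the updated call pattern, mirroring the test changes elsewhere in
// this patch (values illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4})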
-func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { +func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network { opts.setDefaults() logger := log.TestingLogger() network := &Network{ @@ -57,7 +57,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { } for i := 0; i < opts.NumNodes; i++ { - node := network.MakeNode(t, opts.NodeOpts) + node := network.MakeNode(ctx, t, opts.NodeOpts) network.Nodes[node.NodeID] = node } @@ -221,7 +221,7 @@ type Node struct { // MakeNode creates a new Node configured for the network with a // running peer manager, but does not add it to the existing // network. Callers are responsible for updating peering relationships. -func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { +func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node { privKey := ed25519.GenPrivKey() nodeID := types.NodeIDFromPubKey(privKey.PubKey()) nodeInfo := types.NodeInfo{ @@ -252,8 +252,9 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { transport.Endpoints(), p2p.RouterOptions{DialSleep: func(_ context.Context) {}}, ) + require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) t.Cleanup(func() { if router.IsRunning() { @@ -304,12 +305,12 @@ func (n *Node) MakeChannelNoCleanup( // MakePeerUpdates opens a peer update subscription, with automatic cleanup. // It checks that all updates have been consumed during cleanup. -func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { +func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdates { t.Helper() sub := n.PeerManager.Subscribe() t.Cleanup(func() { t.Helper() - RequireNoUpdates(t, sub) + RequireNoUpdates(ctx, t, sub) sub.Close() }) diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index a9fc16a34..106063bbd 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -1,6 +1,7 @@ package p2ptest import ( + "context" "testing" "time" @@ -95,11 +96,14 @@ func RequireSendReceive( } // RequireNoUpdates requires that a PeerUpdates subscription is empty. 
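// With the context-aware form below, a canceled ctx makes the check return
// quietly instead of failing the test. A sketch of a call site (illustrative;
// sub is a stand-in):
//
//	sub := node.PeerManager.Subscribe()
//	p2ptest.RequireNoUpdates(ctx, t, sub)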
-func RequireNoUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates) { +func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUpdates) { t.Helper() select { case update := <-peerUpdates.Updates(): - require.Fail(t, "unexpected peer updates", "got %v", update) + if ctx.Err() == nil { + require.Fail(t, "unexpected peer updates", "got %v", update) + } + case <-ctx.Done(): default: } } diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 69c798d2d..28efe63dd 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -273,6 +273,9 @@ func TestPeerManager_Add(t *testing.T) { } func TestPeerManager_DialNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -296,6 +299,9 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} options := p2p.PeerManagerOptions{ @@ -311,7 +317,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { // Do five dial retries (six dials total). The retry time should double for // each failure. At the forth retry, MaxRetryTime should kick in. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + ctx, cancel = context.WithTimeout(ctx, 5*time.Second) defer cancel() for i := 0; i <= 5; i++ { @@ -342,6 +348,9 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -356,7 +365,7 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { }() // This will block until peer is added above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err := peerManager.DialNext(ctx) require.NoError(t, err) @@ -364,6 +373,9 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, }) @@ -395,7 +407,7 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { }() // This should make b available for dialing (not a, retries are disabled). 
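// The := to = swap below is the recurring pattern in this file: the timeout
// context is now derived from the test's base context rather than a fresh
// context.Background(), so canceling the test also cancels the wait. Sketch:
//
//	ctx, cancel := context.WithCancel(context.Background()) // test scope
//	defer cancel()
//	ctx, cancel = context.WithTimeout(ctx, 3*time.Second) // per-wait deadline
//	defer cancel()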
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -403,6 +415,9 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + options := p2p.PeerManagerOptions{MinRetryTime: 200 * time.Millisecond} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options) require.NoError(t, err) @@ -421,7 +436,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { // The retry timer should unblock DialNext and make a available again after // the retry time passes. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -430,6 +445,9 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -450,7 +468,7 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { peerManager.Disconnected(a.NodeID) }() - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -1289,6 +1307,9 @@ func TestPeerManager_Ready(t *testing.T) { // See TryEvictNext for most tests, this just tests blocking behavior. func TestPeerManager_EvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1322,6 +1343,9 @@ func TestPeerManager_EvictNext(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1340,7 +1364,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { }() // This will block until peer errors above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1348,6 +1372,9 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1378,7 +1405,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { }() // This will block until peer is upgraded above. 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1386,6 +1413,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1410,7 +1440,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 9a5535a91..f6fcad5e1 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -1,6 +1,7 @@ package pex import ( + "context" "fmt" "runtime/debug" "sync" @@ -139,7 +140,7 @@ func NewReactor( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { go r.processPexCh() go r.processPeerUpdates() return nil diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index 04b347cb5..63b182fc0 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,6 +1,7 @@ package pex_test import ( + "context" "strings" "testing" "time" @@ -29,13 +30,15 @@ const ( ) func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // start a network with one mock reactor and one "real" reactor - testNet := setupNetwork(t, testOptions{ + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) // assert that the mock node receives a request from the real node testNet.listenForRequest(t, secondNode, firstNode, shortWait) @@ -47,14 +50,17 @@ func TestReactorBasic(t *testing.T) { } func TestReactorConnectFullNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 4, }) // make every node be only connected with one other node (it actually ends up // being two because of two way connections but oh well) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -63,7 +69,10 @@ func TestReactorConnectFullNetwork(t *testing.T) { } func TestReactorSendsRequestsTooOften(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) badNode := newNodeID(t, "b") @@ -90,12 +99,15 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { } func TestReactorSendsResponseWithoutRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := 
setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 3, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send @@ -108,14 +120,17 @@ func TestReactorSendsResponseWithoutRequest(t *testing.T) { } func TestReactorNeverSendsTooManyPeers(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) - testNet.addNodes(t, 110) + testNet.addNodes(ctx, t, 110) nodes := make([]int, 110) for i := 0; i < len(nodes); i++ { nodes[i] = i + 2 @@ -128,7 +143,10 @@ func TestReactorNeverSendsTooManyPeers(t *testing.T) { } func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} added, err := r.manager.Add(peer) require.NoError(t, err) @@ -172,14 +190,17 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { } func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 8, MaxPeers: 4, MaxConnected: 3, BufferSize: 8, }) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // test that all nodes reach full capacity for _, nodeID := range testNet.nodes { @@ -191,14 +212,17 @@ func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { } func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 3, MaxPeers: 25, MaxConnected: 25, BufferSize: 5, }) testNet.connectN(t, 1) - testNet.start(t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -207,12 +231,15 @@ func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { } func TestReactorWithNetworkGrowth(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 5, BufferSize: 5, }) testNet.connectAll(t) - testNet.start(t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -220,10 +247,10 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } // now we inject 10 more nodes - testNet.addNodes(t, 10) + testNet.addNodes(ctx, t, 10) for i := 5; i < testNet.total; i++ { node := testNet.nodes[i] - require.NoError(t, testNet.reactors[node].Start()) + require.NoError(t, testNet.reactors[node].Start(ctx)) require.True(t, testNet.reactors[node].IsRunning()) // we connect all new nodes to a single entry point and check that the // node can distribute the addresses to all the others @@ -247,7 +274,7 @@ type singleTestReactor struct { manager *p2p.PeerManager } -func setupSingle(t *testing.T) *singleTestReactor { +func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { t.Helper() nodeID := newNodeID(t, "a") chBuf := 2 @@ 
-268,14 +295,11 @@ func setupSingle(t *testing.T) *singleTestReactor {
 	require.NoError(t, err)
 
 	reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
-	require.NoError(t, reactor.Start())
+	require.NoError(t, reactor.Start(ctx))
 	t.Cleanup(func() {
-		err := reactor.Stop()
-		if err != nil {
-			t.Fatal(err)
-		}
 		pexCh.Close()
 		peerUpdates.Close()
+		reactor.Wait()
 	})
 
 	return &singleTestReactor{
@@ -315,7 +339,7 @@ type testOptions struct {
 
 // setupNetwork sets up a test suite with a network of nodes. Mock nodes represent the
 // hollow nodes that the test can listen and send on
-func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
+func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
 	t.Helper()
 
 	require.Greater(t, opts.TotalNodes, opts.MockNodes)
@@ -335,7 +359,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 
 	rts := &reactorTestSuite{
 		logger:      log.TestingLogger().With("testCase", t.Name()),
-		network:     p2ptest.MakeNetwork(t, networkOpts),
+		network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
 		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
 		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
 		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
@@ -375,7 +399,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 	t.Cleanup(func() {
 		for nodeID, reactor := range rts.reactors {
 			if reactor.IsRunning() {
-				require.NoError(t, reactor.Stop())
+				reactor.Wait()
 				require.False(t, reactor.IsRunning())
 			}
 			rts.pexChannels[nodeID].Close()
@@ -391,20 +415,20 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 }
 
 // starts up the pex reactors for each node
-func (r *reactorTestSuite) start(t *testing.T) {
+func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) {
 	t.Helper()
 
 	for _, reactor := range r.reactors {
-		require.NoError(t, reactor.Start())
+		require.NoError(t, reactor.Start(ctx))
 		require.True(t, reactor.IsRunning())
 	}
 }
 
-func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
+func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) {
 	t.Helper()
 
 	for i := 0; i < nodes; i++ {
-		node := r.network.MakeNode(t, p2ptest.NodeOptions{
+		node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{
 			MaxPeers:     r.opts.MaxPeers,
 			MaxConnected: r.opts.MaxConnected,
 		})
diff --git a/internal/p2p/router.go b/internal/p2p/router.go
index efd29c0d4..29646e327 100644
--- a/internal/p2p/router.go
+++ b/internal/p2p/router.go
@@ -1023,7 +1023,7 @@ func (r *Router) NodeInfo() types.NodeInfo {
 }
 
 // OnStart implements service.Service.
-func (r *Router) OnStart() error {
+func (r *Router) OnStart(ctx context.Context) error {
 	for _, transport := range r.transports {
 		for _, endpoint := range r.endpoints {
 			if err := transport.Listen(endpoint); err != nil {
diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go
index 77c6f768e..c77e9e44d 100644
--- a/internal/p2p/router_test.go
+++ b/internal/p2p/router_test.go
@@ -44,10 +44,13 @@ func echoReactor(channel *p2p.Channel) {
 }
 
 func TestRouter_Network(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	t.Cleanup(leaktest.Check(t))
 
 	// Create a test network and open a channel where all peers run echoReactor.
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}) local := network.RandomNode() peers := network.Peers(local.NodeID) channels := network.MakeChannels(t, chDesc) @@ -114,10 +117,11 @@ func TestRouter_Channel_Basic(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) - t.Cleanup(func() { - require.NoError(t, router.Stop()) - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, router.Start(ctx)) + t.Cleanup(router.Wait) // Opening a channel should work. channel, err := router.OpenChannel(chDesc) @@ -158,10 +162,13 @@ func TestRouter_Channel_Basic(t *testing.T) { // Channel tests are hairy to mock, so we use an in-memory network instead. func TestRouter_Channel_SendReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] @@ -219,8 +226,11 @@ func TestRouter_Channel_SendReceive(t *testing.T) { func TestRouter_Channel_Broadcast(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}) ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] @@ -247,8 +257,11 @@ func TestRouter_Channel_Broadcast(t *testing.T) { func TestRouter_Channel_Wrapper(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 2}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 2}) ids := network.NodeIDs() aID, bID := ids[0], ids[1] @@ -314,8 +327,11 @@ func (w *wrapperMessage) Unwrap() (proto.Message, error) { func TestRouter_Channel_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) network.Start(t) ids := network.NodeIDs() @@ -324,7 +340,7 @@ func TestRouter_Channel_Error(t *testing.T) { a := channels[aID] // Erroring b should cause it to be disconnected. It will reconnect shortly after. - sub := network.Nodes[aID].MakePeerUpdates(t) + sub := network.Nodes[aID].MakePeerUpdates(ctx, t) p2ptest.RequireError(t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) p2ptest.RequireUpdates(t, sub, []p2p.PeerUpdate{ {NodeID: bID, Status: p2p.PeerStatusDown}, @@ -353,9 +369,16 @@ func TestRouter_AcceptPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Set up a mock transport that handshakes. 
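The router tests above replace the old Start/Stop pairing with Start(ctx) plus t.Cleanup(router.Wait): cancelling the test context is what triggers shutdown, and Wait only blocks until that shutdown has finished. The same idea, sketched against a hypothetical service exposing that pair of methods:

    package example

    import (
    	"context"
    	"testing"
    )

    // svc stands in for any service with the Start/Wait lifecycle.
    type svc interface {
    	Start(context.Context) error
    	Wait()
    }

    func startForTest(ctx context.Context, t *testing.T, s svc) {
    	t.Helper()
    	if err := s.Start(ctx); err != nil {
    		t.Fatalf("start: %v", err)
    	}
    	// No explicit Stop: cancelling ctx initiates shutdown, and Wait,
    	// run during cleanup, blocks until it completes.
    	t.Cleanup(s.Wait)
    }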
@@ -398,7 +421,7 @@ func TestRouter_AcceptPeers(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -427,6 +450,9 @@ func TestRouter_AcceptPeers(t *testing.T) { func TestRouter_AcceptPeers_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns an error, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} @@ -452,7 +478,7 @@ func TestRouter_AcceptPeers_Error(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -487,7 +513,10 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -497,6 +526,9 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns a connection that blocks during the // handshake. It should be able to accept several of these in parallel, i.e. // a single connection can't halt other connections being accepted. @@ -535,7 +567,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { return len(acceptCh) == 3 @@ -574,10 +606,16 @@ func TestRouter_DialPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(bctx) + defer cancel() address := p2p.NodeAddress{Protocol: "mock", NodeID: tc.dialID} endpoint := p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} @@ -635,7 +673,7 @@ func TestRouter_DialPeers(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -664,6 +702,9 @@ func TestRouter_DialPeers(t *testing.T) { func TestRouter_DialPeers_Parallel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -729,7 +770,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { @@ -750,6 +791,9 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { func TestRouter_EvictPeers(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that we can evict. 
closeCh := make(chan time.Time) closeOnce := sync.Once{} @@ -792,7 +836,7 @@ func TestRouter_EvictPeers(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) // Wait for the mock peer to connect, then evict it by reporting an error. p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -815,6 +859,8 @@ func TestRouter_EvictPeers(t *testing.T) { func TestRouter_ChannelCompatability(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() incompatiblePeer := types.NodeInfo{ NodeID: peerID, @@ -854,7 +900,7 @@ func TestRouter_ChannelCompatability(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(1 * time.Second) require.NoError(t, router.Stop()) require.Empty(t, peerManager.Peers()) @@ -865,6 +911,8 @@ func TestRouter_ChannelCompatability(t *testing.T) { func TestRouter_DontSendOnInvalidChannel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() peer := types.NodeInfo{ NodeID: peerID, @@ -909,7 +957,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ NodeID: peerInfo.NodeID, diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 736f5360a..0580ce1bf 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -291,7 +291,7 @@ func (c *mConnConnection) Handshake( } c.mconn = mconn c.logger = mconn.Logger - if err = c.mconn.Start(); err != nil { + if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } return peerInfo, peerKey, nil diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index d33438109..4d9a945cb 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "io" "net" "testing" @@ -58,6 +59,9 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), @@ -124,6 +128,9 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { } func TestMConnTransport_Listen(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testcases := []struct { endpoint p2p.Endpoint ok bool @@ -145,6 +152,9 @@ func TestMConnTransport_Listen(t *testing.T) { t.Run(tc.endpoint.String(), func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel = context.WithCancel(ctx) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), @@ -185,6 +195,9 @@ func TestMConnTransport_Listen(t *testing.T) { go func() { // Dialing the endpoint should work. 
var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerConn, err = transport.Dial(ctx, endpoint) require.NoError(t, err) close(dialedChan) diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index cdfb57c70..a53be251d 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -25,20 +25,26 @@ var testTransports = map[string]transportFactory{} // withTransports is a test helper that runs a test against all transports // registered in testTransports. -func withTransports(t *testing.T, tester func(*testing.T, transportFactory)) { +func withTransports(ctx context.Context, t *testing.T, tester func(context.Context, *testing.T, transportFactory)) { t.Helper() for name, transportFactory := range testTransports { transportFactory := transportFactory t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) - tester(t, transportFactory) + tctx, cancel := context.WithCancel(ctx) + defer cancel() + + tester(tctx, t, transportFactory) }) } } func TestTransport_AcceptClose(t *testing.T) { // Just test accept unblock on close, happy path is tested widely elsewhere. - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) // In-progress Accept should error on concurrent close. @@ -75,7 +81,10 @@ func TestTransport_DialEndpoints(t *testing.T) { {[]byte{1, 2, 3, 4, 5}, false}, } - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) endpoints := a.Endpoints() require.NotEmpty(t, endpoints) @@ -149,8 +158,11 @@ func TestTransport_DialEndpoints(t *testing.T) { } func TestTransport_Dial(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Most just tests dial failures, happy path is tested widely elsewhere. 
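withTransports now hands each subtest a child of the caller's context, so cancellation inside one transport's subtest cannot leak into its siblings while everything still hangs off a single root. The bare pattern, with illustrative names:

    package example

    import (
    	"context"
    	"testing"
    )

    func runSubtests(ctx context.Context, t *testing.T, names []string, f func(context.Context, *testing.T)) {
    	t.Helper()
    	for _, name := range names {
    		t.Run(name, func(t *testing.T) {
    			// One child per subtest: cancelled here without touching
    			// the root or sibling subtests.
    			tctx, cancel := context.WithCancel(ctx)
    			defer cancel()
    			f(tctx, t)
    		})
    	}
    }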
- withTransports(t, func(t *testing.T, makeTransport transportFactory) { + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -190,7 +202,10 @@ func TestTransport_Dial(t *testing.T) { } func TestTransport_Endpoints(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -214,7 +229,10 @@ func TestTransport_Endpoints(t *testing.T) { } func TestTransport_Protocols(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) protocols := a.Protocols() endpoints := a.Endpoints() @@ -228,17 +246,23 @@ func TestTransport_Protocols(t *testing.T) { } func TestTransport_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) require.NotEmpty(t, a.String()) }) } func TestConnection_Handshake(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) // A handshake should pass the given keys and NodeInfo. aKey := ed25519.GenPrivKey() @@ -270,7 +294,10 @@ func TestConnection_Handshake(t *testing.T) { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) } - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } }() peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) @@ -283,12 +310,15 @@ func TestConnection_Handshake(t *testing.T) { } func TestConnection_HandshakeCancel(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) // Handshake should error on context cancellation. - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -298,7 +328,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { _ = ba.Close() // Handshake should error on context timeout. 
- ab, ba = dialAccept(t, a, b) + ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -310,10 +340,13 @@ func TestConnection_HandshakeCancel(t *testing.T) { } func TestConnection_FlushClose(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) + ab, _ := dialAcceptHandshake(ctx, t, a, b) err := ab.Close() require.NoError(t, err) @@ -329,10 +362,13 @@ func TestConnection_FlushClose(t *testing.T) { } func TestConnection_LocalRemoteEndpoint(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Local and remote connection endpoints correspond to each other. require.NotEmpty(t, ab.LocalEndpoint()) @@ -343,10 +379,13 @@ func TestConnection_LocalRemoteEndpoint(t *testing.T) { } func TestConnection_SendReceive(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Can send and receive a to b. err := ab.SendMessage(chID, []byte("foo")) @@ -402,10 +441,13 @@ func TestConnection_SendReceive(t *testing.T) { } func TestConnection_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAccept(t, a, b) + ab, _ := dialAccept(ctx, t, a, b) require.NotEmpty(t, ab.String()) }) } @@ -552,7 +594,7 @@ func TestEndpoint_Validate(t *testing.T) { // dialAccept is a helper that dials b from a and returns both sides of the // connection. -func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAccept(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() endpoints := b.Endpoints() @@ -585,13 +627,10 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio // dialAcceptHandshake is a helper that dials and handshakes b from a and // returns both sides of the connection. 
-func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() - ab, ba := dialAccept(t, a, b) - - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() + ab, ba := dialAccept(ctx, t, a, b) errCh := make(chan error, 1) go func() { diff --git a/internal/p2p/trust/metric.go b/internal/p2p/trust/metric.go index aa0ff5298..8ad814f10 100644 --- a/internal/p2p/trust/metric.go +++ b/internal/p2p/trust/metric.go @@ -4,6 +4,7 @@ package trust import ( + "context" "math" "time" @@ -109,8 +110,8 @@ func NewMetricWithConfig(tmc MetricConfig) *Metric { } // OnStart implements Service -func (tm *Metric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { +func (tm *Metric) OnStart(ctx context.Context) error { + if err := tm.BaseService.OnStart(ctx); err != nil { return err } go tm.processRequests() diff --git a/internal/p2p/trust/metric_test.go b/internal/p2p/trust/metric_test.go index 65caf38a2..b7d19da23 100644 --- a/internal/p2p/trust/metric_test.go +++ b/internal/p2p/trust/metric_test.go @@ -1,6 +1,7 @@ package trust import ( + "context" "testing" "time" @@ -9,8 +10,11 @@ import ( ) func TestTrustMetricScores(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tm := NewMetric() - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // Perfect score @@ -27,6 +31,9 @@ func TestTrustMetricScores(t *testing.T) { } func TestTrustMetricConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // 7 days window := time.Minute * 60 * 24 * 7 config := MetricConfig{ @@ -35,7 +42,7 @@ func TestTrustMetricConfig(t *testing.T) { } tm := NewMetricWithConfig(config) - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // The max time intervals should be the TrackingWindow / IntervalLen @@ -52,7 +59,7 @@ func TestTrustMetricConfig(t *testing.T) { config.ProportionalWeight = 0.3 config.IntegralWeight = 0.7 tm = NewMetricWithConfig(config) - err = tm.Start() + err = tm.Start(ctx) require.NoError(t, err) // These weights should be equal to our custom values @@ -74,12 +81,15 @@ func TestTrustMetricCopyNilPointer(t *testing.T) { // XXX: This test fails non-deterministically //nolint:unused,deadcode func _TestTrustMetricStopPause(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // The TestTicker will provide manual control over // the passing of time within the metric tt := NewTestTicker() tm := NewMetric() tm.SetTicker(tt) - err := tm.Start() + err := tm.Start(ctx) require.NoError(t, err) // Allow some time intervals to pass and pause tt.NextTick() diff --git a/internal/p2p/trust/store.go b/internal/p2p/trust/store.go index 158354e14..a01cbab2e 100644 --- a/internal/p2p/trust/store.go +++ b/internal/p2p/trust/store.go @@ -4,6 +4,7 @@ package trust import ( + "context" "encoding/json" "fmt" "time" @@ -51,15 +52,15 @@ func NewTrustMetricStore(db dbm.DB, tmc MetricConfig, logger log.Logger) *Metric } // OnStart implements Service -func (tms *MetricStore) OnStart() error { - if err := tms.BaseService.OnStart(); err != nil { +func (tms *MetricStore) OnStart(ctx context.Context) error { + if err := tms.BaseService.OnStart(ctx); err != nil { return err } tms.mtx.Lock() defer tms.mtx.Unlock() - tms.loadFromDB() + tms.loadFromDB(ctx) go tms.saveRoutine() return nil } @@ -103,7 +104,7 @@ func 
(tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) { } // GetPeerTrustMetric returns a trust metric by peer key -func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { +func (tms *MetricStore) GetPeerTrustMetric(ctx context.Context, key string) *Metric { tms.mtx.Lock() defer tms.mtx.Unlock() @@ -111,7 +112,7 @@ func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { if !ok { // If the metric is not available, we will create it tm = NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { + if err := tm.Start(ctx); err != nil { tms.Logger.Error("unable to start metric store", "error", err) } // The metric needs to be in the map @@ -152,7 +153,7 @@ func (tms *MetricStore) size() int { // Loads the history data for all peers from the store DB // cmn.Panics if file is corrupt -func (tms *MetricStore) loadFromDB() bool { +func (tms *MetricStore) loadFromDB(ctx context.Context) bool { // Obtain the history data we have so far bytes, err := tms.db.Get(trustMetricKey) if err != nil { @@ -173,7 +174,7 @@ func (tms *MetricStore) loadFromDB() bool { for key, p := range peers { tm := NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { + if err := tm.Start(ctx); err != nil { tms.Logger.Error("unable to start metric", "error", err) } tm.Init(p) diff --git a/internal/p2p/trust/store_test.go b/internal/p2p/trust/store_test.go index d1420b1dc..a6178459f 100644 --- a/internal/p2p/trust/store_test.go +++ b/internal/p2p/trust/store_test.go @@ -4,6 +4,7 @@ package trust import ( + "context" "fmt" "testing" @@ -15,6 +16,9 @@ import ( ) func TestTrustMetricStoreSaveLoad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dir := t.TempDir() logger := log.TestingLogger() @@ -26,7 +30,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { store.saveToDB() // Load the data from the file store = NewTrustMetricStore(historyDB, DefaultConfig(), logger) - err = store.Start() + err = store.Start(ctx) require.NoError(t, err) // Make sure we still have 0 entries assert.Zero(t, store.Size()) @@ -44,7 +48,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { tm := NewMetric() tm.SetTicker(tt[i]) - err = tm.Start() + err = tm.Start(ctx) require.NoError(t, err) store.AddPeerTrustMetric(key, tm) @@ -65,7 +69,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { // Load the data from the DB store = NewTrustMetricStore(historyDB, DefaultConfig(), logger) - err = store.Start() + err = store.Start(ctx) require.NoError(t, err) // Check that we still have 100 peers with imperfect trust values @@ -79,6 +83,9 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { } func TestTrustMetricStoreConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := dbm.NewDB("", "memdb", "") require.NoError(t, err) @@ -91,11 +98,11 @@ func TestTrustMetricStoreConfig(t *testing.T) { // Create a store with custom config store := NewTrustMetricStore(historyDB, config, logger) - err = store.Start() + err = store.Start(ctx) require.NoError(t, err) // Have the store make us a metric with the config - tm := store.GetPeerTrustMetric("TestKey") + tm := store.GetPeerTrustMetric(ctx, "TestKey") // Check that the options made it to the metric assert.Equal(t, 0.5, tm.proportionalWeight) @@ -105,18 +112,21 @@ func TestTrustMetricStoreConfig(t *testing.T) { } func TestTrustMetricStoreLookup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := 
dbm.NewDB("", "memdb", "") require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger()) - err = store.Start() + err = store.Start(ctx) require.NoError(t, err) // Create 100 peers in the trust metric store for i := 0; i < 100; i++ { key := fmt.Sprintf("peer_%d", i) - store.GetPeerTrustMetric(key) + store.GetPeerTrustMetric(ctx, key) // Check that the trust metric was successfully entered ktm := store.peerMetrics[key] @@ -128,16 +138,19 @@ func TestTrustMetricStoreLookup(t *testing.T) { } func TestTrustMetricStorePeerScore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + historyDB, err := dbm.NewDB("", "memdb", "") require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig(), log.TestingLogger()) - err = store.Start() + err = store.Start(ctx) require.NoError(t, err) key := "TestKey" - tm := store.GetPeerTrustMetric(key) + tm := store.GetPeerTrustMetric(ctx, key) // This peer is innocent so far first := tm.TrustScore() @@ -156,7 +169,7 @@ func TestTrustMetricStorePeerScore(t *testing.T) { store.PeerDisconnected(key) // We will remember our experiences with this peer - tm = store.GetPeerTrustMetric(key) + tm = store.GetPeerTrustMetric(ctx, key) assert.NotEqual(t, 100, tm.TrustScore()) err = store.Stop() require.NoError(t, err) diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go index 4b4abe607..5eb810bd6 100644 --- a/internal/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -51,16 +51,15 @@ func TestEcho(t *testing.T) { logger := log.TestingLogger() clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Start server s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) - if err := s.Start(); err != nil { + if err := s.Start(ctx); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); s.Wait() }) // Start client cli, err := clientCreator(logger.With("module", "abci-client")) @@ -68,14 +67,13 @@ func TestEcho(t *testing.T) { t.Fatalf("Error creating ABCI client: %v", err.Error()) } - if err := cli.Start(); err != nil { + if err := cli.Start(ctx); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } proxy := newAppConnTest(cli) t.Log("Connected") - ctx := context.Background() for i := 0; i < 1000; i++ { _, err = proxy.EchoAsync(ctx, fmt.Sprintf("echo-%v", i)) if err != nil { @@ -99,16 +97,15 @@ func BenchmarkEcho(b *testing.B) { logger := log.TestingLogger() clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Start server s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) - if err := s.Start(); err != nil { + if err := s.Start(ctx); err != nil { b.Fatalf("Error starting socket server: %v", err.Error()) } - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) + b.Cleanup(func() { cancel(); s.Wait() }) // Start client cli, err := clientCreator(logger.With("module", "abci-client")) @@ -116,7 +113,7 @@ func BenchmarkEcho(b *testing.B) { b.Fatalf("Error creating ABCI client: %v", err.Error()) } - if err := cli.Start(); err != nil { + if err := cli.Start(ctx); err != nil { b.Fatalf("Error 
starting ABCI client: %v", err.Error())
 	}
 
@@ -125,7 +122,6 @@ func BenchmarkEcho(b *testing.B) {
 	echoString := strings.Repeat(" ", 200)
 
 	b.StartTimer() // Start benchmarking tests
-	ctx := context.Background()
 	for i := 0; i < b.N; i++ {
 		_, err = proxy.EchoAsync(ctx, echoString)
 		if err != nil {
@@ -152,16 +148,15 @@ func TestInfo(t *testing.T) {
 	logger := log.TestingLogger()
 	clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	// Start server
 	s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication())
-	if err := s.Start(); err != nil {
+	if err := s.Start(ctx); err != nil {
 		t.Fatalf("Error starting socket server: %v", err.Error())
 	}
-	t.Cleanup(func() {
-		if err := s.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
+	t.Cleanup(func() { cancel(); s.Wait() })
 
 	// Start client
 	cli, err := clientCreator(logger.With("module", "abci-client"))
@@ -169,7 +164,7 @@ func TestInfo(t *testing.T) {
 		t.Fatalf("Error creating ABCI client: %v", err.Error())
 	}
 
-	if err := cli.Start(); err != nil {
+	if err := cli.Start(ctx); err != nil {
 		t.Fatalf("Error starting ABCI client: %v", err.Error())
 	}
 
diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go
index 62862d66e..5e5920f24 100644
--- a/internal/proxy/multi_app_conn.go
+++ b/internal/proxy/multi_app_conn.go
@@ -1,6 +1,8 @@
 package proxy
 
 import (
+	"context"
+	"errors"
 	"fmt"
 	"os"
 	"syscall"
@@ -51,14 +53,22 @@ type multiAppConn struct {
 	queryConn     AppConnQuery
 	snapshotConn  AppConnSnapshot
 
-	consensusConnClient abciclient.Client
-	mempoolConnClient   abciclient.Client
-	queryConnClient     abciclient.Client
-	snapshotConnClient  abciclient.Client
+	consensusConnClient stoppableClient
+	mempoolConnClient   stoppableClient
+	queryConnClient     stoppableClient
+	snapshotConnClient  stoppableClient
 
 	clientCreator abciclient.Creator
 }
 
+// TODO: this is a totally internal and quasi-permanent shim for
+// clients. Eventually we can have a single client and have some kind
+// of reasonable lifecycle without needing an explicit stop method.
+type stoppableClient interface {
+	abciclient.Client
+	Stop() error
+}
+
 // NewMultiAppConn makes all necessary abci connections to the application.
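The stoppableClient shim above works by interface intersection: it embeds the public abciclient.Client interface and re-adds the one legacy method the proxy still calls, and the c.(stoppableClient) assertions later in OnStart fail loudly if a creator ever returns a client without Stop. The mechanism in isolation, with both interfaces trimmed to a single method:

    package example

    import "context"

    // client stands in for the public client surface.
    type client interface {
    	Start(context.Context) error
    }

    // stoppableClient re-adds the legacy Stop that concrete clients still
    // implement but the public interface no longer advertises.
    type stoppableClient interface {
    	client
    	Stop() error
    }

    func asStoppable(c client) stoppableClient {
    	// Panics if c lacks Stop; while the shim exists, that loud failure
    	// beats silently skipping shutdown.
    	return c.(stoppableClient)
    }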
func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { multiAppConn := &multiAppConn{ @@ -85,36 +95,36 @@ func (app *multiAppConn) Snapshot() AppConnSnapshot { return app.snapshotConn } -func (app *multiAppConn) OnStart() error { - c, err := app.abciClientFor(connQuery) +func (app *multiAppConn) OnStart(ctx context.Context) error { + c, err := app.abciClientFor(ctx, connQuery) if err != nil { return err } - app.queryConnClient = c + app.queryConnClient = c.(stoppableClient) app.queryConn = NewAppConnQuery(c, app.metrics) - c, err = app.abciClientFor(connSnapshot) + c, err = app.abciClientFor(ctx, connSnapshot) if err != nil { app.stopAllClients() return err } - app.snapshotConnClient = c + app.snapshotConnClient = c.(stoppableClient) app.snapshotConn = NewAppConnSnapshot(c, app.metrics) - c, err = app.abciClientFor(connMempool) + c, err = app.abciClientFor(ctx, connMempool) if err != nil { app.stopAllClients() return err } - app.mempoolConnClient = c + app.mempoolConnClient = c.(stoppableClient) app.mempoolConn = NewAppConnMempool(c, app.metrics) - c, err = app.abciClientFor(connConsensus) + c, err = app.abciClientFor(ctx, connConsensus) if err != nil { app.stopAllClients() return err } - app.consensusConnClient = c + app.consensusConnClient = c.(stoppableClient) app.consensusConn = NewAppConnConsensus(c, app.metrics) // Kill Tendermint if the ABCI application crashes. @@ -160,34 +170,42 @@ func (app *multiAppConn) killTMOnClientError() { func (app *multiAppConn) stopAllClients() { if app.consensusConnClient != nil { if err := app.consensusConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping consensus client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping consensus client", "error", err) + } } } if app.mempoolConnClient != nil { if err := app.mempoolConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping mempool client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping mempool client", "error", err) + } } } if app.queryConnClient != nil { if err := app.queryConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping query client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping query client", "error", err) + } } } if app.snapshotConnClient != nil { if err := app.snapshotConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping snapshot client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.Logger.Error("error while stopping snapshot client", "error", err) + } } } } -func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { +func (app *multiAppConn) abciClientFor(ctx context.Context, conn string) (abciclient.Client, error) { c, err := app.clientCreator(app.Logger.With( "module", "abci-client", "connection", conn)) if err != nil { return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) } - if err := c.Start(); err != nil { + if err := c.Start(ctx); err != nil { return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) } return c, nil diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go index af9c30091..9ad39cb3b 100644 --- a/internal/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "errors" "os" 
"os/signal" @@ -17,31 +18,42 @@ import ( "github.com/tendermint/tendermint/libs/log" ) +type noopStoppableClientImpl struct { + abciclient.Client + count int +} + +func (c *noopStoppableClientImpl) Stop() error { c.count++; return nil } + func TestAppConns_Start_Stop(t *testing.T) { quitCh := make(<-chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + clientMock := &abcimocks.Client{} - clientMock.On("Start").Return(nil).Times(4) - clientMock.On("Stop").Return(nil).Times(4) + clientMock.On("Start", mock.Anything).Return(nil).Times(4) clientMock.On("Quit").Return(quitCh).Times(4) + cl := &noopStoppableClientImpl{Client: clientMock} creatorCallCount := 0 creator := func(logger log.Logger) (abciclient.Client, error) { creatorCallCount++ - return clientMock, nil + return cl, nil } appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - err := appConns.Start() + err := appConns.Start(ctx) require.NoError(t, err) time.Sleep(100 * time.Millisecond) - err = appConns.Stop() - require.NoError(t, err) + cancel() + appConns.Wait() clientMock.AssertExpectations(t) + assert.Equal(t, 4, cl.count) assert.Equal(t, 4, creatorCallCount) } @@ -56,31 +68,30 @@ func TestAppConns_Failure(t *testing.T) { } }() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quitCh := make(chan struct{}) var recvQuitCh <-chan struct{} // nolint:gosimple recvQuitCh = quitCh clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return() - clientMock.On("Start").Return(nil) - clientMock.On("Stop").Return(nil) + clientMock.On("Start", mock.Anything).Return(nil) clientMock.On("Quit").Return(recvQuitCh) clientMock.On("Error").Return(errors.New("EOF")).Once() + cl := &noopStoppableClientImpl{Client: clientMock} creator := func(log.Logger) (abciclient.Client, error) { - return clientMock, nil + return cl, nil } appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - err := appConns.Start() + err := appConns.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := appConns.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); appConns.Wait() }) // simulate failure close(quitCh) diff --git a/internal/state/execution.go b/internal/state/execution.go index dc64e6e3d..e4a1ba6c3 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -332,7 +332,7 @@ func execBlockOnProxyApp( byzVals = append(byzVals, evidence.ABCI()...) } - ctx := context.Background() + ctx := context.TODO() // Begin block var err error diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 80b1b6e58..5da5adbb5 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -40,9 +40,12 @@ func TestApplyBlock(t *testing.T) { cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) - err := proxyApp.Start() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -62,12 +65,15 @@ func TestApplyBlock(t *testing.T) { // TestBeginBlockValidators ensures we send absent validators list. 
func TestBeginBlockValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - err := proxyApp.Start() + + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) stateStore := sm.NewStore(stateDB) @@ -125,12 +131,14 @@ func TestBeginBlockValidators(t *testing.T) { // TestBeginBlockByzantineValidators ensures we send byzantine validators list. func TestBeginBlockByzantineValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - err := proxyApp.Start() + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -350,13 +358,15 @@ func TestUpdateValidators(t *testing.T) { // TestEndBlockValidatorUpdates ensures we update validator set and send an event. func TestEndBlockValidatorUpdates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) - err := proxyApp.Start() + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -372,7 +382,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { ) eventBus := eventbus.NewDefault(logger) - err = eventBus.Start() + err = eventBus.Start(ctx) require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests @@ -405,7 +415,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } // test we threw an event - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) defer cancel() msg, err := updatesSub.Next(ctx) require.NoError(t, err) @@ -420,13 +430,15 @@ func TestEndBlockValidatorUpdates(t *testing.T) { // TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that // would result in empty set causes no panic, an error is raised and NextValidators is not updated func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) logger := log.TestingLogger() proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) - err := proxyApp.Start() + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) stateStore := sm.NewStore(stateDB) diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index d5c230e81..80c4adf02 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -116,7 +116,7 @@ func (is *Service) publish(msg pubsub.Message) error { // indexer if the underlying event sinks support indexing. // // TODO(creachadair): Can we get rid of the "enabled" check? 
-func (is *Service) OnStart() error { +func (is *Service) OnStart(ctx context.Context) error { // If the event sinks support indexing, register an observer to capture // block header data for the indexer. if IndexingEnabled(is.eventSinks) { diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index a986530f0..879cf8820 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -1,6 +1,7 @@ package indexer_test import ( + "context" "database/sql" "fmt" "os" @@ -47,16 +48,15 @@ func NewIndexerService(es []indexer.EventSink, eventBus *eventbus.EventBus) *ind } func TestIndexerServiceIndexesBlocks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := tmlog.TestingLogger() // event bus eventBus := eventbus.NewDefault(logger) - err := eventBus.Start() + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(eventBus.Wait) assert.False(t, indexer.KVSinkEnabled([]indexer.EventSink{})) assert.False(t, indexer.IndexingEnabled([]indexer.EventSink{})) @@ -71,13 +71,8 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { assert.True(t, indexer.IndexingEnabled(eventSinks)) service := NewIndexerService(eventSinks, eventBus) - err = service.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := service.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, service.Start(ctx)) + t.Cleanup(service.Wait) // publish block with txs err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index f5306801f..650579f9b 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -143,6 +143,9 @@ func TestType(t *testing.T) { } func TestIndexing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("IndexBlockEvents", func(t *testing.T) { indexer := &EventSink{store: testDB(), chainID: chainID} require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) @@ -154,7 +157,7 @@ func TestIndexing(t *testing.T) { verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) verifyNotImplemented(t, "block search", func() (bool, error) { - v, err := indexer.SearchBlockEvents(context.Background(), nil) + v, err := indexer.SearchBlockEvents(ctx, nil) return v != nil, err }) @@ -188,7 +191,7 @@ func TestIndexing(t *testing.T) { return txr != nil, err }) verifyNotImplemented(t, "tx search", func() (bool, error) { - txr, err := indexer.SearchTxEvents(context.Background(), nil) + txr, err := indexer.SearchTxEvents(ctx, nil) return txr != nil, err }) diff --git a/internal/state/state_test.go b/internal/state/state_test.go index fdf681294..3f989536a 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -310,6 +310,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } func TestProposerFrequency(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // some explicit test cases testCases := []struct { @@ -370,7 +372,7 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(mrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := 
privVal.GetPubKey(ctx) require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = mrand.Int63() diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index eb0cebbb7..65c0648d4 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -28,9 +28,11 @@ import ( const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(3, 1) stateStore := sm.NewStore(stateDB) @@ -115,9 +117,11 @@ func TestValidateBlockHeader(t *testing.T) { } func TestValidateBlockCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(1, 1) stateStore := sm.NewStore(stateDB) @@ -207,7 +211,7 @@ func TestValidateBlockCommit(t *testing.T) { ) require.NoError(t, err, "height %d", height) - bpvPubKey, err := badPrivVal.GetPubKey(context.Background()) + bpvPubKey, err := badPrivVal.GetPubKey(ctx) require.NoError(t, err) badVote := &types.Vote{ @@ -223,9 +227,9 @@ func TestValidateBlockCommit(t *testing.T) { g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote(context.Background(), chainID, g) + err = badPrivVal.SignVote(ctx, chainID, g) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(context.Background(), chainID, b) + err = badPrivVal.SignVote(ctx, chainID, b) require.NoError(t, err, "height %d", height) goodVote.Signature, badVote.Signature = g.Signature, b.Signature @@ -236,9 +240,11 @@ func TestValidateBlockCommit(t *testing.T) { } func TestValidateBlockEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) state, stateDB, privVals := makeState(4, 1) stateStore := sm.NewStore(stateDB) diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index e5a6a85cd..e717dad12 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -114,6 +114,10 @@ func TestDispatcherProviders(t *testing.T) { func TestPeerListBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() assert.Zero(t, peerList.Len()) numPeers := 10 @@ -199,6 +203,9 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { func TestPeerListConcurrent(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() numPeers := 10 @@ -229,7 +236,6 @@ func TestPeerListConcurrent(t *testing.T) { // we use a context with cancel and a separate go routine to wait for all // the other goroutines to close. 
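The teardown trick described in that comment, turning "all goroutines finished" into a context cancellation, generalizes to any concurrent test. A standalone version with a deadline, so a stuck goroutine fails the test instead of hanging it (helper name illustrative):

    package example

    import (
    	"context"
    	"sync"
    	"testing"
    	"time"
    )

    func waitAll(t *testing.T, wg *sync.WaitGroup) {
    	t.Helper()
    	ctx, cancel := context.WithCancel(context.Background())
    	// Translate WaitGroup completion into a cancellation to select on.
    	go func() { wg.Wait(); cancel() }()

    	select {
    	case <-ctx.Done():
    	case <-time.After(time.Second):
    		t.Fatal("timed out waiting for goroutines")
    	}
    }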
- ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 939fb409c..6566f823b 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -210,15 +210,11 @@ func NewReactor( // handle individual envelopes as to not have to deal with bounding workers or pools. // The caller must be sure to execute OnStop to ensure the outbound p2p Channels are // closed. No error is returned. -func (r *Reactor) OnStart() error { - go r.processSnapshotCh() - - go r.processChunkCh() - - go r.processBlockCh() - - go r.processParamsCh() - +func (r *Reactor) OnStart(ctx context.Context) error { + go r.processCh(r.snapshotCh, "snapshot") + go r.processCh(r.chunkCh, "chunk") + go r.processCh(r.blockCh, "light block") + go r.processCh(r.paramsCh, "consensus params") go r.processPeerUpdates() return nil @@ -607,7 +603,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { "chunk", msg.Index, "peer", envelope.From, ) - resp, err := r.conn.LoadSnapshotChunkSync(context.Background(), abci.RequestLoadSnapshotChunk{ + resp, err := r.conn.LoadSnapshotChunkSync(context.TODO(), abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, @@ -815,28 +811,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } -// processSnapshotCh initiates a blocking process where we listen for and handle -// envelopes on the SnapshotChannel. -func (r *Reactor) processSnapshotCh() { - r.processCh(r.snapshotCh, "snapshot") -} - -// processChunkCh initiates a blocking process where we listen for and handle -// envelopes on the ChunkChannel. -func (r *Reactor) processChunkCh() { - r.processCh(r.chunkCh, "chunk") -} - -// processBlockCh initiates a blocking process where we listen for and handle -// envelopes on the LightBlockChannel. -func (r *Reactor) processBlockCh() { - r.processCh(r.blockCh, "light block") -} - -func (r *Reactor) processParamsCh() { - r.processCh(r.paramsCh, "consensus params") -} - // processCh routes state sync messages to their respective handlers. Any error // encountered during message execution will result in a PeerError being sent on // the respective channel. 
When the reactor is stopped, we will catch the signal @@ -848,8 +822,11 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) { select { case envelope := <-ch.In: if err := r.handleMessage(ch.ID, envelope); err != nil { - r.Logger.Error(fmt.Sprintf("failed to process %s message", chName), - "ch_id", ch.ID, "envelope", envelope, "err", err) + r.Logger.Error("failed to process message", + "err", err, + "channel", chName, + "ch_id", ch.ID, + "envelope", envelope) ch.Error <- p2p.PeerError{ NodeID: envelope.From, Err: err, @@ -857,7 +834,7 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) { } case <-r.closeCh: - r.Logger.Debug(fmt.Sprintf("stopped listening on %s channel; closing...", chName)) + r.Logger.Debug("channel closed", "channel", chName) return } } @@ -923,7 +900,7 @@ func (r *Reactor) processPeerUpdates() { // recentSnapshots fetches the n most recent snapshots from the app func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshotsSync(context.Background(), abci.RequestListSnapshots{}) + resp, err := r.conn.ListSnapshotsSync(context.TODO(), abci.RequestListSnapshots{}) if err != nil { return nil, err } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index b90e5fd78..8dc2d6038 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -69,6 +69,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, conn *proxymocks.AppConnSnapshot, connQuery *proxymocks.AppConnQuery, @@ -176,11 +177,11 @@ func setup( rts.reactor.metrics, ) - require.NoError(t, rts.reactor.Start()) + require.NoError(t, rts.reactor.Start(ctx)) require.True(t, rts.reactor.IsRunning()) t.Cleanup(func() { - require.NoError(t, rts.reactor.Stop()) + rts.reactor.Wait() require.False(t, rts.reactor.IsRunning()) }) @@ -188,8 +189,11 @@ func setup( } func TestReactor_Sync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const snapshotHeight = 7 - rts := setup(t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, nil, 2) chain := buildLightBlockChain(t, 1, 10, time.Now()) // app accepts any snapshot rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). 
@@ -200,7 +204,7 @@ func TestReactor_Sync(t *testing.T) { Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) // app query returns valid state app hash - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + rts.connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: snapshotHeight, LastBlockAppHash: chain[snapshotHeight+1].AppHash, @@ -213,7 +217,7 @@ func TestReactor_Sync(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second) go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ @@ -226,7 +230,7 @@ func TestReactor_Sync(t *testing.T) { go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) // update the config to use the p2p provider rts.reactor.cfg.UseP2P = true @@ -235,12 +239,15 @@ func TestReactor_Sync(t *testing.T) { rts.reactor.cfg.DiscoveryTime = 1 * time.Second // Run state sync - _, err := rts.reactor.Sync(context.Background()) + _, err := rts.reactor.Sync(ctx) require.NoError(t, err) } func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -282,19 +289,23 @@ func TestReactor_ChunkRequest(t *testing.T) { }, } - for name, tc := range testcases { - tc := tc + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunkSync", context.Background(), abci.RequestLoadSnapshotChunk{ + conn.On("LoadSnapshotChunkSync", mock.Anything, abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - rts := setup(t, conn, nil, nil, 2) + rts := setup(ctx, t, conn, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -311,7 +322,10 @@ func TestReactor_ChunkRequest(t *testing.T) { } func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -360,18 +374,23 @@ func TestReactor_SnapshotsRequest(t *testing.T) { }, }, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn.On("ListSnapshotsSync", mock.Anything, 
abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - rts := setup(t, conn, nil, nil, 100) + rts := setup(ctx, t, conn, nil, nil, 100) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -379,7 +398,7 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } if len(tc.expectResponses) > 0 { - retryUntil(t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) + retryUntil(ctx, t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) } responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses)) @@ -395,7 +414,10 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } func TestReactor_LightBlockResponse(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) var height int64 = 10 h := factory.MakeRandomHeader() @@ -448,7 +470,10 @@ func TestReactor_LightBlockResponse(t *testing.T) { } func TestReactor_BlockProviders(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: types.NodeID("aa"), Status: p2p.PeerStatusUp, @@ -462,7 +487,7 @@ func TestReactor_BlockProviders(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) peers := rts.reactor.peers.All() require.Len(t, peers, 2) @@ -479,7 +504,7 @@ func TestReactor_BlockProviders(t *testing.T) { go func(t *testing.T, p provider.Provider) { defer wg.Done() for height := 2; height < 10; height++ { - lb, err := p.LightBlock(context.Background(), int64(height)) + lb, err := p.LightBlock(ctx, int64(height)) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, height, int(lb.Height)) @@ -487,7 +512,6 @@ func TestReactor_BlockProviders(t *testing.T) { }(t, p) } - ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -501,7 +525,10 @@ func TestReactor_BlockProviders(t *testing.T) { } func TestReactor_StateProviderP2P(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) // make syncer non nil else test won't think we are state syncing rts.reactor.syncer = rts.syncer peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) @@ -519,8 +546,8 @@ func TestReactor_StateProviderP2P(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) rts.reactor.cfg.UseP2P = true rts.reactor.cfg.TrustHeight = 1 @@ -533,10 +560,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { } require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured") - bctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ictx, cancel := context.WithTimeout(bctx, time.Second) + ictx, cancel := 
context.WithTimeout(ctx, time.Second) defer cancel() rts.reactor.mtx.Lock() @@ -545,7 +569,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { require.NoError(t, err) rts.reactor.syncer.stateProvider = rts.reactor.stateProvider - actx, cancel := context.WithTimeout(bctx, 10*time.Second) + actx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() appHash, err := rts.reactor.stateProvider.AppHash(actx, 5) @@ -569,13 +593,19 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // test backfill algorithm with varying failure rates [0, 10] failureRates := []int{0, 2, 9} for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) - rts := setup(t, nil, nil, nil, 21) + rts := setup(ctx, t, nil, nil, nil, 21) var ( startHeight int64 = 20 @@ -605,11 +635,11 @@ func TestReactor_Backfill(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, failureRate) err := rts.reactor.backfill( - context.Background(), + ctx, factory.DefaultTestChainID, startHeight, stopHeight, @@ -644,8 +674,8 @@ func TestReactor_Backfill(t *testing.T) { // retryUntil will continue to evaluate fn and will return successfully when true // or fail when the timeout is reached. -func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) +func retryUntil(ctx context.Context, t *testing.T, fn func() bool, timeout time.Duration) { + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() for { @@ -656,7 +686,9 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { } } -func handleLightBlockRequests(t *testing.T, +func handleLightBlockRequests( + ctx context.Context, + t *testing.T, chain map[int64]*types.LightBlock, receiving chan p2p.Envelope, sending chan p2p.Envelope, @@ -666,6 +698,8 @@ func handleLightBlockRequests(t *testing.T, errorCount := 0 for { select { + case <-ctx.Done(): + return case envelope := <-receiving: if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok { if requests%10 >= failureRate { @@ -709,13 +743,24 @@ func handleLightBlockRequests(t *testing.T, } } -func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) { +func handleConsensusParamsRequest( + ctx context.Context, + t *testing.T, + receiving, sending chan p2p.Envelope, + closeCh chan struct{}, +) { t.Helper() params := types.DefaultConsensusParams() paramsProto := params.ToProto() for { select { + case <-ctx.Done(): + return case envelope := <-receiving: + if ctx.Err() != nil { + return + } + t.Log("received consensus params request") msg, ok := envelope.Message.(*ssproto.ParamsRequest) require.True(t, ok) diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index b4212961a..f266017dd 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -565,7 +565,7 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { // verifyApp verifies the sync, checking the app hash and last block height. 
It returns the // app version, which should be returned as part of the initial state. func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { - resp, err := s.connQuery.InfoSync(context.Background(), proxy.RequestInfo) + resp, err := s.connQuery.InfoSync(context.TODO(), proxy.RequestInfo) if err != nil { return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index ad902a54c..4c240830f 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -22,9 +22,10 @@ import ( "github.com/tendermint/tendermint/version" ) -var ctx = context.Background() - func TestSyncer_SyncAny(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + state := sm.State{ ChainID: "chain", Version: sm.Version{ @@ -68,7 +69,7 @@ func TestSyncer_SyncAny(t *testing.T) { peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts := setup(ctx, t, connSnapshot, connQuery, stateProvider, 3) rts.reactor.syncer = rts.syncer @@ -110,7 +111,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -119,7 +120,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -160,7 +161,7 @@ func TestSyncer_SyncAny(t *testing.T) { // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). 
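Across these hunks, mock expectations that previously pinned a fixed ctx now use mock.Anything for the context argument, so they match whichever context the code under test actually threads through. The shape, mirroring the diff (testify's mock.Anything matches any single argument):

    connSnapshot := &proxymocks.AppConnSnapshot{} // mockery-generated mock, as in these tests
    connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{
        Index: 0, Chunk: []byte{1, 1, 0},
    }).Once().Return(&abci.ResponseApplySnapshotChunk{
        Result: abci.ResponseApplySnapshotChunk_ACCEPT,
    }, nil)

This keeps the expectations valid as the tests move from one shared background context to per-test and per-subtest contexts.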
- connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ @@ -168,16 +169,16 @@ func TestSyncer_SyncAny(t *testing.T) { RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), @@ -217,7 +218,10 @@ func TestSyncer_SyncAny_noSnapshots(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) _, _, err := rts.syncer.SyncAny(ctx, 0, func() {}) require.Equal(t, errNoSnapshots, err) @@ -227,7 +231,10 @@ func TestSyncer_SyncAny_abort(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") @@ -235,7 +242,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) @@ -248,7 +255,10 @@ func TestSyncer_SyncAny_reject(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -266,15 +276,15 @@ func TestSyncer_SyncAny_reject(t *testing.T) { _, err = 
rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) @@ -287,7 +297,10 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -305,11 +318,11 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) @@ -322,7 +335,10 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -351,11 +367,11 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, sbc) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) @@ -368,7 +384,10 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) 
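The table-driven tests in this file also pick up a two-level context: one WithCancel for the whole test, plus a child context per t.Run case so a cancellation in one case cannot leak into the next. The recurring shape (runCase is an illustrative stand-in, not a helper from the patch):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    for name, tc := range testcases {
        tc := tc // capture the range variable for the subtest closure
        t.Run(name, func(t *testing.T) {
            // Each case derives its own cancelable context from the
            // test-wide one.
            ctx, cancel := context.WithCancel(ctx)
            defer cancel()

            runCase(ctx, t, tc)
        })
    }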
- rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -378,7 +397,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) @@ -405,16 +424,23 @@ func TestSyncer_offerSnapshot(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) @@ -451,13 +477,20 @@ func TestSyncer_applyChunks_Results(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") @@ -468,11 +501,11 @@ func TestSyncer_applyChunks_Results(t *testing.T) { _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) @@ -505,13 +538,19 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} 
stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) @@ -529,13 +568,13 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -570,13 +609,19 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. @@ -623,13 +668,13 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, Sender: "aa", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, Sender: "bb", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -638,7 +683,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // On retry, the last chunk will be tried again, so we just accept it then. 
if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } @@ -693,12 +738,18 @@ func TestSyncer_verifyApp(t *testing.T) { }, nil, errVerifyFailed}, "error": {nil, boom, boom}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(tc.response, tc.err) + rts := setup(ctx, t, nil, nil, nil, 2) + + rts.connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) version, err := rts.syncer.verifyApp(s) unwrapped := errors.Unwrap(err) if unwrapped != nil { diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index d6199bc80..a5bb975c9 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -1,6 +1,7 @@ package events import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -8,8 +9,11 @@ import ( ) func TestEventCache_Flush(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() + err := evsw.Start(ctx) require.NoError(t, err) err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { diff --git a/libs/events/events.go b/libs/events/events.go index 146a9cfa7..f6151e734 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -2,6 +2,7 @@ package events import ( + "context" "fmt" tmsync "github.com/tendermint/tendermint/internal/libs/sync" @@ -45,6 +46,7 @@ type Fireable interface { type EventSwitch interface { service.Service Fireable + Stop() error AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error RemoveListenerForEvent(event string, listenerID string) @@ -68,7 +70,7 @@ func NewEventSwitch() EventSwitch { return evsw } -func (evsw *eventSwitch) OnStart() error { +func (evsw *eventSwitch) OnStart(ctx context.Context) error { return nil } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 9e21e0235..0e8667908 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -1,6 +1,7 @@ package events import ( + "context" "fmt" "testing" "time" @@ -14,23 +15,20 @@ import ( // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single // listener to an event, and sends a string "data". 
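The events tests below all follow the same migration: Start takes the test-scoped context, the explicit Stop-in-cleanup disappears, and Wait is registered instead so the test blocks until cancellation has fully shut the switch down. The minimal shape, as it appears repeatedly in the hunks that follow:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel() // the deferred cancel, not Stop(), now drives shutdown

    evsw := NewEventSwitch()
    require.NoError(t, evsw.Start(ctx))
    t.Cleanup(evsw.Wait) // block cleanup until the service has stopped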
func TestAddListenerForEventFireOnce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) messages := make(chan EventData) - err = evsw.AddListenerForEvent("listener", "event", + require.NoError(t, evsw.AddListenerForEvent("listener", "event", func(data EventData) { // test there's no deadlock if we remove the listener inside a callback evsw.RemoveListener("listener") messages <- data - }) - require.NoError(t, err) + })) go evsw.FireEvent("event", "data") received := <-messages if received != "data" { @@ -41,24 +39,21 @@ func TestAddListenerForEventFireOnce(t *testing.T) { // TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single // listener to an event, and sends a thousand integers. func TestAddListenerForEventFireMany(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener for one event - err = evsw.AddListenerForEvent("listener", "event", + require.NoError(t, evsw.AddListenerForEvent("listener", "event", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -75,14 +70,12 @@ func TestAddListenerForEventFireMany(t *testing.T) { // listener to three different events and sends a thousand integers for each // of the three events. func TestAddListenerForDifferentEvents(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending1 := make(chan uint64) @@ -90,21 +83,18 @@ func TestAddListenerForDifferentEvents(t *testing.T) { doneSending3 := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener to three events - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event3", func(data EventData) { numbers <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -127,15 +117,13 @@ func TestAddListenerForDifferentEvents(t *testing.T) { // listener to two of those three events, and then sends a thousand integers // for each of the three events. 
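A second, purely mechanical change runs through these tests: separate err assignments followed by require.NoError are collapsed by passing the call directly to the assertion. Both spellings are equivalent; the inline form reads better when the error is the only thing being checked (cb stands in for the callback literal):

    // before
    err = evsw.AddListenerForEvent("listener", "event1", cb)
    require.NoError(t, err)

    // after
    require.NoError(t, evsw.AddListenerForEvent("listener", "event1", cb))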
func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + evsw := NewEventSwitch() + require.NoError(t, evsw.Start(ctx)) + + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -145,31 +133,26 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 @@ -199,14 +182,12 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { roundCount = 2000 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) done1 := make(chan struct{}) done2 := make(chan struct{}) @@ -249,14 +230,12 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // two events, fires a thousand integers for the first event, then unsubscribes // the listener and fires a thousand integers for the second event. 
func TestAddAndRemoveListener(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -265,16 +244,14 @@ func TestAddAndRemoveListener(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 @@ -300,29 +277,23 @@ func TestAddAndRemoveListener(t *testing.T) { // TestRemoveListener does basic tests on adding and removing func TestRemoveListener(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work - err = evsw.AddListenerForEvent("listener", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", func(data EventData) { sum1++ - }) - require.NoError(t, err) - - err = evsw.AddListenerForEvent("listener", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", func(data EventData) { sum2++ - }) - require.NoError(t, err) + })) for i := 0; i < count; i++ { evsw.FireEvent("event1", true) @@ -361,14 +332,11 @@ func TestRemoveListener(t *testing.T) { // NOTE: it is important to run this test with race conditions tracking on, // `go test -race`, to examine for possible race conditions. 
func TestRemoveListenersAsync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -378,36 +346,30 @@ func TestRemoveListenersAsync(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event1", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event1", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) - }) - require.NoError(t, err) + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index cae644f7b..4d317215f 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -12,8 +12,9 @@ import ( ) func TestExample(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := newTestServer(ctx, t) sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: "example-client", diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index c1224c642..930dd47bc 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -341,7 +341,7 @@ func (s *Server) OnStop() { s.stop() } func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() } // OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { s.run(); return nil } +func (s *Server) OnStart(ctx context.Context) error { s.run(); return nil } // OnReset implements Service.OnReset. It has no effect for this service. 
func (s *Server) OnReset() error { return nil } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 8dcf8b3d9..be7f3e6e0 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" @@ -20,8 +19,10 @@ const ( ) func TestSubscribeWithArgs(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) t.Run("DefaultLimit", func(t *testing.T) { sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ @@ -47,8 +48,10 @@ func TestSubscribeWithArgs(t *testing.T) { } func TestObserver(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) done := make(chan struct{}) var got interface{} @@ -65,8 +68,10 @@ func TestObserver(t *testing.T) { } func TestObserverErrors(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) require.Error(t, s.Observe(ctx, nil, query.Empty{})) require.NoError(t, s.Observe(ctx, func(pubsub.Message) error { return nil })) @@ -74,8 +79,10 @@ func TestObserverErrors(t *testing.T) { } func TestPublishDoesNotBlock(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: clientID, @@ -100,8 +107,10 @@ func TestPublishDoesNotBlock(t *testing.T) { } func TestSubscribeErrors(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) t.Run("EmptyQueryErr", func(t *testing.T) { _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ClientID: clientID}) @@ -118,8 +127,10 @@ func TestSubscribeErrors(t *testing.T) { } func TestSlowSubscriber(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: clientID, @@ -137,8 +148,10 @@ func TestSlowSubscriber(t *testing.T) { } func TestDifferentClients(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: "client-1", @@ -188,8 +201,10 @@ func TestDifferentClients(t *testing.T) { } func TestSubscribeDuplicateKeys(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) testCases := []struct { query string @@ -241,8 +256,10 @@ func TestSubscribeDuplicateKeys(t *testing.T) { } func TestClientSubscribesTwice(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) q := 
query.MustParse("tm.events.type='NewBlock'") events := []abci.Event{{ @@ -274,8 +291,10 @@ func TestClientSubscribesTwice(t *testing.T) { } func TestUnsubscribe(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: clientID, @@ -296,8 +315,10 @@ func TestUnsubscribe(t *testing.T) { } func TestClientUnsubscribesTwice(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: clientID, @@ -315,8 +336,10 @@ func TestClientUnsubscribesTwice(t *testing.T) { } func TestResubscribe(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) args := pubsub.SubscribeArgs{ ClientID: clientID, @@ -336,8 +359,10 @@ func TestResubscribe(t *testing.T) { } func TestUnsubscribeAll(t *testing.T) { - s := newTestServer(t) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t) sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ ClientID: clientID, @@ -364,28 +389,27 @@ func TestBufferCapacity(t *testing.T) { require.Equal(t, 2, s.BufferCapacity()) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() require.NoError(t, s.Publish(ctx, "Nighthawk")) require.NoError(t, s.Publish(ctx, "Sage")) - ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + ctx, cancel = context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() require.ErrorIs(t, s.Publish(ctx, "Ironclad"), context.DeadlineExceeded) } -func newTestServer(t testing.TB) *pubsub.Server { +func newTestServer(ctx context.Context, t testing.TB) *pubsub.Server { t.Helper() s := pubsub.NewServer(func(s *pubsub.Server) { s.Logger = log.TestingLogger() }) - require.NoError(t, s.Start()) - t.Cleanup(func() { - assert.NoError(t, s.Stop()) - }) + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) return s } diff --git a/libs/service/service.go b/libs/service/service.go index 88c25d804..0fc26dbdb 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -1,8 +1,8 @@ package service import ( + "context" "errors" - "fmt" "sync/atomic" "github.com/tendermint/tendermint/libs/log" @@ -22,22 +22,10 @@ var ( // Service defines a service that can be started, stopped, and reset. type Service interface { - // Start the service. - // If it's already started or stopped, will return an error. - // If OnStart() returns an error, it's returned by Start() - Start() error - OnStart() error - - // Stop the service. - // If it's already stopped, will return an error. - // OnStop must never error. - Stop() error - OnStop() - - // Reset the service. - // Panics by default - must be overwritten to enable reset. - Reset() error - OnReset() error + // Start is called to start the service, which should run until + // the context terminates. If the service is already running, Start + // must report an error. 
+ Start(context.Context) error // Return true if the service is running IsRunning() bool @@ -52,6 +40,18 @@ type Service interface { Wait() } +// Implementation describes the implementation that the +// BaseService implementation wraps. +type Implementation interface { + Service + + // Called by the Services Start Method + OnStart(context.Context) error + + // Called when the service's context is canceled. + OnStop() +} + /* Classical-inheritance-style service declarations. Services can be started, then stopped, then optionally restarted. @@ -82,7 +82,7 @@ Typical usage: return fs } - func (fs *FooService) OnStart() error { + func (fs *FooService) OnStart(ctx context.Context) error { fs.BaseService.OnStart() // Always call the overridden method. // initialize private fields // start subroutines, etc. @@ -102,11 +102,11 @@ type BaseService struct { quit chan struct{} // The "subclass" of BaseService - impl Service + impl Implementation } // NewBaseService creates a new BaseService. -func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { +func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { if logger == nil { logger = log.NewNopLogger() } @@ -119,10 +119,10 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { } } -// Start implements Service by calling OnStart (if defined). An error will be -// returned if the service is already running or stopped. Not to start the -// stopped service, you need to call Reset. -func (bs *BaseService) Start() error { +// Start starts the Service and calls its OnStart method. An error will be +// returned if the service is already running or stopped. To restart a +// stopped service, call Reset. +func (bs *BaseService) Start(ctx context.Context) error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { bs.Logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) @@ -132,11 +132,26 @@ func (bs *BaseService) Start() error { bs.Logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) - if err := bs.impl.OnStart(); err != nil { + if err := bs.impl.OnStart(ctx); err != nil { // revert flag atomic.StoreUint32(&bs.started, 0) return err } + + go func(ctx context.Context) { + <-ctx.Done() + if err := bs.Stop(); err != nil { + bs.Logger.Error("stopped service", + "err", err.Error(), + "service", bs.name, + "impl", bs.impl.String()) + } + + bs.Logger.Info("stopped service", + "service", bs.name, + "impl", bs.impl.String()) + }(ctx) + return nil } @@ -147,7 +162,7 @@ func (bs *BaseService) Start() error { // OnStart implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStart() -func (bs *BaseService) OnStart() error { return nil } +func (bs *BaseService) OnStart(ctx context.Context) error { return nil } // Stop implements Service by calling OnStop (if defined) and closing quit // channel. An error will be returned if the service is already stopped. @@ -175,26 +190,6 @@ func (bs *BaseService) Stop() error { // that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} -// Reset implements Service by calling OnReset callback (if defined). An error -// will be returned if the service is running. 
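This is the heart of the patch: Service.Start now takes a context, BaseService.Start spawns a watcher goroutine that calls Stop once that context ends, and Reset is removed outright in the hunk that follows. From the caller's side the lifecycle becomes cancel-and-wait rather than stop-and-reset; a hedged sketch using the FooService from the doc comment above:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fs := NewFooService(logger)
    if err := fs.Start(ctx); err != nil { // runs OnStart(ctx)
        return err
    }

    // ... use the service ...

    cancel()  // replaces the old fs.Stop(); the watcher goroutine calls Stop
    fs.Wait() // returns once the quit channel closes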
-func (bs *BaseService) Reset() error { - if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug("cannot reset service; not stopped", "service", bs.name, "impl", bs.impl.String()) - return fmt.Errorf("can't reset running %s", bs.name) - } - - // whether or not we've started, we can reset - atomic.CompareAndSwapUint32(&bs.started, 1, 0) - - bs.quit = make(chan struct{}) - return bs.impl.OnReset() -} - -// OnReset implements Service by panicking. -func (bs *BaseService) OnReset() error { - panic("The service cannot be reset") -} - // IsRunning implements Service by returning true or false depending on the // service's state. func (bs *BaseService) IsRunning() bool { @@ -202,16 +197,10 @@ func (bs *BaseService) IsRunning() bool { } // Wait blocks until the service is stopped. -func (bs *BaseService) Wait() { - <-bs.quit -} +func (bs *BaseService) Wait() { <-bs.quit } // String implements Service by returning a string representation of the service. -func (bs *BaseService) String() string { - return bs.name -} +func (bs *BaseService) String() string { return bs.name } // Quit Implements Service by returning a quit channel. -func (bs *BaseService) Quit() <-chan struct{} { - return bs.quit -} +func (bs *BaseService) Quit() <-chan struct{} { return bs.quit } diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 7abc6f4fb..dc5d0ccb1 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -1,6 +1,7 @@ package service import ( + "context" "testing" "time" @@ -16,9 +17,12 @@ func (testService) OnReset() error { } func TestBaseServiceWait(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := &testService{} ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() + err := ts.Start(ctx) require.NoError(t, err) waitFinished := make(chan struct{}) @@ -36,22 +40,3 @@ func TestBaseServiceWait(t *testing.T) { t.Fatal("expected Wait() to finish within 100 ms.") } } - -func TestBaseServiceReset(t *testing.T) { - ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() - require.NoError(t, err) - - err = ts.Reset() - require.Error(t, err, "expected cant reset service error") - - err = ts.Stop() - require.NoError(t, err) - - err = ts.Reset() - require.NoError(t, err) - - err = ts.Start() - require.NoError(t, err) -} diff --git a/libs/strings/string.go b/libs/strings/string.go index b09c00063..6cc0b18ee 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -55,6 +55,10 @@ func SplitAndTrim(s, sep, cutset string) []string { return spl } +// TrimSpace removes all leading and trailing whitespace from the +// string. +func TrimSpace(s string) string { return strings.TrimSpace(s) } + // Returns true if s is a non-empty printable non-tab ascii character. func IsASCIIText(s string) bool { if len(s) == 0 { diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 6f2622588..f8c183308 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -49,8 +49,8 @@ func NewProxy( // routes to proxy via Client, and starts up an HTTP server on the TCP network // address p.Addr. // See http#Server#ListenAndServe. 
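The light proxy below gets the same threading: ListenAndServe and ListenAndServeTLS accept the context and hand it to listen, which uses it to start the embedded light client if it is not already running. Caller-side, roughly (the NewProxy arguments here are illustrative, not copied from this patch):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    p, err := proxy.NewProxy(lightClient, listenAddr, providerAddr, cfg, logger)
    if err != nil {
        return err
    }
    // Canceling ctx stops the client started inside listen(ctx).
    return p.ListenAndServe(ctx)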
-func (p *Proxy) ListenAndServe() error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServe(ctx context.Context) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } @@ -67,8 +67,8 @@ func (p *Proxy) ListenAndServe() error { // ListenAndServeTLS acts identically to ListenAndServe, except that it expects // HTTPS connections. // See http#Server#ListenAndServeTLS. -func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } @@ -84,7 +84,7 @@ func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { ) } -func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { +func (p *Proxy) listen(ctx context.Context) (net.Listener, *http.ServeMux, error) { mux := http.NewServeMux() // 1) Register regular routes. @@ -107,7 +107,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { // 3) Start a client. if !p.Client.IsRunning() { - if err := p.Client.Start(); err != nil { + if err := p.Client.Start(ctx); err != nil { return nil, mux, fmt.Errorf("can't start client: %w", err) } } diff --git a/light/rpc/client.go b/light/rpc/client.go index dc745542e..6143338f4 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -98,9 +98,9 @@ func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { return c } -func (c *Client) OnStart() error { +func (c *Client) OnStart(ctx context.Context) error { if !c.next.IsRunning() { - return c.next.Start() + return c.next.Start(ctx) } return nil } diff --git a/node/node.go b/node/node.go index a0b77823b..7d3b56b47 100644 --- a/node/node.go +++ b/node/node.go @@ -82,7 +82,11 @@ type nodeImpl struct { // newDefaultNode returns a Tendermint node with default settings for the // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. -func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) { +func newDefaultNode( + ctx context.Context, + cfg *config.Config, + logger log.Logger, +) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err) @@ -108,7 +112,9 @@ func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, err appClient, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - return makeNode(cfg, + return makeNode( + ctx, + cfg, pval, nodeKey, appClient, @@ -119,7 +125,9 @@ func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, err } // makeNode returns a new, ready to go, Tendermint Node. 
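+// The construction context doubles as the node's shutdown signal: makeNode
+// derives a cancel function from it and registers that cancel as the first
+// closer, so an error part-way through construction tears down whatever has
+// already been started. A minimal sketch of the pattern (doSetup stands in
+// for the individual createAndStart* helpers used below):
+//
+//	ctx, cancel := context.WithCancel(ctx)
+//	closers := []closer{convertCancelCloser(cancel)}
+//	if err := doSetup(ctx); err != nil {
+//		return nil, combineCloseError(err, makeCloser(closers))
+//	}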
-func makeNode(cfg *config.Config, +func makeNode( + ctx context.Context, + cfg *config.Config, privValidator types.PrivValidator, nodeKey types.NodeKey, clientCreator abciclient.Creator, @@ -127,7 +135,10 @@ func makeNode(cfg *config.Config, dbProvider config.DBProvider, logger log.Logger, ) (service.Service, error) { - closers := []closer{} + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + + closers := []closer{convertCancelCloser(cancel)} blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider) if err != nil { @@ -157,7 +168,7 @@ func makeNode(cfg *config.Config, nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) + proxyApp, err := createAndStartProxyAppConns(ctx, clientCreator, logger, nodeMetrics.proxy) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } @@ -166,12 +177,13 @@ func makeNode(cfg *config.Config, // we might need to index the txs of the replayed block as this might not have happened // when the node stopped last time (i.e. the node stopped after it saved the block // but before it indexed the txs, or, endblocker panicked) - eventBus, err := createAndStartEventBus(logger) + eventBus, err := createAndStartEventBus(ctx, logger) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } - indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, + indexerService, eventSinks, err := createAndStartIndexerService( + ctx, cfg, dbProvider, eventBus, logger, genDoc.ChainID, nodeMetrics.indexer) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -184,14 +196,19 @@ func makeNode(cfg *config.Config, // FIXME: we should start services inside OnStart switch protocol { case "grpc": - privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorGRPCClient(ctx, cfg, genDoc.ChainID, logger) if err != nil { return nil, combineCloseError( fmt.Errorf("error with private validator grpc client: %w", err), makeCloser(closers)) } default: - privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorSocketClient( + ctx, + cfg.PrivValidator.ListenAddr, + genDoc.ChainID, + logger, + ) if err != nil { return nil, combineCloseError( fmt.Errorf("error with private validator socket client: %w", err), @@ -201,7 +218,7 @@ func makeNode(cfg *config.Config, } var pubKey crypto.PubKey if cfg.Mode == config.ModeValidator { - pubKey, err = privValidator.GetPubKey(context.TODO()) + pubKey, err = privValidator.GetPubKey(ctx) if err != nil { return nil, combineCloseError(fmt.Errorf("can't get pubkey: %w", err), makeCloser(closers)) @@ -227,7 +244,7 @@ func makeNode(cfg *config.Config, if err := consensus.NewHandshaker( logger.With("module", "handshaker"), stateStore, state, blockStore, eventBus, genDoc, - ).Handshake(proxyApp); err != nil { + ).Handshake(ctx, proxyApp); err != nil { return nil, combineCloseError(err, makeCloser(closers)) } @@ -253,7 +270,6 @@ func makeNode(cfg *config.Config, nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, 
nodeKey.ID)
@@ -492,7 +508,7 @@ func makeSeedNode(cfg *config.Config,
 }
 
 // OnStart starts the Node. It implements service.Service.
-func (n *nodeImpl) OnStart() error {
+func (n *nodeImpl) OnStart(ctx context.Context) error {
 	if n.config.RPC.PprofListenAddress != "" {
 		// this service is not cleaned up (I believe that we'd
 		// need to have another thread and potentially a
@@ -513,7 +529,7 @@ func (n *nodeImpl) OnStart() error {
 	// Start the RPC server before the P2P server
 	// so we can eg. receive txs for the first block
 	if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed {
-		listeners, err := n.startRPC()
+		listeners, err := n.startRPC(ctx)
 		if err != nil {
 			return err
 		}
@@ -526,39 +542,39 @@ func (n *nodeImpl) OnStart() error {
 	}
 
 	// Start the transport.
-	if err := n.router.Start(); err != nil {
+	if err := n.router.Start(ctx); err != nil {
 		return err
 	}
 	n.isListening = true
 
 	if n.config.Mode != config.ModeSeed {
-		if err := n.bcReactor.Start(); err != nil {
+		if err := n.bcReactor.Start(ctx); err != nil {
 			return err
 		}
 
 		// Start the real consensus reactor separately since the switch uses the shim.
-		if err := n.consensusReactor.Start(); err != nil {
+		if err := n.consensusReactor.Start(ctx); err != nil {
 			return err
 		}
 
 		// Start the real state sync reactor separately since the switch uses the shim.
-		if err := n.stateSyncReactor.Start(); err != nil {
+		if err := n.stateSyncReactor.Start(ctx); err != nil {
 			return err
 		}
 
 		// Start the real mempool reactor separately since the switch uses the shim.
-		if err := n.mempoolReactor.Start(); err != nil {
+		if err := n.mempoolReactor.Start(ctx); err != nil {
 			return err
 		}
 
 		// Start the real evidence reactor separately since the switch uses the shim.
-		if err := n.evidenceReactor.Start(); err != nil {
+		if err := n.evidenceReactor.Start(ctx); err != nil {
 			return err
 		}
 	}
 
 	if n.config.P2P.PexReactor {
-		if err := n.pexReactor.Start(); err != nil {
+		if err := n.pexReactor.Start(ctx); err != nil {
 			return err
 		}
 	}
@@ -591,7 +607,7 @@ func (n *nodeImpl) OnStart() error {
 	// bubbling up the error and gracefully shutting down the rest of the node
 	go func() {
 		n.Logger.Info("starting state sync")
-		state, err := n.stateSyncReactor.Sync(context.TODO())
+		state, err := n.stateSyncReactor.Sync(ctx)
 		if err != nil {
 			n.Logger.Error("state sync failed; shutting down this node", "err", err)
 			// stop the node
@@ -617,7 +633,7 @@ func (n *nodeImpl) OnStart() error {
 	// is running
 	// FIXME Very ugly to have these metrics bleed through here.
 	n.consensusReactor.SetBlockSyncingMetrics(1)
-	if err := bcR.SwitchToBlockSync(state); err != nil {
+	if err := bcR.SwitchToBlockSync(ctx, state); err != nil {
 		n.Logger.Error("failed to switch to block sync", "err", err)
 		return
 	}
@@ -638,19 +654,13 @@ func (n *nodeImpl) OnStart() error {
 
 // OnStop stops the Node. It implements service.Service.
 func (n *nodeImpl) OnStop() {
-
 	n.Logger.Info("Stopping Node")
 
 	if n.eventBus != nil {
-		// first stop the non-reactor services
-		if err := n.eventBus.Stop(); err != nil {
-			n.Logger.Error("Error closing eventBus", "err", err)
-		}
+		n.eventBus.Wait()
 	}
 
 	if n.indexerService != nil {
-		if err := n.indexerService.Stop(); err != nil {
-			n.Logger.Error("Error closing indexerService", "err", err)
-		}
+		n.indexerService.Wait()
 	}
 
 	for _, es := range n.eventSinks {
@@ -660,41 +670,14 @@ func (n *nodeImpl) OnStop() {
 	}
 
 	if n.config.Mode != config.ModeSeed {
-		// now stop the reactors
-
-		// Stop the real blockchain reactor separately since the switch uses the shim.
-		if err := n.bcReactor.Stop(); err != nil {
-			n.Logger.Error("failed to stop the blockchain reactor", "err", err)
-		}
-
-		// Stop the real consensus reactor separately since the switch uses the shim.
-		if err := n.consensusReactor.Stop(); err != nil {
-			n.Logger.Error("failed to stop the consensus reactor", "err", err)
-		}
-
-		// Stop the real state sync reactor separately since the switch uses the shim.
-		if err := n.stateSyncReactor.Stop(); err != nil {
-			n.Logger.Error("failed to stop the state sync reactor", "err", err)
-		}
-
-		// Stop the real mempool reactor separately since the switch uses the shim.
-		if err := n.mempoolReactor.Stop(); err != nil {
-			n.Logger.Error("failed to stop the mempool reactor", "err", err)
-		}
-
-		// Stop the real evidence reactor separately since the switch uses the shim.
-		if err := n.evidenceReactor.Stop(); err != nil {
-			n.Logger.Error("failed to stop the evidence reactor", "err", err)
-		}
-	}
-
-	if err := n.pexReactor.Stop(); err != nil {
-		n.Logger.Error("failed to stop the PEX v2 reactor", "err", err)
-	}
-
-	if err := n.router.Stop(); err != nil {
-		n.Logger.Error("failed to stop router", "err", err)
+		n.bcReactor.Wait()
+		n.consensusReactor.Wait()
+		n.stateSyncReactor.Wait()
+		n.mempoolReactor.Wait()
+		n.evidenceReactor.Wait()
 	}
+	n.pexReactor.Wait()
+	n.router.Wait()
 
 	n.isListening = false
 
 	// finally stop the listeners / external services
@@ -706,9 +689,7 @@ func (n *nodeImpl) OnStop() {
 	}
 
 	if pvsc, ok := n.privValidator.(service.Service); ok {
-		if err := pvsc.Stop(); err != nil {
-			n.Logger.Error("Error closing private validator", "err", err)
-		}
+		pvsc.Wait()
 	}
 
 	if n.prometheusSrv != nil {
@@ -719,13 +700,15 @@ func (n *nodeImpl) OnStop() {
 	}
 
 	if err := n.shutdownOps(); err != nil {
-		n.Logger.Error("problem shutting down additional services", "err", err)
+		if strings.TrimSpace(err.Error()) != "" {
+			n.Logger.Error("problem shutting down additional services", "err", err)
+		}
 	}
 }
 
-func (n *nodeImpl) startRPC() ([]net.Listener, error) {
+func (n *nodeImpl) startRPC(ctx context.Context) ([]net.Listener, error) {
 	if n.config.Mode == config.ModeValidator {
-		pubKey, err := n.privValidator.GetPubKey(context.TODO())
+		pubKey, err := n.privValidator.GetPubKey(ctx)
 		if pubKey == nil || err != nil {
 			return nil, fmt.Errorf("can't get pubkey: %w", err)
 		}
@@ -970,8 +953,8 @@ func loadStateFromDBOrGenesisDocProvider(
 }
 
 func createAndStartPrivValidatorSocketClient(
-	listenAddr,
-	chainID string,
+	ctx context.Context,
+	listenAddr, chainID string,
 	logger log.Logger,
 ) (types.PrivValidator, error) {
@@ -980,13 +963,13 @@ func createAndStartPrivValidatorSocketClient(
 		return nil, fmt.Errorf("failed to start private validator: %w", err)
 	}
 
-	pvsc, err := privval.NewSignerClient(pve, chainID)
+	pvsc, err := privval.NewSignerClient(ctx, pve, chainID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to start private validator: %w", err)
 	}
 
 	// try to get a pubkey from the private validator the first time
-	_, err = pvsc.GetPubKey(context.TODO())
+	_, err = pvsc.GetPubKey(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("can't get pubkey: %w", err)
 	}
@@ -1001,6 +984,7 @@ func createAndStartPrivValidatorSocketClient(
 }
 
 func createAndStartPrivValidatorGRPCClient(
+	ctx context.Context,
 	cfg *config.Config,
 	chainID string,
 	logger log.Logger,
@@ -1016,7 +1000,7 @@
 	}
 
 	// try to get a pubkey from the private validator the first time
-	_, err = pvsc.GetPubKey(context.TODO())
+	_, err = pvsc.GetPubKey(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("can't get pubkey: %w", err)
 	}
@@ -1031,7 +1015,7 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt
 	if conf.FilterPeers && proxyApp != nil {
 		opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error {
-			res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
+			res, err := proxyApp.Query().QuerySync(ctx, abci.RequestQuery{
 				Path: fmt.Sprintf("/p2p/filter/id/%s", id),
 			})
 			if err != nil {
diff --git a/node/node_test.go b/node/node_test.go
index e9c2159ed..407bf93ea 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -7,7 +7,6 @@ import (
 	"math"
 	"net"
 	"os"
-	"syscall"
 	"testing"
 	"time"
 
@@ -43,14 +42,17 @@ func TestNodeStartStop(t *testing.T) {
 
 	defer os.RemoveAll(cfg.RootDir)
 
+	ctx, bcancel := context.WithCancel(context.Background())
+	defer bcancel()
+
 	// create & start node
-	ns, err := newDefaultNode(cfg, log.TestingLogger())
+	ns, err := newDefaultNode(ctx, cfg, log.TestingLogger())
 	require.NoError(t, err)
-	require.NoError(t, ns.Start())
+	require.NoError(t, ns.Start(ctx))
 
 	t.Cleanup(func() {
 		if ns.IsRunning() {
-			assert.NoError(t, ns.Stop())
+			bcancel()
 			ns.Wait()
 		}
 	})
@@ -58,9 +60,6 @@ func TestNodeStartStop(t *testing.T) {
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
 	// wait for the node to produce a block
 	blocksSub, err := n.EventBus().SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
 		ClientID: "node_test",
@@ -75,35 +74,35 @@ func TestNodeStartStop(t *testing.T) {
 
 	// stop the node
 	go func() {
-		err = n.Stop()
-		require.NoError(t, err)
+		bcancel()
+		n.Wait()
 	}()
 
 	select {
 	case <-n.Quit():
-	case <-time.After(5 * time.Second):
-		pid := os.Getpid()
-		p, err := os.FindProcess(pid)
-		if err != nil {
-			panic(err)
+		return
+	case <-time.After(10 * time.Second):
+		if n.IsRunning() {
+			t.Fatal("timed out waiting for shutdown")
 		}
-		err = p.Signal(syscall.SIGABRT)
-		fmt.Println(err)
-		t.Fatal("timed out waiting for shutdown")
+
 	}
 }
 
-func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
+func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
 	t.Helper()
-	ns, err := newDefaultNode(conf, logger)
+
+	ctx, cancel := context.WithCancel(ctx)
+
+	ns, err := newDefaultNode(ctx, conf, logger)
 	require.NoError(t, err)
 
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
 
 	t.Cleanup(func() {
-		if ns.IsRunning() {
-			assert.NoError(t, ns.Stop())
+		cancel()
+		if n.IsRunning() {
 			ns.Wait()
 		}
 	})
@@ -118,11 +117,14 @@ func TestNodeDelayedStart(t *testing.T) {
 	defer os.RemoveAll(cfg.RootDir)
 	now := tmtime.Now()
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	// create & start node
-	n := getTestNode(t, cfg, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())
 	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)
 
-	require.NoError(t, n.Start())
+	require.NoError(t, n.Start(ctx))
 	startTime := tmtime.Now()
 	assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime))
 
@@ -133,8 +135,11 @@ func TestNodeSetAppVersion(t *testing.T) {
 	require.NoError(t, err)
 	defer os.RemoveAll(cfg.RootDir)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	// create node
-	n := getTestNode(t, cfg, log.TestingLogger())
+	n := getTestNode(ctx, t, cfg, log.TestingLogger())
 
 	// default config uses the kvstore app
 	appVersion := kvstore.ProtocolVersion
@@ -151,6 +156,9 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 	addr :=
"tcp://" + testFreeAddr(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -170,31 +178,34 @@ func TestNodeSetPrivValTCP(t *testing.T) { ) go func() { - err := signerServer.Start() + err := signerServer.Start(ctx) if err != nil { panic(err) } }() defer signerServer.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, cfg, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } // address without a protocol must result in error func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addrNoPrefix := testFreeAddr(t) cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix + n, err := newDefaultNode(ctx, cfg, log.TestingLogger()) - n, err := newDefaultNode(cfg, log.TestingLogger()) assert.Error(t, err) if n != nil && n.IsRunning() { - assert.NoError(t, n.Stop()) + cancel() n.Wait() } } @@ -203,6 +214,9 @@ func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -222,11 +236,11 @@ func TestNodeSetPrivValIPC(t *testing.T) { ) go func() { - err := pvsc.Start() + err := pvsc.Start(ctx) require.NoError(t, err) }() defer pvsc.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, cfg, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, log.TestingLogger()) assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } @@ -248,11 +262,11 @@ func TestCreateProposalBlock(t *testing.T) { cfg, err := config.ResetTestRoot("node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - err = proxyApp.Start() + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() @@ -344,9 +358,8 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - err = proxyApp.Start() + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() @@ -408,9 +421,8 @@ func TestMaxProposalBlockSize(t *testing.T) { defer os.RemoveAll(cfg.RootDir) cc := abciclient.NewLocalCreator(kvstore.NewApplication()) proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - err = proxyApp.Start() + err = proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() @@ -432,7 +444,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) tx := tmrand.Bytes(txLength - 6) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) 
+	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 	assert.NoError(t, err)
 	// now produce more txs than what a normal block can hold with 10 smaller txs
 	// At the end of the test, only the single big tx should be added
@@ -521,6 +533,9 @@ func TestNodeNewSeedNode(t *testing.T) {
 	cfg.Mode = config.ModeSeed
 	defer os.RemoveAll(cfg.RootDir)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
 	require.NoError(t, err)
 
@@ -530,17 +545,20 @@
 		defaultGenesisDocProviderFunc(cfg),
 		log.TestingLogger(),
 	)
 
 	require.NoError(t, err)
+	t.Cleanup(ns.Wait)
 
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
 
-	err = n.Start()
+	err = n.Start(ctx)
 	require.NoError(t, err)
 
 	assert.True(t, n.pexReactor.IsRunning())
 
-	require.NoError(t, n.Stop())
+	cancel()
+	n.Wait()
+
+	assert.False(t, n.pexReactor.IsRunning())
 }
 
 func TestNodeSetEventSink(t *testing.T) {
 
 	defer os.RemoveAll(cfg.RootDir)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	logger := log.TestingLogger()
 
 	setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
-		eventBus, err := createAndStartEventBus(logger)
+		eventBus, err := createAndStartEventBus(ctx, logger)
 		require.NoError(t, err)
-		t.Cleanup(func() { require.NoError(t, eventBus.Stop()) })
+		t.Cleanup(eventBus.Wait)
 
 		genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
 		require.NoError(t, err)
 
-		indexService, eventSinks, err := createAndStartIndexerService(cfg,
+		indexService, eventSinks, err := createAndStartIndexerService(ctx, cfg,
 			config.DefaultDBProvider, eventBus, logger, genDoc.ChainID,
 			indexer.NopMetrics())
 		require.NoError(t, err)
-		t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
+		t.Cleanup(indexService.Wait)
 		return eventSinks
 	}
 	cleanup := func(ns service.Service) func() {
@@ -576,7 +597,7 @@ func TestNodeSetEventSink(t *testing.T) {
 			if !n.IsRunning() {
 				return
 			}
-			assert.NoError(t, n.Stop())
+			cancel()
 			n.Wait()
 		}
 	}
@@ -598,7 +619,7 @@ func TestNodeSetEventSink(t *testing.T) {
 	assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
 	cfg.TxIndex.Indexer = []string{"kvv"}
-	ns, err := newDefaultNode(cfg, logger)
+	ns, err := newDefaultNode(ctx, cfg, logger)
 	assert.Nil(t, ns)
 	assert.Contains(t, err.Error(), "unsupported event sink type")
 	t.Cleanup(cleanup(ns))
@@ -610,7 +631,7 @@ func TestNodeSetEventSink(t *testing.T) {
 	assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
 	cfg.TxIndex.Indexer = []string{"psql"}
-	ns, err = newDefaultNode(cfg, logger)
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	assert.Nil(t, ns)
 	assert.Contains(t, err.Error(), "the psql connection settings cannot be empty")
 	t.Cleanup(cleanup(ns))
@@ -652,14 +673,14 @@ func TestNodeSetEventSink(t *testing.T) {
 	var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
 	cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
 	cfg.TxIndex.PsqlConn = psqlConn
-	ns, err = newDefaultNode(cfg, logger)
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	require.Error(t, err)
 	assert.Contains(t, err.Error(), e.Error())
 	t.Cleanup(cleanup(ns))
 
 	cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
 	cfg.TxIndex.PsqlConn = psqlConn
-	ns, err = newDefaultNode(cfg, logger)
+	ns, err = newDefaultNode(ctx, cfg, logger)
 	require.Error(t, err)
 	assert.Contains(t, err.Error(), e.Error())
 	t.Cleanup(cleanup(ns))
diff --git a/node/public.go b/node/public.go
index c616eebac..87007bdfc 100644
--- a/node/public.go
+++ b/node/public.go
@@ -2,6 +2,7 @@ package node
 
 import (
+	"context"
 	"fmt"
 
 	abciclient "github.com/tendermint/tendermint/abci/client"
@@ -16,8 +17,12 @@ import (
 // process that host their own process-local tendermint node. This is
 // equivalent to running tendermint in its own process communicating
 // with an external ABCI application.
-func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) {
-	return newDefaultNode(conf, logger)
+func NewDefault(
+	ctx context.Context,
+	conf *config.Config,
+	logger log.Logger,
+) (service.Service, error) {
+	return newDefaultNode(ctx, conf, logger)
 }
 
 // New constructs a tendermint node. The ClientCreator makes it
@@ -26,7 +31,9 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error)
 // Genesis document: if the value is nil, the genesis document is read
 // from the file specified in the config, and otherwise the node uses
 // value of the final argument.
-func New(conf *config.Config,
+func New(
+	ctx context.Context,
+	conf *config.Config,
 	logger log.Logger,
 	cf abciclient.Creator,
 	gen *types.GenesisDoc,
@@ -51,7 +58,9 @@ func New(conf *config.Config,
 		return nil, err
 	}
 
-	return makeNode(conf,
+	return makeNode(
+		ctx,
+		conf,
 		pval,
 		nodeKey,
 		cf,
diff --git a/node/setup.go b/node/setup.go
index 297ed0265..6ca991484 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -2,6 +2,7 @@ package node
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -52,6 +53,10 @@ func makeCloser(cs []closer) closer {
 	}
 }
 
+func convertCancelCloser(cancel context.CancelFunc) closer {
+	return func() error { cancel(); return nil }
+}
+
 func combineCloseError(err error, cl closer) error {
 	if err == nil {
 		return cl()
 	}
@@ -88,26 +93,31 @@ func initDBs(
 	return blockStore, stateDB, makeCloser(closers), nil
 }
 
-// nolint:lll
-func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
+func createAndStartProxyAppConns(
+	ctx context.Context,
+	clientCreator abciclient.Creator,
+	logger log.Logger,
+	metrics *proxy.Metrics,
+) (proxy.AppConns, error) {
 	proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), metrics)
 
-	if err := proxyApp.Start(); err != nil {
+	if err := proxyApp.Start(ctx); err != nil {
 		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
 	}
 
 	return proxyApp, nil
 }
 
-func createAndStartEventBus(logger log.Logger) (*eventbus.EventBus, error) {
+func createAndStartEventBus(ctx context.Context, logger log.Logger) (*eventbus.EventBus, error) {
 	eventBus := eventbus.NewDefault(logger.With("module", "events"))
-	if err := eventBus.Start(); err != nil {
+	if err := eventBus.Start(ctx); err != nil {
 		return nil, err
 	}
 
 	return eventBus, nil
 }
 
 func createAndStartIndexerService(
+	ctx context.Context,
 	cfg *config.Config,
 	dbProvider config.DBProvider,
 	eventBus *eventbus.EventBus,
@@ -127,7 +137,7 @@ func createAndStartIndexerService(
 		Metrics: metrics,
 	})
 
-	if err := indexerService.Start(); err != nil {
+	if err := indexerService.Start(ctx); err != nil {
 		return nil, nil, err
 	}
 
diff --git a/privval/grpc/util.go b/privval/grpc/util.go
index 75ad04d42..7e0483f9c 100644
--- a/privval/grpc/util.go
+++ b/privval/grpc/util.go
@@ -109,7 +109,7 @@ func DialRemoteSigner(
 
 	dialOptions = append(dialOptions, transportSecurity)
 
-	ctx := context.Background()
+	ctx := context.TODO()
 	_, address := tmnet.ProtocolAndAddress(cfg.ListenAddr)
 	conn, err := grpc.DialContext(ctx, address, dialOptions...)
if err != nil { diff --git a/privval/signer_client.go b/privval/signer_client.go index 5e5b32a92..ec6d95ca6 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -23,9 +23,9 @@ var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) -func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { +func NewSignerClient(ctx context.Context, endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(ctx); err != nil { return nil, fmt.Errorf("failed to start listener endpoint: %w", err) } } diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 9aa49e709..f9272b004 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -23,370 +23,336 @@ type signerTestCase struct { mockPV types.PrivValidator signerClient *SignerClient signerServer *SignerServer + name string + closer context.CancelFunc } -func getSignerTestCases(t *testing.T) []signerTestCase { +func getSignerTestCases(ctx context.Context, t *testing.T) []signerTestCase { + t.Helper() + testCases := make([]signerTestCase, 0) // Get test cases for each possible dialer (DialTCP / DialUnix / etc) - for _, dtc := range getDialerTestCases(t) { + for idx, dtc := range getDialerTestCases(t) { chainID := tmrand.Str(12) mockPV := types.NewMockPV() + cctx, ccancel := context.WithCancel(ctx) // get a pair of signer listener, signer dialer endpoints - sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) - sc, err := NewSignerClient(sl, chainID) + sl, sd := getMockEndpoints(cctx, t, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(cctx, sl, chainID) require.NoError(t, err) ss := NewSignerServer(sd, chainID, mockPV) - err = ss.Start() - require.NoError(t, err) + require.NoError(t, ss.Start(cctx)) - tc := signerTestCase{ + testCases = append(testCases, signerTestCase{ + name: fmt.Sprintf("Case%d%T_%s", idx, dtc.dialer, chainID), + closer: ccancel, chainID: chainID, mockPV: mockPV, signerClient: sc, signerServer: ss, - } - - testCases = append(testCases, tc) + }) } return testCases } func TestSignerClose(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - err := tc.signerClient.Close() - assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - err = tc.signerServer.Stop() - assert.NoError(t, err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + assert.NoError(t, tc.signerClient.Close()) + assert.NoError(t, tc.signerServer.Stop()) + }) } } func TestSignerPing(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tc := range getSignerTestCases(ctx, t) { err := tc.signerClient.Ping() assert.NoError(t, err) } } func TestSignerGetPubKey(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t 
*testing.T) { + defer tc.closer() + + pubKey, err := tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + + assert.Equal(t, expectedPubKey, pubKey) + + pubKey, err = tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + expectedAddr := expectedpk.Address() + + assert.Equal(t, expectedAddr, pubKey.Address()) }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - pubKey, err := tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedPubKey, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) - - assert.Equal(t, expectedPubKey, pubKey) - - pubKey, err = tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedpk, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) - expectedAddr := expectedpk.Address() - - assert.Equal(t, expectedAddr, pubKey.Address()) } } func TestSignerProposal(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - want := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + have := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + want := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } + + require.NoError(t, tc.mockPV.SignProposal(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignProposal(ctx, tc.chainID, have.ToProto())) + + assert.Equal(t, want.Signature, have.Signature) }) - require.NoError(t, tc.mockPV.SignProposal(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignProposal(context.Background(), tc.chainID, have.ToProto())) - - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVote(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: 
types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + + assert.Equal(t, want.Signature, have.Signature) }) - - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVoteResetDeadline(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } + + time.Sleep(testTimeoutReadWrite2o3) + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + assert.Equal(t, want.Signature, have.Signature) + + // TODO(jleni): Clarify what is actually being tested + + // This would exceed the deadline if it was not extended by the previous message + 
time.Sleep(testTimeoutReadWrite2o3)
+
+			require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto()))
+			require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto()))
+			assert.Equal(t, want.Signature, have.Signature)
+		})
-
-		time.Sleep(testTimeoutReadWrite2o3)
-
-		require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto()))
-		require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto()))
-		assert.Equal(t, want.Signature, have.Signature)
-
-		// TODO(jleni): Clarify what is actually being tested
-
-		// This would exceed the deadline if it was not extended by the previous message
-		time.Sleep(testTimeoutReadWrite2o3)
-
-		require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto()))
-		require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto()))
-		assert.Equal(t, want.Signature, have.Signature)
 	}
 }
 
 func TestSignerVoteKeepAlive(t *testing.T) {
-	for _, tc := range getSignerTestCases(t) {
-		ts := time.Now()
-		hash := tmrand.Bytes(tmhash.Size)
-		valAddr := tmrand.Bytes(crypto.AddressSize)
-		want := &types.Vote{
-			Type:             tmproto.PrecommitType,
-			Height:           1,
-			Round:            2,
-			BlockID:          types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
-			Timestamp:        ts,
-			ValidatorAddress: valAddr,
-			ValidatorIndex:   1,
-		}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-		have := &types.Vote{
-			Type:             tmproto.PrecommitType,
-			Height:           1,
-			Round:            2,
-			BlockID:          types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
-			Timestamp:        ts,
-			ValidatorAddress: valAddr,
-			ValidatorIndex:   1,
-		}
+	for _, tc := range getSignerTestCases(ctx, t) {
+		t.Run(tc.name, func(t *testing.T) {
+			defer tc.closer()
 
-		tc := tc
-		t.Cleanup(func() {
-			if err := tc.signerServer.Stop(); err != nil {
-				t.Error(err)
+			ts := time.Now()
+			hash := tmrand.Bytes(tmhash.Size)
+			valAddr := tmrand.Bytes(crypto.AddressSize)
+			want := &types.Vote{
+				Type:             tmproto.PrecommitType,
+				Height:           1,
+				Round:            2,
+				BlockID:          types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
+				Timestamp:        ts,
+				ValidatorAddress: valAddr,
+				ValidatorIndex:   1,
 			}
-		})
-		t.Cleanup(func() {
-			if err := tc.signerClient.Close(); err != nil {
-				t.Error(err)
+
+			have := &types.Vote{
+				Type:             tmproto.PrecommitType,
+				Height:           1,
+				Round:            2,
+				BlockID:          types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}},
+				Timestamp:        ts,
+				ValidatorAddress: valAddr,
+				ValidatorIndex:   1,
 			}
+
+			// Check that even if the client does not request a
+			// signature for a long time, the service is still available.
+
+			// In this particular case, we use the dialer logger to ensure that
+			// test messages are properly interleaved in the test logs
+			tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------")
+			time.Sleep(testTimeoutReadWrite * 3)
+			tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------")
+
+			require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto()))
+			require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto()))
+
+			assert.Equal(t, want.Signature, have.Signature)
 		})
-
-		// Check that even if the client does not request a
-		// signature for a long time.
The service is still available - - // in this particular case, we use the dialer logger to ensure that - // test messages are properly interleaved in the test logs - tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------") - time.Sleep(testTimeoutReadWrite * 3) - tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") - - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerSignProposalErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - // Replace service with a mock that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + // Replace service with a mock that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + proposal := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + Signature: []byte("signature"), } + + err := tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignProposal(ctx, tc.chainID, proposal.ToProto()) + require.Error(t, err) + + err = tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto()) + require.Error(t, err) }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - proposal := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - Signature: []byte("signature"), - } - - err := tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = tc.mockPV.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) - - err = tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) } } func TestSignerSignVoteErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - vote := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - Signature: []byte("signature"), - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // Replace signer service privval with one that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = 
types.NewErroringMockPV() + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Signature: []byte("signature"), } + + // Replace signer service privval with one that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + err := tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) + + err = tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - err := tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = tc.mockPV.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) - - err = tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) } } @@ -413,28 +379,23 @@ func brokenHandler(ctx context.Context, privVal types.PrivValidator, request pri } func TestSignerUnexpectedResponse(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc.signerServer.privVal = types.NewMockPV() - tc.mockPV = types.NewMockPV() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc.signerServer.SetRequestHandler(brokenHandler) + for _, tc := range getSignerTestCases(ctx, t) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } + tc.signerServer.privVal = types.NewMockPV() + tc.mockPV = types.NewMockPV() + + tc.signerServer.SetRequestHandler(brokenHandler) + + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} + + e := tc.signerClient.SignVote(ctx, tc.chainID, want.ToProto()) + assert.EqualError(t, e, "empty response") }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - - ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} - - e := tc.signerClient.SignVote(context.Background(), tc.chainID, want.ToProto()) - assert.EqualError(t, e, "empty response") } } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 292e7a476..e2287c630 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -1,6 +1,7 @@ package privval import ( + "context" "fmt" "net" "time" @@ -63,7 +64,7 @@ func NewSignerListenerEndpoint( } // OnStart implements service.Service. 
-func (sl *SignerListenerEndpoint) OnStart() error { +func (sl *SignerListenerEndpoint) OnStart(ctx context.Context) error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index cbd45e6ce..b92e0abe5 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -1,6 +1,7 @@ package privval import ( + "context" "net" "testing" "time" @@ -38,6 +39,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { retries = 10 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -71,7 +75,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { mockPV := types.NewMockPV() signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - err = signerServer.Start() + err = signerServer.Start(ctx) require.NoError(t, err) t.Cleanup(func() { if err := signerServer.Stop(); err != nil { @@ -88,6 +92,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } func TestRetryConnToRemoteSigner(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tc := range getDialerTestCases(t) { var ( logger = log.TestingLogger() @@ -107,14 +114,9 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) - t.Cleanup(func() { - if err := listenerEndpoint.Stop(); err != nil { - t.Error(err) - } - }) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, signerServer.Start()) + require.NoError(t, signerServer.Start(ctx)) assert.True(t, signerServer.IsRunning()) <-endpointIsOpenCh if err := signerServer.Stop(); err != nil { @@ -128,13 +130,8 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) // let some pings pass - require.NoError(t, signerServer2.Start()) + require.NoError(t, signerServer2.Start(ctx)) assert.True(t, signerServer2.IsRunning()) - t.Cleanup(func() { - if err := signerServer2.Stop(); err != nil { - t.Error(err) - } - }) // give the client some time to re-establish the conn to the remote signer // should see sth like this in the logs: @@ -175,15 +172,23 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite ) } -func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { +func startListenerEndpointAsync( + ctx context.Context, + t *testing.T, + sle *SignerListenerEndpoint, + endpointIsOpenCh chan struct{}, +) { + t.Helper() + go func(sle *SignerListenerEndpoint) { - require.NoError(t, sle.Start()) + require.NoError(t, sle.Start(ctx)) assert.True(t, sle.IsRunning()) close(endpointIsOpenCh) }(sle) } func getMockEndpoints( + ctx context.Context, t *testing.T, addr string, socketDialer SocketDialer, @@ -204,9 +209,9 @@ func getMockEndpoints( SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, dialerEndpoint.Start()) + require.NoError(t, dialerEndpoint.Start(ctx)) assert.True(t, dialerEndpoint.IsRunning()) <-endpointIsOpenCh diff --git a/privval/signer_server.go 
b/privval/signer_server.go
index 24bf67cc5..e31d3bdb4 100644
--- a/privval/signer_server.go
+++ b/privval/signer_server.go
@@ -42,8 +42,8 @@ func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal typ
 }
 
 // OnStart implements service.Service.
-func (ss *SignerServer) OnStart() error {
-	go ss.serviceLoop()
+func (ss *SignerServer) OnStart(ctx context.Context) error {
+	go ss.serviceLoop(ctx)
 	return nil
 }
 
@@ -91,18 +91,18 @@ func (ss *SignerServer) servicePendingRequest() {
 	}
 }
 
-func (ss *SignerServer) serviceLoop() {
+func (ss *SignerServer) serviceLoop(ctx context.Context) {
 	for {
 		select {
+		case <-ss.Quit():
+			return
+		case <-ctx.Done():
+			return
 		default:
-			err := ss.endpoint.ensureConnection()
-			if err != nil {
+			if err := ss.endpoint.ensureConnection(); err != nil {
 				return
 			}
 			ss.servicePendingRequest()
-
-		case <-ss.Quit():
-			return
 		}
 	}
 }
diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go
index 0f908e271..e4c2a14ed 100644
--- a/rpc/client/http/ws.go
+++ b/rpc/client/http/ws.go
@@ -92,7 +92,7 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) {
 }
 
 // Start starts the websocket client and the event loop.
-func (w *wsEvents) Start() error {
+func (w *wsEvents) Start(ctx context.Context) error {
 	if err := w.ws.Start(); err != nil {
 		return err
 	}
diff --git a/rpc/client/interface.go b/rpc/client/interface.go
index 474eb9937..8d160b799 100644
--- a/rpc/client/interface.go
+++ b/rpc/client/interface.go
@@ -35,7 +35,7 @@ type Client interface {
 	// These methods define the operational structure of the client.
 
 	// Start the client. Start must report an error if the client is already running.
-	Start() error
+	Start(context.Context) error
 
 	// Stop the client. Stop must report an error if the client is not running.
 	Stop() error
diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go
index c242f01c4..5ae9b951c 100644
--- a/rpc/client/main_test.go
+++ b/rpc/client/main_test.go
@@ -32,7 +32,6 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) {
 	require.NoError(t, err)
 	t.Cleanup(func() {
 		cancel()
-		assert.NoError(t, node.Stop())
 		assert.NoError(t, closer(ctx))
 		assert.NoError(t, app.Close())
 		node.Wait()
diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go
new file mode 100644
index 000000000..7012e1c2d
--- /dev/null
+++ b/rpc/client/mocks/client.go
@@ -0,0 +1,756 @@
+// Code generated by mockery. DO NOT EDIT.
+ +package mocks + +import ( + bytes "github.com/tendermint/tendermint/libs/bytes" + client "github.com/tendermint/tendermint/rpc/client" + + context "context" + + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/types" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// ABCIInfo provides a mock function with given fields: _a0 +func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultABCIInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultABCIInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQuery provides a mock function with given fields: ctx, path, data +func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes) error); ok { + r1 = rf(ctx, path, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQueryWithOptions provides a mock function with given fields: ctx, path, data, opts +func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data, opts) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) error); ok { + r1 = rf(ctx, path, data, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Block provides a mock function with given fields: ctx, height +func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlock); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultBlock); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockResults provides a mock function with given fields: ctx, height +func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlockResults + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlockResults); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockResults) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockSearch provides a mock function with given fields: ctx, query, page, perPage, orderBy +func (_m *Client) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { + ret := _m.Called(ctx, query, page, perPage, orderBy) + + var r0 *coretypes.ResultBlockSearch + if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) *coretypes.ResultBlockSearch); ok { + r0 = rf(ctx, query, page, perPage, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockSearch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, *int, *int, string) error); ok { + r1 = rf(ctx, query, page, perPage, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockchainInfo provides a mock function with given fields: ctx, minHeight, maxHeight +func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + ret := _m.Called(ctx, minHeight, maxHeight) + + var r0 *coretypes.ResultBlockchainInfo + if rf, ok := ret.Get(0).(func(context.Context, int64, int64) *coretypes.ResultBlockchainInfo); ok { + r0 = rf(ctx, minHeight, maxHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockchainInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok { + r1 = rf(ctx, minHeight, maxHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastEvidence provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastEvidence + if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastEvidence) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + 
} else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxCommit provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTxCommit + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTxCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultCheckTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: ctx, height +func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultCommit + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultCommit); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConsensusParams provides a mock function with given fields: ctx, height +func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultConsensusParams + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultConsensusParams); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultConsensusParams) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConsensusState provides a mock function with given fields: _a0 +func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultConsensusState + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { 
+ r0 = ret.Get(0).(*coretypes.ResultConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DumpConsensusState provides a mock function with given fields: _a0 +func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultDumpConsensusState + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultDumpConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultDumpConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Genesis provides a mock function with given fields: _a0 +func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultGenesis + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultGenesis); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultGenesis) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GenesisChunked provides a mock function with given fields: _a0, _a1 +func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultGenesisChunk + if rf, ok := ret.Get(0).(func(context.Context, uint) *coretypes.ResultGenesisChunk); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultGenesisChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Health provides a mock function with given fields: _a0 +func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultHealth + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultHealth); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHealth) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsRunning provides a mock function with given fields: +func (_m *Client) IsRunning() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NetInfo provides a mock function with given fields: _a0 +func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultNetInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultNetInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultNetInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NumUnconfirmedTxs provides a mock function with given fields: _a0 +func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok 
:= ret.Get(0).(func(context.Context) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with given fields: _a0 +func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultStatus + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultStatus); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Stop provides a mock function with given fields: +func (_m *Client) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity +func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { + _va := make([]interface{}, len(outCapacity)) + for _i := range outCapacity { + _va[_i] = outCapacity[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, subscriber, query) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 <-chan coretypes.ResultEvent + if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) <-chan coretypes.ResultEvent); ok { + r0 = rf(ctx, subscriber, query, outCapacity...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan coretypes.ResultEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, ...int) error); ok { + r1 = rf(ctx, subscriber, query, outCapacity...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Tx provides a mock function with given fields: ctx, hash, prove +func (_m *Client) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { + ret := _m.Called(ctx, hash, prove) + + var r0 *coretypes.ResultTx + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) *coretypes.ResultTx); ok { + r0 = rf(ctx, hash, prove) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes, bool) error); ok { + r1 = rf(ctx, hash, prove) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxSearch provides a mock function with given fields: ctx, query, prove, page, perPage, orderBy +func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { + ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + + var r0 *coretypes.ResultTxSearch + if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) *coretypes.ResultTxSearch); ok { + r0 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTxSearch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bool, *int, *int, string) error); ok { + r1 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UnconfirmedTxs provides a mock function with given fields: ctx, limit +func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, limit) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { + r1 = rf(ctx, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Unsubscribe provides a mock function with given fields: ctx, subscriber, query +func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query string) error { + ret := _m.Called(ctx, subscriber, query) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, subscriber, query) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnsubscribeAll provides a mock function with given fields: ctx, subscriber +func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { + ret := _m.Called(ctx, subscriber) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, subscriber) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Validators provides a mock function with given fields: ctx, height, page, perPage +func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perPage *int) (*coretypes.ResultValidators, error) { + ret := _m.Called(ctx, height, page, perPage) + + var r0 *coretypes.ResultValidators + if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int) *coretypes.ResultValidators); ok { + r0 = rf(ctx, height, page, perPage) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultValidators) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64, *int, 
*int) error); ok {
+		r1 = rf(ctx, height, page, perPage)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go
index 38766e047..12c13d686 100644
--- a/rpc/client/rpc_test.go
+++ b/rpc/client/rpc_test.go
@@ -463,7 +463,7 @@ func TestClientMethodCalls(t *testing.T) {
 	// start for this test it if it wasn't already running
 	if !c.IsRunning() {
 		// if so, then we start it, listen, and stop it.
-		err := c.Start()
+		err := c.Start(ctx)
 		require.Nil(t, err)
 		t.Cleanup(func() {
 			if err := c.Stop(); err != nil {
diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go
index 7bdc1bb2e..90c3b2e49 100644
--- a/rpc/test/helpers.go
+++ b/rpc/test/helpers.go
@@ -73,10 +73,13 @@ func CreateConfig(testName string) (*config.Config, error) {
 
 type ServiceCloser func(context.Context) error
 
-func StartTendermint(ctx context.Context,
+func StartTendermint(
+	ctx context.Context,
 	conf *config.Config,
 	app abci.Application,
-	opts ...func(*Options)) (service.Service, ServiceCloser, error) {
+	opts ...func(*Options),
+) (service.Service, ServiceCloser, error) {
+	ctx, cancel := context.WithCancel(ctx)
 
 	nodeOpts := &Options{}
 	for _, opt := range opts {
@@ -89,14 +92,14 @@ func StartTendermint(ctx context.Context,
 		logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
 	}
 	papp := abciclient.NewLocalCreator(app)
-	tmNode, err := node.New(conf, logger, papp, nil)
+	tmNode, err := node.New(ctx, conf, logger, papp, nil)
 	if err != nil {
-		return nil, func(_ context.Context) error { return nil }, err
+		return nil, func(_ context.Context) error { cancel(); return nil }, err
 	}
 
-	err = tmNode.Start()
+	err = tmNode.Start(ctx)
 	if err != nil {
-		return nil, func(_ context.Context) error { return nil }, err
+		return nil, func(_ context.Context) error { cancel(); return nil }, err
 	}
 
 	waitForRPC(ctx, conf)
@@ -106,9 +109,7 @@ func StartTendermint(ctx context.Context,
 	}
 
 	return tmNode, func(ctx context.Context) error {
-		if err := tmNode.Stop(); err != nil {
-			logger.Error("Error when trying to stop node", "err", err)
-		}
+		cancel()
 		tmNode.Wait()
 		os.RemoveAll(conf.RootDir)
 		return nil
diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go
index 7a9e67915..5d25b0195 100644
--- a/test/e2e/node/main.go
+++ b/test/e2e/node/main.go
@@ -38,6 +38,9 @@ var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, fals
 
 // main is the binary entrypoint.
 func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	if len(os.Args) != 2 {
 		fmt.Printf("Usage: %v <configfile>", os.Args[0])
 		return
@@ -47,14 +50,14 @@ func main() {
 		configFile = os.Args[1]
 	}
 
-	if err := run(configFile); err != nil {
+	if err := run(ctx, configFile); err != nil {
 		logger.Error(err.Error())
 		os.Exit(1)
 	}
 }
 
 // run runs the application - basically like main() with error handling.
-func run(configFile string) error {
+func run(ctx context.Context, configFile string) error {
 	cfg, err := LoadConfig(configFile)
 	if err != nil {
 		return err
@@ -62,7 +65,7 @@ func run(configFile string) error {
 
 	// Start remote signer (must start before node if running builtin).
 	if cfg.PrivValServer != "" {
-		if err = startSigner(cfg); err != nil {
+		if err = startSigner(ctx, cfg); err != nil {
 			return err
 		}
 		if cfg.Protocol == "builtin" {
@@ -73,15 +76,15 @@ func run(configFile string) error {
 
 	// Start app server.
switch cfg.Protocol { case "socket", "grpc": - err = startApp(cfg) + err = startApp(ctx, cfg) case "builtin": switch cfg.Mode { case string(e2e.ModeLight): - err = startLightNode(cfg) + err = startLightNode(ctx, cfg) case string(e2e.ModeSeed): - err = startSeedNode() + err = startSeedNode(ctx) default: - err = startNode(cfg) + err = startNode(ctx, cfg) } default: err = fmt.Errorf("invalid protocol %q", cfg.Protocol) @@ -97,7 +100,7 @@ func run(configFile string) error { } // startApp starts the application server, listening for connections from Tendermint. -func startApp(cfg *Config) error { +func startApp(ctx context.Context, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err @@ -106,7 +109,7 @@ func startApp(cfg *Config) error { if err != nil { return err } - err = server.Start() + err = server.Start(ctx) if err != nil { return err } @@ -118,7 +121,7 @@ func startApp(cfg *Config) error { // configuration is in $TMHOME/config/tendermint.toml. // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. -func startNode(cfg *Config) error { +func startNode(ctx context.Context, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err @@ -129,7 +132,9 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } - n, err := node.New(tmcfg, + n, err := node.New( + ctx, + tmcfg, nodeLogger, abciclient.NewLocalCreator(app), nil, @@ -137,10 +142,10 @@ func startNode(cfg *Config) error { if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startSeedNode() error { +func startSeedNode(ctx context.Context) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) @@ -148,14 +153,14 @@ func startSeedNode() error { tmcfg.Mode = config.ModeSeed - n, err := node.New(tmcfg, nodeLogger, nil, nil) + n, err := node.New(ctx, tmcfg, nodeLogger, nil, nil) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startLightNode(cfg *Config) error { +func startLightNode(ctx context.Context, cfg *Config) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return err @@ -204,7 +209,7 @@ func startLightNode(cfg *Config) error { } logger.Info("Starting proxy...", "laddr", tmcfg.RPC.ListenAddress) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } @@ -213,7 +218,7 @@ func startLightNode(cfg *Config) error { } // startSigner starts a signer server connecting to the given endpoint. 
-func startSigner(cfg *Config) error { +func startSigner(ctx context.Context, cfg *Config) error { filePV, err := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState) if err != nil { return err @@ -251,7 +256,8 @@ func startSigner(cfg *Config) error { endpoint := privval.NewSignerDialerEndpoint(logger, dialFn, privval.SignerDialerEndpointRetryWaitInterval(1*time.Second), privval.SignerDialerEndpointConnRetries(100)) - err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start() + + err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start(ctx) if err != nil { return err } diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index 406e062fd..ba60d72cc 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -17,7 +17,7 @@ func init() { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) appConnMem, _ := cc(log.NewNopLogger()) - err := appConnMem.Start() + err := appConnMem.Start(context.TODO()) if err != nil { panic(err) } diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 96eaaaff0..c8b5cd81d 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -89,7 +89,7 @@ type timeoutError interface { // NewTestHarness will load Tendermint data from the given files (including // validator public/private keypairs and chain details) and create a new // harness. -func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) { +func NewTestHarness(ctx context.Context, logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) { keyFile := ExpandPath(cfg.KeyFile) stateFile := ExpandPath(cfg.StateFile) logger.Info("Loading private validator configuration", "keyFile", keyFile, "stateFile", stateFile) @@ -113,7 +113,7 @@ func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, err return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } - signerClient, err := privval.NewSignerClient(spv, st.ChainID) + signerClient, err := privval.NewSignerClient(ctx, spv, st.ChainID) if err != nil { return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index 85a589185..2ef630555 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -1,6 +1,7 @@ package internal import ( + "context" "fmt" "os" "testing" @@ -73,17 +74,24 @@ const ( ) func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := makeConfig(t, 1, 2) defer cleanup(cfg) - th, err := NewTestHarness(log.TestingLogger(), cfg) + th, err := NewTestHarness(ctx, log.TestingLogger(), cfg) require.NoError(t, err) th.Run() assert.Equal(t, ErrMaxAcceptRetriesReached, th.exitCode) } func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + harnessTest( + ctx, t, func(th *TestHarness) *privval.SignerServer { return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false) @@ -93,7 +101,11 @@ func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { } func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + harnessTest( + ctx, t, func(th 
*TestHarness) *privval.SignerServer { return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false) @@ -103,7 +115,11 @@ func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { } func TestRemoteSignerProposalSigningFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + harnessTest( + ctx, t, func(th *TestHarness) *privval.SignerServer { return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false) @@ -113,7 +129,11 @@ func TestRemoteSignerProposalSigningFailed(t *testing.T) { } func TestRemoteSignerVoteSigningFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + harnessTest( + ctx, t, func(th *TestHarness) *privval.SignerServer { return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true) @@ -144,11 +164,16 @@ func newMockSignerServer( } // For running relatively standard tests. -func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) { +func harnessTest( + ctx context.Context, + t *testing.T, + signerServerMaker func(th *TestHarness) *privval.SignerServer, + expectedExitCode int, +) { cfg := makeConfig(t, 100, 3) defer cleanup(cfg) - th, err := NewTestHarness(log.TestingLogger(), cfg) + th, err := NewTestHarness(ctx, log.TestingLogger(), cfg) require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -157,7 +182,7 @@ func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval. }() ss := signerServerMaker(th) - require.NoError(t, ss.Start()) + require.NoError(t, ss.Start(ctx)) assert.True(t, ss.IsRunning()) defer ss.Stop() //nolint:errcheck // ignore for tests diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go index 90afd7d1f..4bf1933e0 100644 --- a/tools/tm-signer-harness/main.go +++ b/tools/tm-signer-harness/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "flag" "fmt" "os" @@ -115,7 +116,7 @@ Usage: } } -func runTestHarness(acceptRetries int, bindAddr, tmhome string) { +func runTestHarness(ctx context.Context, acceptRetries int, bindAddr, tmhome string) { tmhome = internal.ExpandPath(tmhome) cfg := internal.TestHarnessConfig{ BindAddr: bindAddr, @@ -128,7 +129,7 @@ func runTestHarness(acceptRetries int, bindAddr, tmhome string) { SecretConnKey: ed25519.GenPrivKey(), ExitWhenComplete: true, } - harness, err := internal.NewTestHarness(logger, cfg) + harness, err := internal.NewTestHarness(ctx, logger, cfg) if err != nil { logger.Error(err.Error()) if therr, ok := err.(*internal.TestHarnessError); ok { @@ -156,6 +157,9 @@ func extractKey(tmhome, outputPath string) { } func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := rootCmd.Parse(os.Args[1:]); err != nil { fmt.Printf("Error parsing flags: %v\n", err) os.Exit(1) @@ -183,7 +187,7 @@ func main() { fmt.Printf("Error parsing flags: %v\n", err) os.Exit(1) } - runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome) + runTestHarness(ctx, flagAcceptRetries, flagBindAddr, flagTMHome) case "extract_key": if err := extractKeyCmd.Parse(os.Args[2:]); err != nil { fmt.Printf("Error parsing flags: %v\n", err) diff --git a/types/block_test.go b/types/block_test.go index 1c762653b..e0c1ab3be 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -555,6 +555,9 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { round = 0 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type commitVoteTest struct { blockIDs []BlockID 
numVotes []int // must sum to numValidators
@@ -572,7 +575,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) {
 	vi := int32(0)
 	for n := range tc.blockIDs {
 		for i := 0; i < tc.numVotes[n]; i++ {
-			pubKey, err := vals[vi].GetPubKey(context.Background())
+			pubKey, err := vals[vi].GetPubKey(ctx)
 			require.NoError(t, err)
 			vote := &Vote{
 				ValidatorAddress: pubKey.Address(),

From 7e58f02eb84cc51c497aff6bb62a660782a9a696 Mon Sep 17 00:00:00 2001
From: Sam Kleinman
Date: Fri, 19 Nov 2021 11:49:51 -0500
Subject: [PATCH 172/174] service: remove quit method (#7293)

---
 internal/proxy/multi_app_conn.go      | 58 ++++++++++++++++++---------
 internal/proxy/multi_app_conn_test.go | 17 +++-----
 libs/service/service.go               |  3 --
 3 files changed, 44 insertions(+), 34 deletions(-)

diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go
index 5e5920f24..6196992ff 100644
--- a/internal/proxy/multi_app_conn.go
+++ b/internal/proxy/multi_app_conn.go
@@ -128,7 +128,7 @@ func (app *multiAppConn) OnStart(ctx context.Context) error {
 	app.consensusConn = NewAppConnConsensus(c, app.metrics)
 
 	// Kill Tendermint if the ABCI application crashes.
-	go app.killTMOnClientError()
+	app.startWatchersForClientErrorToKillTendermint(ctx)
 
 	return nil
 }
@@ -137,7 +137,12 @@ func (app *multiAppConn) OnStop() {
 	app.stopAllClients()
 }
 
-func (app *multiAppConn) killTMOnClientError() {
+func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context.Context) {
+	// This function starts one goroutine per ABCI client; each
+	// goroutine SIGTERMs our own PID if its client exits or
+	// returns early. If the context is canceled, these
+	// goroutines will not kill tendermint.
+
 	killFn := func(conn string, err error, logger log.Logger) {
 		logger.Error(
 			fmt.Sprintf("%s connection terminated. Did the application crash? 
Please restart tendermint", conn), @@ -147,23 +152,38 @@ func (app *multiAppConn) killTMOnClientError() { } } - select { - case <-app.consensusConnClient.Quit(): - if err := app.consensusConnClient.Error(); err != nil { - killFn(connConsensus, err, app.Logger) - } - case <-app.mempoolConnClient.Quit(): - if err := app.mempoolConnClient.Error(); err != nil { - killFn(connMempool, err, app.Logger) - } - case <-app.queryConnClient.Quit(): - if err := app.queryConnClient.Error(); err != nil { - killFn(connQuery, err, app.Logger) - } - case <-app.snapshotConnClient.Quit(): - if err := app.snapshotConnClient.Error(); err != nil { - killFn(connSnapshot, err, app.Logger) - } + type op struct { + connClient stoppableClient + name string + } + + for _, client := range []op{ + { + connClient: app.consensusConnClient, + name: connConsensus, + }, + { + connClient: app.mempoolConnClient, + name: connMempool, + }, + { + connClient: app.queryConnClient, + name: connQuery, + }, + { + connClient: app.snapshotConnClient, + name: connSnapshot, + }, + } { + go func(name string, client stoppableClient) { + client.Wait() + if ctx.Err() != nil { + return + } + if err := client.Error(); err != nil { + killFn(name, err, app.Logger) + } + }(client.name, client.connClient) } } diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go index 9ad39cb3b..98ea0ca53 100644 --- a/internal/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -26,14 +26,13 @@ type noopStoppableClientImpl struct { func (c *noopStoppableClientImpl) Stop() error { c.count++; return nil } func TestAppConns_Start_Stop(t *testing.T) { - quitCh := make(<-chan struct{}) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() clientMock := &abcimocks.Client{} clientMock.On("Start", mock.Anything).Return(nil).Times(4) - clientMock.On("Quit").Return(quitCh).Times(4) + clientMock.On("Error").Return(nil) + clientMock.On("Wait").Return(nil).Times(4) cl := &noopStoppableClientImpl{Client: clientMock} creatorCallCount := 0 @@ -65,22 +64,19 @@ func TestAppConns_Failure(t *testing.T) { go func() { for range c { close(ok) + return } }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - quitCh := make(chan struct{}) - var recvQuitCh <-chan struct{} // nolint:gosimple - recvQuitCh = quitCh - clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return() clientMock.On("Start", mock.Anything).Return(nil) - clientMock.On("Quit").Return(recvQuitCh) - clientMock.On("Error").Return(errors.New("EOF")).Once() + clientMock.On("Wait").Return(nil) + clientMock.On("Error").Return(errors.New("EOF")) cl := &noopStoppableClientImpl{Client: clientMock} creator := func(log.Logger) (abciclient.Client, error) { @@ -93,9 +89,6 @@ func TestAppConns_Failure(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { cancel(); appConns.Wait() }) - // simulate failure - close(quitCh) - select { case <-ok: t.Log("SIGTERM successfully received") diff --git a/libs/service/service.go b/libs/service/service.go index 0fc26dbdb..f2b440e94 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -30,9 +30,6 @@ type Service interface { // Return true if the service is running IsRunning() bool - // Quit returns a channel, which is closed once service is stopped. - Quit() <-chan struct{} - // String representation of the service String() string From 26b887b883fe85a60053ee8e5a77650eb26d9d96 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger"
Date: Fri, 19 Nov 2021 10:45:38 -0800
Subject: [PATCH 173/174] build: update location of proto builder image (#7296)

Updates #7272.
---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 043b2031d..d091b9d48 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,8 @@ endif
 LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
 BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
 
-DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto
+BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
+DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
 
 CGO_ENABLED ?= 0
 
 # handle nostrip

From 02c7dca9456a4c343ceb98eaa6e60c4e97771a63 Mon Sep 17 00:00:00 2001
From: William Banfield <4561443+williambanfield@users.noreply.github.com>
Date: Tue, 23 Nov 2021 14:53:59 -0500
Subject: [PATCH 174/174] docs: add abci timing metrics to the metrics docs (#7311)

---
 docs/nodes/metrics.md | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md
index baaa9a812..6589e044a 100644
--- a/docs/nodes/metrics.md
+++ b/docs/nodes/metrics.md
@@ -20,6 +20,7 @@ The following metrics are available:
 
 | **Name** | **Type** | **Tags** | **Description** |
 | -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
+| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods |
 | consensus_height | Gauge | | Height of the chain |
 | consensus_validators | Gauge | | Number of validators |
 | consensus_validators_power | Gauge | | Total voting power of all validators |
@@ -55,6 +56,16 @@
 
 Percentage of missing + byzantine validators:
 
-```md
-((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100
+```prometheus
+((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
+```
+
+The rate at which the application responds to each ABCI method call:
+```prometheus
+sum(rate(tendermint_abci_connection_method_timing_count[5m])) by (method)
+```
+
+The 95th percentile of the application's response time for the `deliver_tx` ABCI method call:
+```prometheus
+histogram_quantile(0.95, sum by(le) (rate(tendermint_abci_connection_method_timing_bucket{method="deliver_tx"}[5m])))
 ```
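
A further query sketch, not part of the patch above and offered under the assumption that `tendermint_abci_connection_method_timing` follows the standard Prometheus histogram conventions (exposing companion `_sum` and `_count` series): the average per-method ABCI response time over the last five minutes can be derived by dividing time spent by call count.

```prometheus
# Average ABCI latency per method: total seconds spent / number of calls.
sum by (method) (rate(tendermint_abci_connection_method_timing_sum[5m]))
  / sum by (method) (rate(tendermint_abci_connection_method_timing_count[5m]))
```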