diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a5bfbc861..d7ed13b80 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,9 +14,6 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v2 - with: - go-version: "1.15" - uses: actions/checkout@master - name: Prepare id: prep @@ -37,23 +34,26 @@ jobs: fi echo ::set-output name=tags::${TAGS} + - name: Set up QEMU + uses: docker/setup-qemu-action@master + with: + platforms: all + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 - name: Login to DockerHub + if: ${{ github.event_name != 'pull_request' }} uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build Tendermint - run: | - make build-linux && cp build/tendermint DOCKER/tendermint - - name: Publish to Docker Hub uses: docker/build-push-action@v2 with: - context: ./DOCKER + context: . file: ./DOCKER/Dockerfile + platforms: linux/amd64,linux/arm64 push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml new file mode 100644 index 000000000..148ec99e7 --- /dev/null +++ b/.github/workflows/fuzz-nightly.yml @@ -0,0 +1,69 @@ +# Runs fuzzing nightly. 
+name: fuzz-nightly +on: + workflow_dispatch: # allow running workflow manually + schedule: + - cron: '0 3 * * *' + +jobs: + fuzz-nightly-test: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.15' + + - uses: actions/checkout@v2 + + - name: Install go-fuzz + working-directory: test/fuzz + run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + + - name: Fuzz mempool + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool + continue-on-error: true + + - name: Fuzz p2p-addrbook + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook + continue-on-error: true + + - name: Fuzz p2p-pex + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex + continue-on-error: true + + - name: Fuzz p2p-sc + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-sc + continue-on-error: true + + - name: Fuzz p2p-rpc-server + working-directory: test/fuzz + run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server + continue-on-error: true + + - name: Set crashers count + working-directory: test/fuzz + run: echo "::set-output name=crashers-count::$(find . 
-type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" + id: set-crashers-count + + outputs: + crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }} + + fuzz-nightly-fail: + needs: fuzz-nightly-test + if: ${{ needs.fuzz-nightly-test.outputs.crashers_count != 0 }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack if any crashers + uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly Fuzz Tests + SLACK_ICON_EMOJI: ':firecracker:' + SLACK_COLOR: danger + SLACK_MESSAGE: Crashers found in Nightly Fuzz tests + SLACK_FOOTER: '' diff --git a/.gitignore b/.gitignore index d5442f35d..40bfa28d8 100644 --- a/.gitignore +++ b/.gitignore @@ -40,4 +40,8 @@ test/e2e/networks/*/ test/logs test/maverick/maverick test/p2p/data/ -vendor \ No newline at end of file +vendor +test/fuzz/**/corpus +test/fuzz/**/crashers +test/fuzz/**/suppressions +test/fuzz/**/*.zip diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index a4ce89e47..79b1e9b5f 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,4 +1,14 @@ -FROM alpine:3.9 +# stage 1 Generate Tendermint Binary +FROM golang:1.15-alpine as builder +RUN apk update && \ + apk upgrade && \ + apk --no-cache add make +COPY / /tendermint +WORKDIR /tendermint +RUN make build-linux + +# stage 2 +FROM golang:1.15-alpine LABEL maintainer="hello@tendermint.com" # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json @@ -29,15 +39,14 @@ EXPOSE 26656 26657 26660 STOPSIGNAL SIGTERM -ARG BINARY=tendermint -COPY $BINARY /usr/bin/tendermint +COPY --from=builder /tendermint/build/tendermint /usr/bin/tendermint # You can overwrite these before the first run to influence # config.json and genesis.json. Additionally, you can override # CMD to add parameters to `tendermint node`. 
ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain -COPY ./docker-entrypoint.sh /usr/local/bin/ +COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/ ENTRYPOINT ["docker-entrypoint.sh"] CMD ["start"] diff --git a/Makefile b/Makefile index 90d651dbf..73bc0923d 100644 --- a/Makefile +++ b/Makefile @@ -94,6 +94,7 @@ proto-gen: .PHONY: proto-gen proto-gen-docker: + @docker pull -q tendermintdev/docker-build-proto @echo "Generating Protobuf files" @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh .PHONY: proto-gen-docker diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index f055d03b9..7dd4d4ea3 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -150,6 +150,7 @@ func (r *Reactor) OnStart() error { return err } + r.poolWG.Add(1) go r.poolRoutine(false) } @@ -354,7 +355,9 @@ func (r *Reactor) SwitchToFastSync(state sm.State) error { return err } + r.poolWG.Add(1) go r.poolRoutine(true) + return nil } @@ -426,7 +429,6 @@ func (r *Reactor) poolRoutine(stateSynced bool) { go r.requestRoutine() - r.poolWG.Add(1) defer r.poolWG.Done() FOR_LOOP: diff --git a/consensus/replay.go b/consensus/replay.go index 9e393fbda..6f4dabdcb 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -255,7 +255,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { h.logger.Info("ABCI Handshake App Info", "height", blockHeight, - "hash", fmt.Sprintf("%X", appHash), + "hash", appHash, "software-version", res.Version, "protocol-version", res.AppVersion, ) @@ -272,7 +272,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", - "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + "appHeight", blockHeight, "appHash", appHash) // TODO: (on restart) replay mempool diff --git a/docs/nodes/README.md b/docs/nodes/README.md index 4a8815a15..3786ad7d1 100644 --- 
a/docs/nodes/README.md +++ b/docs/nodes/README.md @@ -15,6 +15,7 @@ This section will focus on how to operate full nodes, validators and light clien - [Light Client guides](./light-client.md) - [How to sync a light client](./light-client.md#) - [Metrics](./metrics.md) +- [Logging](./logging.md) ## Node Types diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index c42a82779..7db0cae3d 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -494,4 +494,4 @@ This section will cover settings within the p2p section of the `config.toml`. - `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. - `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on. - `seed-mode` = is used for when node operators want to run their node as a seed node. Seed node's run a variation of the PeX protocol that disconnects from peers after sending them a list of peers to connect to. To minimize the servers usage, it is recommended to set the mempool's size to 0. -- `private-peer-ids` = is a comma separated list of node ids that you would not like exposed to other peers (ie. you will not tell other peers about the private-peer-ids). This can be filled with a validators node id. \ No newline at end of file +- `private-peer-ids` = is a comma separated list of node ids that you would not like exposed to other peers (ie. you will not tell other peers about the private-peer-ids). This can be filled with a validators node id. 
diff --git a/docs/nodes/logging.md b/docs/nodes/logging.md new file mode 100644 index 000000000..9c44b91d5 --- /dev/null +++ b/docs/nodes/logging.md @@ -0,0 +1,171 @@ +--- +order: 7 +--- + +## Logging + +Logging adds detail and allows the node operator to better identify what they are looking for. Tendermint supports log levels on a global and per-module basis. This allows the node operator to see only the information they need and the developer to hone in on specific changes they are working on. + +## Configuring Log Levels + + +There are three log levels, `info`, `debug` and `error`. These can be configured either through the command line via `tendermint start --log-level ""` or within the `config.toml` file. + +- `info` Info represents an informational message. It is used to show that modules have started, stopped and how they are functioning. +- `debug` Debug is used to trace various calls or problems. Debug is used widely throughout a codebase and can lead to overly verbose logging. +- `error` Error represents something that has gone wrong. An error log can represent a potential problem that can lead to a node halt. + + +The default setting is a global `main:info,state:info,statesync:info,*:error` level. If you would like to set the log level for a specific module, it can be done in the following format: + +> We are setting all modules to log level `info` and the mempool to `error`. This will log all errors within the mempool module. + +Within the `config.toml`: + +```toml +# Output level for logging, including package level options +log-level = "*:info,mempool:error" +``` + +Via the command line: + +```sh +tendermint start --log-level "*:info,mempool:error" +``` + +## List of modules + +Here is the list of modules you may encounter in Tendermint's log and a +little overview what they do. 
+ +- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI + client with respect to the application and maintains 3 connections: + mempool, consensus and query. The code used by Tendermint Core can + be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client). +- `blockchain` Provides storage, pool (a group of peers), and reactor + for both storing and exchanging blocks between peers. +- `consensus` The heart of Tendermint core, which is the + implementation of the consensus algorithm. Includes two + "submodules": `wal` (write-ahead logging) for ensuring data + integrity and `replay` to replay blocks and messages on recovery + from a crash. +- `events` Simple event notification system. The list of events can be found [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). + You can subscribe to them by calling `subscribe` RPC method. Refer + to [RPC docs](./rpc.md) for additional information. +- `mempool` Mempool module handles all incoming transactions, whenever + they are coming from peers or the application. +- `p2p` Provides an abstraction around peer-to-peer communication. For + more details, please check out the + [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). +- `rpc-server` RPC server. For implementation details, please read the + [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go). +- `state` Represents the latest state and execution submodule, which + executes blocks against the application. +- `statesync` Provides a way to quickly sync a node with pruned history. + + +### Walkabout example + +We first create three connections (mempool, consensus and query) to the +application (running `kvstore` locally in this case). 
+ +```sh +I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn +I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient +I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient +I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient +``` + +Then Tendermint Core and the application perform a handshake. + +```sh +I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 +I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +``` + +After that, we start a few more things like the event switch, reactors, +and perform UPNP discover in order to detect the IP address. + +```sh +I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch +I[10-04|13:54:27.375] This node is a validator module=consensus +I[10-04|13:54:27.379] Starting Node module=main impl=Node +I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 +I[10-04|13:54:27.382] Getting UPNP external address module=p2p +I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" +I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) +I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" +I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor +I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor +I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor +I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false +I[10-04|13:54:30.387] Starting ConsensusState 
module=consensus impl=ConsensusState +I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL +I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker +``` + +Notice the second row where Tendermint Core reports that "This node is a +validator". It also could be just an observer (regular node). + +Next we replay all the messages from the WAL. + +```sh +I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 +I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight +I[10-04|13:54:30.390] Replay: Done module=consensus +``` + +"Started node" message signals that everything is ready for work. + +```sh +I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server +I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" +``` + +Next follows a standard block creation cycle, where we enter a new +round, propose a block, receive more than 2/3 of prevotes, then +precommits and finally have a chance to commit a block. For details, +please refer to [Byzantine Consensus +Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md). + +```sh +I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus +I[10-04|13:54:30.393] enterPropose(91/0). 
Current: 91/0/RoundStepNewRound module=consensus +I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" +I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}" +I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E +I[10-04|13:54:30.397] enterPrevote(91/0). Current: 91/0/RoundStepPropose module=consensus +I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0 +I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null +I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" +I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus +I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E +I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null +I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" +I[10-04|13:54:30.404] enterCommit(91/0). 
Current: 91/0/RoundStepPrecommit module=consensus +I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:30.405] Block{ + Header{ + ChainID: test-chain-3MNw2N + Height: 91 + Time: 2017-10-04 13:54:30.393 +0000 UTC + NumTxs: 0 + LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 + LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D + Data: + Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297 + App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD + }#F671D562C7B9242900A286E1882EE64E5556FE9E + Data{ + + }# + Commit{ + BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 + Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}} + }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D +}#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus +I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0 +I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD +I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91 +``` diff --git a/docs/nodes/state-sync.md b/docs/nodes/state-sync.md index 9ead79459..25b4f3178 100644 --- a/docs/nodes/state-sync.md +++ b/docs/nodes/state-sync.md @@ -29,7 +29,7 @@ If you are relying on publicly exposed RPC's to get the need information, you ca Example: ```bash -curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" +curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" ``` The response will be: diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index da2fc6f8b..2ed05ecf5 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ 
b/docs/tendermint-core/how-to-read-logs.md @@ -1,145 +1,7 @@ --- -order: 7 +order: false --- # How to read logs -## Walkabout example - -We first create three connections (mempool, consensus and query) to the -application (running `kvstore` locally in this case). - -```sh -I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn -I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient -I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient -I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient -``` - -Then Tendermint Core and the application perform a handshake. - -```sh -I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD -I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 -I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD -``` - -After that, we start a few more things like the event switch, reactors, -and perform UPNP discover in order to detect the IP address. 
- -```sh -I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch -I[10-04|13:54:27.375] This node is a validator module=consensus -I[10-04|13:54:27.379] Starting Node module=main impl=Node -I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 -I[10-04|13:54:27.382] Getting UPNP external address module=p2p -I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" -I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) -I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" -I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor -I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor -I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor -I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false -I[10-04|13:54:30.387] Starting ConsensusState module=consensus impl=ConsensusState -I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL -I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker -``` - -Notice the second row where Tendermint Core reports that "This node is a -validator". It also could be just an observer (regular node). - -Next we replay all the messages from the WAL. - -```sh -I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 -I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight -I[10-04|13:54:30.390] Replay: Done module=consensus -``` - -"Started node" message signals that everything is ready for work. 
- -```sh -I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server -I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" -``` - -Next follows a standard block creation cycle, where we enter a new -round, propose a block, receive more than 2/3 of prevotes, then -precommits and finally have a chance to commit a block. For details, -please refer to [Byzantine Consensus -Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md). - -```sh -I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus -I[10-04|13:54:30.393] enterPropose(91/0). Current: 91/0/RoundStepNewRound module=consensus -I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" -I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}" -I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E -I[10-04|13:54:30.397] enterPrevote(91/0). 
Current: 91/0/RoundStepPropose module=consensus -I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0 -I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null -I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" -I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus -I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E -I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null -I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" -I[10-04|13:54:30.404] enterCommit(91/0). 
Current: 91/0/RoundStepPrecommit module=consensus -I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD -I[10-04|13:54:30.405] Block{ - Header{ - ChainID: test-chain-3MNw2N - Height: 91 - Time: 2017-10-04 13:54:30.393 +0000 UTC - NumTxs: 0 - LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D - Data: - Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297 - App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - }#F671D562C7B9242900A286E1882EE64E5556FE9E - Data{ - - }# - Commit{ - BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}} - }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D -}#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus -I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0 -I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD -I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91 -``` - -## List of modules - -Here is the list of modules you may encounter in Tendermint's log and a -little overview what they do. - -- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI - client with respect to the application and maintains 3 connections: - mempool, consensus and query. The code used by Tendermint Core can - be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client). -- `blockchain` Provides storage, pool (a group of peers), and reactor - for both storing and exchanging blocks between peers. -- `consensus` The heart of Tendermint core, which is the - implementation of the consensus algorithm. 
Includes two - "submodules": `wal` (write-ahead logging) for ensuring data - integrity and `replay` to replay blocks and messages on recovery - from a crash. -- `events` Simple event notification system. The list of events can be - found - [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). - You can subscribe to them by calling `subscribe` RPC method. Refer - to [RPC docs](./rpc.md) for additional information. -- `mempool` Mempool module handles all incoming transactions, whenever - they are coming from peers or the application. -- `p2p` Provides an abstraction around peer-to-peer communication. For - more details, please check out the - [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). -- `rpc` [Tendermint's RPC](./rpc.md). -- `rpc-server` RPC server. For implementation details, please read the - [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go). -- `state` Represents the latest state and execution submodule, which - executes blocks against the application. -- `types` A collection of the publicly exposed types and methods to - work with them. +This file has moved to the [node section](../nodes/logging.md). diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 81bb7dd2d..cd6e5a18a 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -40,7 +40,7 @@ Default logging level (`log-level = "main:info,state:info,statesync:info,*:error normal operation mode. Read [this post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756) for details on how to configure `log-level` config variable. Some of the -modules can be found [here](./how-to-read-logs.md#list-of-modules). If +modules can be found [here](../nodes/logging.md#list-of-modules). 
If you're trying to debug Tendermint or asked to provide logs with debug logging level, you can do so by running Tendermint with `--log-level="*:debug"`. @@ -109,7 +109,7 @@ to achieve the same things. ## Debugging Tendermint If you ever have to debug Tendermint, the first thing you should probably do is -check out the logs. See [How to read logs](./how-to-read-logs.md), where we +check out the logs. See [Logging](../nodes/logging.md), where we explain what certain log statements mean. If, after skimming through the logs, things are not clear still, the next thing @@ -307,7 +307,6 @@ flush throttle timeout and increase other params. ```toml [p2p] - send-rate=20000000 # 2MB/s recv-rate=20000000 # 2MB/s flush-throttle-timeout=10 diff --git a/go.mod b/go.mod index 8f2176b22..a7c0890e8 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/prometheus/client_golang v1.9.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.7.0 - github.com/sasha-s/go-deadlock v0.2.0 + github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.1.1 github.com/spf13/viper v1.7.1 diff --git a/go.sum b/go.sum index 555ee31c9..1c99c4972 100644 --- a/go.sum +++ b/go.sum @@ -484,6 +484,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= +github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -627,6 +629,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -756,6 +759,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index fd27cf33f..d154f7b71 100644 --- 
a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -8,7 +8,7 @@ import ( "strings" ) -// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. +// HexBytes enables HEX-encoding for json/encoding. type HexBytes []byte var ( @@ -58,7 +58,7 @@ func (bz *HexBytes) UnmarshalJSON(data []byte) error { return nil } -// Allow it to fulfill various interfaces in light-client, etc... +// Bytes fulfils various interfaces in light-client, etc... func (bz HexBytes) Bytes() []byte { return bz } @@ -67,6 +67,9 @@ func (bz HexBytes) String() string { return strings.ToUpper(hex.EncodeToString(bz)) } +// Format writes either address of 0th element in a slice in base 16 notation, +// with leading 0x (%p), or casts HexBytes to bytes and writes as hexadecimal +// string to s. func (bz HexBytes) Format(s fmt.State, verb rune) { switch verb { case 'p': diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index 6205beec2..db882f1c1 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -3,6 +3,7 @@ package bytes import ( "encoding/json" "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -24,7 +25,6 @@ func TestMarshal(t *testing.T) { // Test that the hex encoding works. 
func TestJSONMarshal(t *testing.T) { - type TestStruct struct { B1 []byte B2 HexBytes @@ -64,3 +64,10 @@ func TestJSONMarshal(t *testing.T) { }) } } + +func TestHexBytes_String(t *testing.T) { + hs := HexBytes([]byte("test me")) + if _, err := strconv.ParseInt(hs.String(), 16, 64); err != nil { + t.Fatal(err) + } +} diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go index 75273f88c..7c106336c 100644 --- a/libs/log/tm_logger.go +++ b/libs/log/tm_logger.go @@ -21,7 +21,7 @@ type tmLogger struct { // Interface assertions var _ Logger = (*tmLogger)(nil) -// NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer +// NewTMLogger returns a logger that encodes msg and keyvals to the Writer // using go-kit's log as an underlying logger and our custom formatter. Note // that underlying logger could be swapped with something else. func NewTMLogger(w io.Writer) Logger { @@ -52,6 +52,7 @@ func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) te // Info logs a message at level Info. func (l *tmLogger) Info(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Info(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { errLogger := kitlevel.Error(l.srcLogger) kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again @@ -61,6 +62,7 @@ func (l *tmLogger) Info(msg string, keyvals ...interface{}) { // Debug logs a message at level Debug. func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Debug(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { errLogger := kitlevel.Error(l.srcLogger) kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again @@ -70,6 +72,7 @@ func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { // Error logs a message at level Error. 
func (l *tmLogger) Error(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Error(l.srcLogger) + lWithMsg := kitlog.With(lWithLevel, msgKey, msg) if err := lWithMsg.Log(keyvals...); err != nil { lWithMsg.Log("err", err) //nolint:errcheck // no need to check error again diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go index 92fbe40eb..cbe29d994 100644 --- a/libs/log/tm_logger_test.go +++ b/libs/log/tm_logger_test.go @@ -20,6 +20,75 @@ func TestLoggerLogsItsErrors(t *testing.T) { } } +func TestInfo(t *testing.T) { + var bufInfo bytes.Buffer + + l := log.NewTMLogger(&bufInfo) + l.Info("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufInfo.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. + receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func TestDebug(t *testing.T) { + var bufDebug bytes.Buffer + + ld := log.NewTMLogger(&bufDebug) + ld.Debug("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufDebug.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. 
+ receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func TestError(t *testing.T) { + var bufErr bytes.Buffer + + le := log.NewTMLogger(&bufErr) + le.Error("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufErr.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. + receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + func BenchmarkTMLoggerSimple(b *testing.B) { benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) } diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index 9519fd310..c6004cc62 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -2,8 +2,10 @@ package log import ( "bytes" + "encoding/hex" "fmt" "io" + "strings" "sync" "time" @@ -80,6 +82,11 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { excludeIndexes = append(excludeIndexes, i) module = keyvals[i+1].(string) } + + // Print []byte as a hexadecimal string (uppercased) + if b, ok := keyvals[i+1].([]byte); ok { + keyvals[i+1] = strings.ToUpper(hex.EncodeToString(b)) + } } // Form a custom Tendermint line diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index be87432a1..955a9e405 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -53,6 +53,12 @@ func TestTMFmtLogger(t *testing.T) { 
t.Fatal(err) } assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("hash", []byte("test me")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hash=74657374206D65\n$`), buf.String()) } func BenchmarkTMFmtLoggerSimple(b *testing.B) { diff --git a/light/client.go b/light/client.go index 3b54c48a9..04f9653fc 100644 --- a/light/client.go +++ b/light/client.go @@ -286,7 +286,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(ctx context.Context, options Tru c.logger.Info("Client initialized with old header (trusted is more recent)", "old", options.Height, "trustedHeight", c.latestTrustedBlock.Height, - "trustedHash", hash2str(c.latestTrustedBlock.Hash())) + "trustedHash", c.latestTrustedBlock.Hash()) action := fmt.Sprintf( "Rollback to %d (%X)? Note this will remove newer light blocks up to %d (%X)", @@ -310,7 +310,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(ctx context.Context, options Tru if !bytes.Equal(primaryHash, c.latestTrustedBlock.Hash()) { c.logger.Info("Prev. trusted header's hash (h1) doesn't match hash from primary provider (h2)", - "h1", hash2str(c.latestTrustedBlock.Hash()), "h2", hash2str(primaryHash)) + "h1", c.latestTrustedBlock.Hash(), "h2", primaryHash) action := fmt.Sprintf( "Prev. trusted header's hash %X doesn't match hash %X from primary provider. Remove all the stored light blocks?", @@ -425,7 +425,7 @@ func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock, if err != nil { return nil, err } - c.logger.Info("Advanced to new state", "height", latestBlock.Height, "hash", hash2str(latestBlock.Hash())) + c.logger.Info("Advanced to new state", "height", latestBlock.Height, "hash", latestBlock.Hash()) return latestBlock, nil } @@ -450,7 +450,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // Check if the light block is already verified. 
h, err := c.TrustedLightBlock(height) if err == nil { - c.logger.Info("Header has already been verified", "height", height, "hash", hash2str(h.Hash())) + c.logger.Info("Header has already been verified", "height", height, "hash", h.Hash()) // Return already trusted light block return h, nil } @@ -508,7 +508,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now return fmt.Errorf("existing trusted header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) } c.logger.Info("Header has already been verified", - "height", newHeader.Height, "hash", hash2str(newHeader.Hash())) + "height", newHeader.Height, "hash", newHeader.Hash()) return nil } @@ -526,7 +526,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now } func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.LightBlock, now time.Time) error { - c.logger.Info("VerifyHeader", "height", newLightBlock.Height, "hash", hash2str(newLightBlock.Hash())) + c.logger.Info("VerifyHeader", "height", newLightBlock.Height, "hash", newLightBlock.Hash()) var ( verifyFunc func(ctx context.Context, trusted *types.LightBlock, new *types.LightBlock, now time.Time) error @@ -607,9 +607,9 @@ func (c *Client) verifySequential( // 2) Verify them c.logger.Debug("Verify adjacent newLightBlock against verifiedBlock", "trustedHeight", verifiedBlock.Height, - "trustedHash", hash2str(verifiedBlock.Hash()), + "trustedHash", verifiedBlock.Hash(), "newHeight", interimBlock.Height, - "newHash", hash2str(interimBlock.Hash())) + "newHash", interimBlock.Hash()) err = VerifyAdjacent(verifiedBlock.SignedHeader, interimBlock.SignedHeader, interimBlock.ValidatorSet, c.trustingPeriod, now, c.maxClockDrift) @@ -698,9 +698,9 @@ func (c *Client) verifySkipping( for { c.logger.Debug("Verify non-adjacent newHeader against verifiedBlock", "trustedHeight", verifiedBlock.Height, - "trustedHash", hash2str(verifiedBlock.Hash()), + "trustedHash", verifiedBlock.Hash(), 
"newHeight", blockCache[depth].Height, - "newHash", hash2str(blockCache[depth].Hash())) + "newHash", blockCache[depth].Hash()) err := Verify(verifiedBlock.SignedHeader, verifiedBlock.ValidatorSet, blockCache[depth].SignedHeader, blockCache[depth].ValidatorSet, c.trustingPeriod, now, c.maxClockDrift, c.trustLevel) @@ -920,9 +920,9 @@ func (c *Client) backwards( interimHeader = interimBlock.Header c.logger.Debug("Verify newHeader against verifiedHeader", "trustedHeight", verifiedHeader.Height, - "trustedHash", hash2str(verifiedHeader.Hash()), + "trustedHash", verifiedHeader.Hash(), "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) + "newHash", interimHeader.Hash()) if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil { c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary) if replaceErr := c.replacePrimaryProvider(); replaceErr != nil { @@ -1033,7 +1033,3 @@ and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "wit return nil } - -func hash2str(hash []byte) string { - return fmt.Sprintf("%X", hash) -} diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 5e610ed4b..f9ef9ae23 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -776,7 +776,7 @@ func TxKey(tx types.Tx) [TxKeySize]byte { return sha256.Sum256(tx) } -// txID is the hex encoded hash of the bytes as a types.Tx. -func txID(tx []byte) string { - return fmt.Sprintf("%X", types.Tx(tx).Hash()) +// txID is a hash of the Tx. +func txID(tx []byte) []byte { + return types.Tx(tx).Hash() } diff --git a/mempool/reactor.go b/mempool/reactor.go index 4cbbe1c1f..f6ae9dc9c 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -39,7 +39,7 @@ const ( // peer information. This should eventually be replaced with a message-oriented // approach utilizing the p2p stack. 
type PeerManager interface { - GetHeight(p2p.NodeID) (int64, error) + GetHeight(p2p.NodeID) int64 } // Reactor implements a service that contains mempool of txs that are broadcasted @@ -357,10 +357,8 @@ func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) { memTx := next.Value.(*mempoolTx) if r.peerMgr != nil { - height, err := r.peerMgr.GetHeight(peerID) - if err != nil { - r.Logger.Error("failed to get peer height", "err", err) - } else if height > 0 && height < memTx.Height()-1 { + height := r.peerMgr.GetHeight(peerID) + if height > 0 && height < memTx.Height()-1 { // allow for a lag of one block time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 564dcd3b5..c878a5dca 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/fortytw2/leaktest" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" @@ -93,7 +92,13 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, logger log.Logger, chBuf uint) return rts } -func simulateRouter(wg *sync.WaitGroup, primary *reactorTestSuite, suites []*reactorTestSuite, numOut int) { +func simulateRouter( + wg *sync.WaitGroup, + primary *reactorTestSuite, + suites []*reactorTestSuite, + numOut int, +) { + wg.Add(1) // create a mapping for efficient suite lookup by peer ID @@ -160,6 +165,15 @@ func TestReactorBroadcastTxs(t *testing.T) { testSuites[i] = setup(t, config.Mempool, logger, 0) } + // ignore all peer errors + for _, suite := range testSuites { + go func(s *reactorTestSuite) { + // drop all errors on the mempool channel + for range s.mempoolPeerErrCh { + } + }(suite) + } + primary := testSuites[0] secondaries := testSuites[1:] @@ -267,6 +281,15 @@ func TestReactorNoBroadcastToSender(t *testing.T) { primary := testSuites[0] secondary := testSuites[1] + // ignore all peer errors + for _, suite := range testSuites 
{ + go func(s *reactorTestSuite) { + // drop all errors on the mempool channel + for range s.mempoolPeerErrCh { + } + }(suite) + } + peerID := uint16(1) _ = checkTxs(t, primary.reactor.mempool, numTxs, peerID) @@ -312,6 +335,15 @@ func TestReactor_MaxTxBytes(t *testing.T) { testSuites[i] = setup(t, config.Mempool, logger, 0) } + // ignore all peer errors + for _, suite := range testSuites { + go func(s *reactorTestSuite) { + // drop all errors on the mempool channel + for range s.mempoolPeerErrCh { + } + }(suite) + } + primary := testSuites[0] secondary := testSuites[1] @@ -356,10 +388,17 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { reactor := setup(t, config.Mempool, log.TestingLogger().With("node", 0), 0) go func() { + // drop all messages on the mempool channel for range reactor.mempoolOutCh { } }() + go func() { + // drop all errors on the mempool channel + for range reactor.mempoolPeerErrCh { + } + }() + peerID, err := p2p.NewNodeID("00ffaa") require.NoError(t, err) @@ -407,8 +446,22 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { config := cfg.TestConfig() - primary := setup(t, config.Mempool, log.TestingLogger().With("node", 0), 0) - secondary := setup(t, config.Mempool, log.TestingLogger().With("node", 1), 0) + testSuites := []*reactorTestSuite{ + setup(t, config.Mempool, log.TestingLogger().With("node", 0), 0), + setup(t, config.Mempool, log.TestingLogger().With("node", 1), 0), + } + + primary := testSuites[0] + secondary := testSuites[1] + + // ignore all peer errors + for _, suite := range testSuites { + go func(s *reactorTestSuite) { + // drop all errors on the mempool channel + for range s.mempoolPeerErrCh { + } + }(suite) + } // connect peer primary.peerUpdatesCh <- p2p.PeerUpdate{ @@ -421,8 +474,4 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { Status: p2p.PeerStatusDown, PeerID: secondary.peerID, } - - // check that we are not leaking any go-routines - // i.e. 
broadcastTxRoutine finishes when peer is stopped - leaktest.CheckTimeout(t, 10*time.Second)() } diff --git a/node/node.go b/node/node.go index a6d4c56d2..62d0bb166 100644 --- a/node/node.go +++ b/node/node.go @@ -762,7 +762,11 @@ func NewNode(config *cfg.Config, logNodeStartupInfo(state, pubKey, logger, consensusLogger) // TODO: Fetch and provide real options and do proper p2p bootstrapping. - peerMgr := p2p.NewPeerManager(p2p.PeerManagerOptions{}) + // TODO: Use a persistent peer database. + peerMgr, err := p2p.NewPeerManager(dbm.NewMemDB(), p2p.PeerManagerOptions{}) + if err != nil { + return nil, err + } csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) mpReactorShim, mpReactor, mempool := createMempoolReactor(config, proxyApp, state, memplMetrics, peerMgr, logger) diff --git a/p2p/netaddress.go b/p2p/netaddress.go index 3b6ec6e0d..a9bd72315 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -139,8 +139,9 @@ func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { } } -// NetAddressFromProto converts a Protobuf NetAddress into a native struct. -func NetAddressFromProto(pb tmp2p.NetAddress) (*NetAddress, error) { +// NetAddressFromProto converts a Protobuf PexAddress into a native struct. +// FIXME: Remove this when legacy PEX reactor is removed. +func NetAddressFromProto(pb tmp2p.PexAddress) (*NetAddress, error) { ip := net.ParseIP(pb.IP) if ip == nil { return nil, fmt.Errorf("invalid IP address %v", pb.IP) @@ -155,8 +156,9 @@ func NetAddressFromProto(pb tmp2p.NetAddress) (*NetAddress, error) { }, nil } -// NetAddressesFromProto converts a slice of Protobuf NetAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.NetAddress) ([]*NetAddress, error) { +// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. +// FIXME: Remove this when legacy PEX reactor is removed. 
+func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*NetAddress, error) { nas := make([]*NetAddress, 0, len(pbs)) for _, pb := range pbs { na, err := NetAddressFromProto(pb) @@ -168,9 +170,10 @@ func NetAddressesFromProto(pbs []tmp2p.NetAddress) ([]*NetAddress, error) { return nas, nil } -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf slice. -func NetAddressesToProto(nas []*NetAddress) []tmp2p.NetAddress { - pbs := make([]tmp2p.NetAddress, 0, len(nas)) +// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. +// FIXME: Remove this when legacy PEX reactor is removed. +func NetAddressesToProto(nas []*NetAddress) []tmp2p.PexAddress { + pbs := make([]tmp2p.PexAddress, 0, len(nas)) for _, na := range nas { if na != nil { pbs = append(pbs, na.ToProto()) @@ -179,9 +182,10 @@ func NetAddressesToProto(nas []*NetAddress) []tmp2p.NetAddress { return pbs } -// ToProto converts a NetAddress to Protobuf. -func (na *NetAddress) ToProto() tmp2p.NetAddress { - return tmp2p.NetAddress{ +// ToProto converts a NetAddress to a Protobuf PexAddress. +// FIXME: Remove this when legacy PEX reactor is removed. +func (na *NetAddress) ToProto() tmp2p.PexAddress { + return tmp2p.PexAddress{ ID: string(na.ID), IP: na.IP.String(), Port: uint32(na.Port), diff --git a/p2p/peer.go b/p2p/peer.go index 8cace63c5..d0b46e9aa 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -12,82 +12,129 @@ import ( "runtime/debug" "sort" "strconv" + "strings" "sync" "time" + "github.com/gogo/protobuf/proto" + "github.com/google/orderedcode" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmconn "github.com/tendermint/tendermint/p2p/conn" + p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" ) -// PeerAddress is a peer address URL. +// PeerAddress is a peer address URL. 
It differs from Endpoint in that the +// address hostname may be expanded into multiple IP addresses (thus multiple +// endpoints). +// +// If the URL is opaque, i.e. of the form "scheme:", then the opaque +// part has to contain either the node ID or a node ID and path in the form +// "scheme:@". type PeerAddress struct { - *url.URL + ID NodeID + Protocol Protocol + Hostname string + Port uint16 + Path string } -// ParsePeerAddress parses a peer address URL into a PeerAddress. -func ParsePeerAddress(address string) (PeerAddress, error) { - u, err := url.Parse(address) - if err != nil || u == nil { - return PeerAddress{}, fmt.Errorf("unable to parse peer address %q: %w", address, err) +// ParsePeerAddress parses a peer address URL into a PeerAddress, +// normalizing and validating it. +func ParsePeerAddress(urlString string) (PeerAddress, error) { + url, err := url.Parse(urlString) + if err != nil || url == nil { + return PeerAddress{}, fmt.Errorf("invalid peer address %q: %w", urlString, err) } - if u.Scheme == "" { - u.Scheme = string(defaultProtocol) - } - pa := PeerAddress{URL: u} - if err = pa.Validate(); err != nil { - return PeerAddress{}, err - } - return pa, nil -} -// NodeID returns the address node ID. -func (a PeerAddress) NodeID() NodeID { - return NodeID(a.User.Username()) + address := PeerAddress{} + + // If the URL is opaque, i.e. in the form "scheme:", we specify the + // opaque bit to be either a node ID or a node ID and path in the form + // "scheme:@". + if url.Opaque != "" { + parts := strings.Split(url.Opaque, "@") + if len(parts) > 2 { + return PeerAddress{}, fmt.Errorf("invalid address format %q, unexpected @", urlString) + } + address.ID, err = NewNodeID(parts[0]) + if err != nil { + return PeerAddress{}, fmt.Errorf("invalid peer ID %q: %w", parts[0], err) + } + if len(parts) == 2 { + address.Path = parts[1] + } + return address, nil + } + + // Otherwise, just parse a normal networked URL. 
+ address.ID, err = NewNodeID(url.User.Username()) + if err != nil { + return PeerAddress{}, fmt.Errorf("invalid peer ID %q: %w", url.User.Username(), err) + } + + if url.Scheme != "" { + address.Protocol = Protocol(strings.ToLower(url.Scheme)) + } else { + address.Protocol = defaultProtocol + } + + address.Hostname = strings.ToLower(url.Hostname()) + + if portString := url.Port(); portString != "" { + port64, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return PeerAddress{}, fmt.Errorf("invalid port %q: %w", portString, err) + } + address.Port = uint16(port64) + } + + // NOTE: URL paths are case-sensitive, so we don't lowercase them. + address.Path = url.Path + if url.RawPath != "" { + address.Path = url.RawPath + } + if url.RawQuery != "" { + address.Path += "?" + url.RawQuery + } + if url.RawFragment != "" { + address.Path += "#" + url.RawFragment + } + if address.Path != "" && address.Path[0] != '/' && address.Path[0] != '#' { + address.Path = "/" + address.Path + } + + return address, address.Validate() } // Resolve resolves a PeerAddress into a set of Endpoints, by expanding -// out a DNS name in Host to its IP addresses. Field mapping: -// -// Scheme → Endpoint.Protocol -// Host → Endpoint.IP -// User → Endpoint.PeerID -// Port → Endpoint.Port -// Path+Query+Fragment,Opaque → Endpoint.Path -// +// out a DNS hostname to IP addresses. func (a PeerAddress) Resolve(ctx context.Context) ([]Endpoint, error) { - ips, err := net.DefaultResolver.LookupIP(ctx, "ip", a.Host) + // If there is no hostname, this is an opaque URL in the form + // "scheme:". + if a.Hostname == "" { + return []Endpoint{{ + PeerID: a.ID, + Protocol: a.Protocol, + Path: a.Path, + }}, nil + } + + ips, err := net.DefaultResolver.LookupIP(ctx, "ip", a.Hostname) if err != nil { return nil, err } - port, err := a.parsePort() - if err != nil { - return nil, err - } - - path := a.Path - if a.RawPath != "" { - path = a.RawPath - } - if a.Opaque != "" { // used for e.g. 
"about:blank" style URLs - path = a.Opaque - } - if a.RawQuery != "" { - path += "?" + a.RawQuery - } - if a.RawFragment != "" { - path += "#" + a.RawFragment - } - endpoints := make([]Endpoint, len(ips)) for i, ip := range ips { endpoints[i] = Endpoint{ - PeerID: a.NodeID(), - Protocol: Protocol(a.Scheme), + PeerID: a.ID, + Protocol: a.Protocol, IP: ip, - Port: port, - Path: path, + Port: a.Port, + Path: a.Path, } } return endpoints, nil @@ -95,35 +142,37 @@ func (a PeerAddress) Resolve(ctx context.Context) ([]Endpoint, error) { // Validates validates a PeerAddress. func (a PeerAddress) Validate() error { - if a.Scheme == "" { + if a.Protocol == "" { return errors.New("no protocol") } - if id := a.User.Username(); id == "" { + if a.ID == "" { return errors.New("no peer ID") - } else if err := NodeID(id).Validate(); err != nil { + } else if err := a.ID.Validate(); err != nil { return fmt.Errorf("invalid peer ID: %w", err) } - if a.Hostname() == "" && len(a.Query()) == 0 && a.Opaque == "" { - return errors.New("no host or path given") - } - if port, err := a.parsePort(); err != nil { - return err - } else if port > 0 && a.Hostname() == "" { - return errors.New("cannot specify port without host") + if a.Port > 0 && a.Hostname == "" { + return errors.New("cannot specify port without hostname") } return nil } -// parsePort returns the port number as a uint16. -func (a PeerAddress) parsePort() (uint16, error) { - if portString := a.Port(); portString != "" { - port64, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return 0, fmt.Errorf("invalid port %q: %w", portString, err) +// String formats the address as a URL string. +func (a PeerAddress) String() string { + // Handle opaque URLs. 
+ if a.Hostname == "" { + s := fmt.Sprintf("%s:%s", a.Protocol, a.ID) + if a.Path != "" { + s += "@" + a.Path } - return uint16(port64), nil + return s } - return 0, nil + + s := fmt.Sprintf("%s://%s@%s", a.Protocol, a.ID, a.Hostname) + if a.Port > 0 { + s += ":" + strconv.Itoa(int(a.Port)) + } + s += a.Path // We've already normalized the path with appropriate prefix in ParsePeerAddress() + return s } // PeerStatus specifies peer statuses. @@ -223,6 +272,14 @@ const ( // It does not manage actual connections (this is handled by the Router), // only the peer lifecycle state. // +// We track dialing and connected states independently. This allows us to accept +// an inbound connection from a peer while the router is also dialing an +// outbound connection to that same peer, which will cause the dialer to +// eventually error when attempting to mark the peer as connected. This also +// avoids race conditions where multiple goroutines may end up dialing a peer if +// an incoming connection was briefly accepted and disconnected while we were +// also dialing. +// // For an outbound connection, the flow is as follows: // - DialNext: returns a peer address to dial, marking the peer as dialing. // - DialFailed: reports a dial failure, unmarking the peer as dialing. @@ -239,27 +296,40 @@ const ( // - Disconnected: peer disconnects, unmarking as connected and broadcasts a // PeerStatusDown peer update. // -// If we need to evict a peer, typically because we have connected to additional -// higher-scored peers and need to shed lower-scored ones, the flow is as follows: -// - EvictNext: returns a peer ID to evict, marking peer as evicting. -// - Disconnected: peer was disconnected, unmarking as connected and evicting, -// and broadcasts a PeerStatusDown peer update. 
+// When evicting peers, either because peers are explicitly scheduled for +// eviction or we are connected to too many peers, the flow is as follows: +// - EvictNext: if marked evict and connected, unmark evict and mark evicting. +// If beyond MaxConnected, pick lowest-scored peer and mark evicting. +// - Disconnected: unmark connected, evicting, evict, and broadcast a +// PeerStatusDown peer update. // -// We track dialing and connected states independently. This allows us to accept -// an inbound connection from a peer while the router is also dialing an -// outbound connection to that same peer, which will cause the dialer to -// eventually error (when attempting to mark the peer as connected). This also -// avoids race conditions where multiple goroutines may end up dialing a peer if -// an incoming connection was briefly accepted and disconnected while we were -// also dialing. +// If all connection slots are full (at MaxConnections), we can use up to +// MaxConnectionsUpgrade additional connections to probe any higher-scored +// unconnected peers, and if we reach them (or they reach us) we allow the +// connection and evict a lower-scored peer. We mark the lower-scored peer as +// upgrading[from]=to to make sure no other higher-scored peers can claim the +// same one for an upgrade. The flow is as follows: +// - Accepted: if upgrade is possible, mark connected and add lower-scored to evict. +// - DialNext: if upgrade is possible, mark upgrading[from]=to and dialing. +// - DialFailed: unmark upgrading[from]=to and dialing. +// - Dialed: unmark upgrading[from]=to and dialing, mark as connected, add +// lower-scored to evict. +// - EvictNext: pick peer from evict, mark as evicting. +// - Disconnected: unmark connected, upgrading[from]=to, evict, evicting. 
type PeerManager struct { - options PeerManagerOptions + options PeerManagerOptions + wakeDialCh chan struct{} // wakes up DialNext() on relevant peer changes + wakeEvictCh chan struct{} // wakes up EvictNext() on relevant peer changes + closeCh chan struct{} // signal channel for Close() + closeOnce sync.Once mtx sync.Mutex store *peerStore - dialing map[NodeID]bool - connected map[NodeID]bool - evicting map[NodeID]bool + dialing map[NodeID]bool // peers being dialed (DialNext -> Dialed/DialFail) + upgrading map[NodeID]NodeID // peers claimed for upgrade (DialNext -> Dialed/DialFail) + connected map[NodeID]bool // connected peers (Dialed/Accepted -> Disconnected) + evict map[NodeID]bool // peers scheduled for eviction (Connected -> EvictNext) + evicting map[NodeID]bool // peers being evicted (EvictNext -> Disconnected) subscriptions map[*PeerUpdatesCh]*PeerUpdatesCh // keyed by struct identity (address) } @@ -271,6 +341,11 @@ type PeerManagerOptions struct { // necessary to make room for these. PersistentPeers []NodeID + // MaxPeers is the maximum number of peers to track information about, i.e. + // store in the peer store. When exceeded, the lowest-scored unconnected peers + // will be deleted. 0 means no limit. + MaxPeers uint16 + // MaxConnected is the maximum number of connected peers (inbound and // outbound). 0 means no limit. MaxConnected uint16 @@ -305,31 +380,91 @@ type PeerManagerOptions struct { RetryTimeJitter time.Duration } -// isPersistent is a convenience function that checks if the given peer ID -// is contained in PersistentPeers. It just uses a linear search, since -// PersistentPeers is expected to be small. -func (o PeerManagerOptions) isPersistent(id NodeID) bool { - for _, p := range o.PersistentPeers { - if id == p { - return true - } - } - return false -} - // NewPeerManager creates a new peer manager. 
-func NewPeerManager(options PeerManagerOptions) *PeerManager { - return &PeerManager{ +func NewPeerManager(peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) { + store, err := newPeerStore(peerDB) + if err != nil { + return nil, err + } + peerManager := &PeerManager{ options: options, - // FIXME: Once the store persists data, we need to update existing - // peers in the store with any new information, e.g. changes to - // PersistentPeers configuration. - store: newPeerStore(), + closeCh: make(chan struct{}), + + // We use a buffer of size 1 for these trigger channels, with + // non-blocking sends. This ensures that if e.g. wakeDial() is called + // multiple times before the initial trigger is picked up we only + // process the trigger once. + // + // FIXME: This should maybe be a libs/sync type. + wakeDialCh: make(chan struct{}, 1), + wakeEvictCh: make(chan struct{}, 1), + + store: store, dialing: map[NodeID]bool{}, + upgrading: map[NodeID]NodeID{}, connected: map[NodeID]bool{}, + evict: map[NodeID]bool{}, evicting: map[NodeID]bool{}, subscriptions: map[*PeerUpdatesCh]*PeerUpdatesCh{}, } + if err = peerManager.configurePeers(); err != nil { + return nil, err + } + if err = peerManager.prunePeers(); err != nil { + return nil, err + } + return peerManager, nil +} + +// configurePeers configures peers in the peer store with ephemeral runtime +// configuration, e.g. setting peerInfo.Persistent based on +// PeerManagerOptions.PersistentPeers. The caller must hold the mutex lock. +func (m *PeerManager) configurePeers() error { + for _, peerID := range m.options.PersistentPeers { + if peer, ok := m.store.Get(peerID); ok { + peer.Persistent = true + if err := m.store.Set(peer); err != nil { + return err + } + } + } + return nil +} + +// prunePeers removes peers from the peer store if it contains more than +// MaxPeers peers. The lowest-scored non-connected peers are removed. +// The caller must hold the mutex lock. 
+func (m *PeerManager) prunePeers() error { + if m.options.MaxPeers == 0 || m.store.Size() <= int(m.options.MaxPeers) { + return nil + } + m.mtx.Lock() + defer m.mtx.Unlock() + + ranked := m.store.Ranked() + for i := len(ranked) - 1; i >= 0; i-- { + peerID := ranked[i].ID + switch { + case m.store.Size() <= int(m.options.MaxPeers): + break + case m.dialing[peerID]: + case m.connected[peerID]: + case m.evicting[peerID]: + default: + if err := m.store.Delete(peerID); err != nil { + return err + } + } + } + return nil +} + +// Close closes the peer manager, releasing resources allocated with it +// (specifically any running goroutines). +func (m *PeerManager) Close() { + m.closeOnce.Do(func() { + close(m.closeCh) + }) } // Add adds a peer to the manager, given as an address. If the peer already @@ -341,18 +476,60 @@ func (m *PeerManager) Add(address PeerAddress) error { m.mtx.Lock() defer m.mtx.Unlock() - peer, err := m.store.Get(address.NodeID()) - if err != nil { + peer, ok := m.store.Get(address.ID) + if !ok { + peer = m.makePeerInfo(address.ID) + } + if _, ok := peer.AddressInfo[address.String()]; !ok { + peer.AddressInfo[address.String()] = &peerAddressInfo{Address: address} + } + if err := m.store.Set(peer); err != nil { return err } - if peer == nil { - peer = &peerInfo{ - ID: address.NodeID(), - Persistent: m.options.isPersistent(address.NodeID()), + if err := m.prunePeers(); err != nil { + return err + } + m.wakeDial() + return nil +} + +// Advertise returns a list of peer addresses to advertise to a peer. +// +// FIXME: This is fairly naïve and only returns the addresses of the +// highest-ranked peers. 
+func (m *PeerManager) Advertise(peerID NodeID, limit uint16) []PeerAddress { + m.mtx.Lock() + defer m.mtx.Unlock() + + addresses := make([]PeerAddress, 0, limit) + for _, peer := range m.store.Ranked() { + if peer.ID == peerID { + continue + } + for _, addressInfo := range peer.AddressInfo { + if len(addresses) >= int(limit) { + return addresses + } + addresses = append(addresses, addressInfo.Address) } } - peer.AddAddress(address) - return m.store.Set(peer) + return addresses +} + +// makePeerInfo creates a peerInfo for a new peer. +func (m *PeerManager) makePeerInfo(id NodeID) peerInfo { + isPersistent := false + for _, p := range m.options.PersistentPeers { + if id == p { + isPersistent = true + break + } + } + return peerInfo{ + ID: id, + Persistent: isPersistent, + AddressInfo: map[string]*peerAddressInfo{}, + } } // Subscribe subscribes to peer updates. The caller must consume the peer @@ -398,58 +575,60 @@ func (m *PeerManager) broadcast(peerUpdate PeerUpdate) { } // DialNext finds an appropriate peer address to dial, and marks it as dialing. -// The peer will not be returned again until Dialed() or DialFailed() is called -// for the peer and it is no longer connected. Returns an empty ID if no -// appropriate peers are available, or if all connection slots are full. -// -// We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including -// MaxConnectedUpgrade allows us to dial additional peers beyond MaxConnected if -// they have a higher score than any other connected or dialing peer. If we are -// successful in dialing, and thus have more than MaxConnected connected peers, -// the lower-scored peer will be evicted via EvictNext(). -func (m *PeerManager) DialNext() (NodeID, PeerAddress, error) { +// If no peer is found, or all connection slots are full, it blocks until one +// becomes available. The caller must call Dialed() or DialFailed() for the +// returned peer. The context can be used to cancel the call. 
+func (m *PeerManager) DialNext(ctx context.Context) (NodeID, PeerAddress, error) { + for { + id, address, err := m.TryDialNext() + if err != nil || id != "" { + return id, address, err + } + select { + case <-m.wakeDialCh: + case <-ctx.Done(): + return "", PeerAddress{}, ctx.Err() + } + } +} + +// TryDialNext is equivalent to DialNext(), but immediately returns an empty +// peer ID if no peers or connection slots are available. +func (m *PeerManager) TryDialNext() (NodeID, PeerAddress, error) { m.mtx.Lock() defer m.mtx.Unlock() + // We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including + // MaxConnectedUpgrade allows us to probe additional peers that have a + // higher score than any other peers, and if successful evict it. if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) { return "", PeerAddress{}, nil } - ranked, err := m.store.Ranked() - if err != nil { - return "", PeerAddress{}, err - } - for _, peer := range ranked { + for _, peer := range m.store.Ranked() { if m.dialing[peer.ID] || m.connected[peer.ID] { continue } for _, addressInfo := range peer.AddressInfo { - if time.Since(addressInfo.LastDialFailure) < m.retryDelay(peer, addressInfo.DialFailures) { + if time.Since(addressInfo.LastDialFailure) < m.retryDelay(addressInfo.DialFailures, peer.Persistent) { continue } - // At this point we have an eligible address to dial. If we're full - // but have peer upgrade capacity (as checked above), we need to - // make sure there exists an evictable peer of a lower score that we - // can replace. If so, we can go ahead and dial this peer, and - // EvictNext() will evict a lower-scored one later. + // We now have an eligible address to dial. If we're full but have + // upgrade capacity (as checked above), we find a lower-scored peer + // we can replace and mark it as upgrading so noone else claims it. 
// // If we don't find one, there is no point in trying additional // peers, since they will all have the same or lower score than this // peer (since they're ordered by score via peerStore.Ranked). - // - // FIXME: There is a race condition here where, if there exists a - // single lower-scored peer, we may end up dialing multiple - // higher-scored new peers that all expect the same lower-scored - // peer to be evicted, causing us to take on too many peers. We may - // need to reserve the eviction for this specific peer such that - // others can't claim it. - if m.options.MaxConnected > 0 && - len(m.connected) >= int(m.options.MaxConnected) && - !m.peerIsUpgrade(peer, ranked) { - return "", PeerAddress{}, nil + if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { + upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score()) + if upgradeFromPeer == "" { + return "", PeerAddress{}, nil + } + m.upgrading[upgradeFromPeer] = peer.ID } m.dialing[peer.ID] = true @@ -459,10 +638,33 @@ func (m *PeerManager) DialNext() (NodeID, PeerAddress, error) { return "", PeerAddress{}, nil } +// wakeDial is used to notify DialNext about changes that *may* cause new +// peers to become eligible for dialing, such as peer disconnections and +// retry timeouts. +func (m *PeerManager) wakeDial() { + // The channel has a 1-size buffer. A non-blocking send ensures + // we only queue up at most 1 trigger between each DialNext(). + select { + case m.wakeDialCh <- struct{}{}: + default: + } +} + +// wakeEvict is used to notify EvictNext about changes that *may* cause +// peers to become eligible for eviction, such as peer upgrades. +func (m *PeerManager) wakeEvict() { + // The channel has a 1-size buffer. A non-blocking send ensures + // we only queue up at most 1 trigger between each EvictNext(). 
+ select { + case m.wakeEvictCh <- struct{}{}: + default: + } +} + // retryDelay calculates a dial retry delay using exponential backoff, based on // retry settings in PeerManagerOptions. If MinRetryTime is 0, this returns // MaxInt64 (i.e. an infinite retry delay, effectively disabling retries). -func (m *PeerManager) retryDelay(peer *peerInfo, failures uint32) time.Duration { +func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration { if failures == 0 { return 0 } @@ -470,7 +672,7 @@ func (m *PeerManager) retryDelay(peer *peerInfo, failures uint32) time.Duration return time.Duration(math.MaxInt64) } maxDelay := m.options.MaxRetryTime - if peer.Persistent && m.options.MaxRetryTimePersistent > 0 { + if persistent && m.options.MaxRetryTimePersistent > 0 { maxDelay = m.options.MaxRetryTimePersistent } @@ -492,16 +694,45 @@ func (m *PeerManager) DialFailed(peerID NodeID, address PeerAddress) error { defer m.mtx.Unlock() delete(m.dialing, peerID) + for from, to := range m.upgrading { + if to == peerID { + delete(m.upgrading, from) // Unmark failed upgrade attempt. + } + } - peer, err := m.store.Get(peerID) - if err != nil || peer == nil { // Peer may have been removed while dialing, ignore. + peer, ok := m.store.Get(peerID) + if !ok { // Peer may have been removed while dialing, ignore. + return nil + } + addressInfo, ok := peer.AddressInfo[address.String()] + if !ok { + return nil // Assume the address has been removed, ignore. + } + addressInfo.LastDialFailure = time.Now().UTC() + addressInfo.DialFailures++ + if err := m.store.Set(peer); err != nil { return err } - if addressInfo := peer.LookupAddressInfo(address); addressInfo != nil { - addressInfo.LastDialFailure = time.Now().UTC() - addressInfo.DialFailures++ - return m.store.Set(peer) - } + + // We spawn a goroutine that notifies DialNext() again when the retry + // timeout has elapsed, so that we can consider dialing it again. 
+ go func() { + retryDelay := m.retryDelay(addressInfo.DialFailures, peer.Persistent) + if retryDelay == time.Duration(math.MaxInt64) { + return + } + // Use an explicit timer with deferred cleanup instead of + // time.After(), to avoid leaking goroutines on PeerManager.Close(). + timer := time.NewTimer(retryDelay) + defer timer.Stop() + select { + case <-timer.C: + m.wakeDial() + case <-m.closeCh: + } + }() + + m.wakeDial() return nil } @@ -513,6 +744,16 @@ func (m *PeerManager) Dialed(peerID NodeID, address PeerAddress) error { delete(m.dialing, peerID) + var upgradeFromPeer NodeID + for from, to := range m.upgrading { + if to == peerID { + delete(m.upgrading, from) + upgradeFromPeer = from + // Don't break, just in case this peer was marked as upgrading for + // multiple lower-scored peers (shouldn't really happen). + } + } + if m.connected[peerID] { return fmt.Errorf("peer %v is already connected", peerID) } @@ -521,35 +762,55 @@ func (m *PeerManager) Dialed(peerID NodeID, address PeerAddress) error { return fmt.Errorf("already connected to maximum number of peers") } - peer, err := m.store.Get(peerID) - if err != nil { - return err - } else if peer == nil { + peer, ok := m.store.Get(peerID) + if !ok { return fmt.Errorf("peer %q was removed while dialing", peerID) } - m.connected[peerID] = true - now := time.Now().UTC() peer.LastConnected = now - if addressInfo := peer.LookupAddressInfo(address); addressInfo != nil { + if addressInfo, ok := peer.AddressInfo[address.String()]; ok { addressInfo.DialFailures = 0 addressInfo.LastDialSuccess = now + // If not found, assume address has been removed. } - return m.store.Set(peer) + if err := m.store.Set(peer); err != nil { + return err + } + + if upgradeFromPeer != "" && m.options.MaxConnected > 0 && + len(m.connected) >= int(m.options.MaxConnected) { + // Look for an even lower-scored peer that may have appeared + // since we started the upgrade. 
+ if p, ok := m.store.Get(upgradeFromPeer); ok { + if u := m.findUpgradeCandidate(p.ID, p.Score()); u != "" { + upgradeFromPeer = u + } + } + m.evict[upgradeFromPeer] = true + } + m.connected[peerID] = true + m.wakeEvict() + + return nil } // Accepted marks an incoming peer connection successfully accepted. If the peer // is already connected or we don't allow additional connections then this will // return an error. // -// If MaxConnectedUpgrade is non-zero, the accepted peer is better-scored than any -// other connected peer, and the number of connections does not exceed -// MaxConnected + MaxConnectedUpgrade then we accept the connection and rely on -// EvictNext() to evict lower-scored peers. +// If full but MaxConnectedUpgrade is non-zero and the incoming peer is +// better-scored than any existing peers, then we accept it and evict a +// lower-scored peer. // // NOTE: We can't take an address here, since e.g. TCP uses a different port // number for outbound traffic than inbound traffic, so the peer's endpoint // wouldn't necessarily be an appropriate address to dial. +// +// FIXME: When we accept a connection from a peer, we should register that +// peer's address in the peer store so that we can dial it later. In order to do +// that, we'll need to get the remote address after all, but as noted above that +// can't be the remote endpoint since that will usually have the wrong port +// number. func (m *PeerManager) Accepted(peerID NodeID) error { m.mtx.Lock() defer m.mtx.Unlock() @@ -562,40 +823,33 @@ func (m *PeerManager) Accepted(peerID NodeID) error { return fmt.Errorf("already connected to maximum number of peers") } - peer, err := m.store.Get(peerID) - if err != nil { - return err - } - if peer == nil { - peer = &peerInfo{ - ID: peerID, - Persistent: m.options.isPersistent(peerID), - } + peer, ok := m.store.Get(peerID) + if !ok { + peer = m.makePeerInfo(peerID) } - // If we're already full (i.e. 
at MaxConnected), but we allow upgrades (and we - // know from the check above that we have upgrade capacity), then we can look - // for a lower-scored evictable peer, and if found we can accept this connection - // anyway and let EvictNext() evict the lower-scored peer for us. - // - // FIXME: There is a race condition here where, if there exists a single - // lower-scored peer, we may end up accepting multiple higher-scored new - // peers that all expect the same lower-scored peer to be evicted, causing - // us to take on too many peers. We may need to reserve the eviction for - // this specific peer such that others can't claim it. + // If all connections slots are full, but we allow upgrades (and we checked + // above that we have upgrade capacity), then we can look for a lower-scored + // peer to replace and if found accept the connection anyway and evict it. + var upgradeFromPeer NodeID if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { - ranked, err := m.store.Ranked() - if err != nil { - return err - } - if !m.peerIsUpgrade(peer, ranked) { + upgradeFromPeer = m.findUpgradeCandidate(peer.ID, peer.Score()) + if upgradeFromPeer == "" { return fmt.Errorf("already connected to maximum number of peers") } } - m.connected[peerID] = true peer.LastConnected = time.Now().UTC() - return m.store.Set(peer) + if err := m.store.Set(peer); err != nil { + return err + } + + m.connected[peerID] = true + if upgradeFromPeer != "" { + m.evict[upgradeFromPeer] = true + } + m.wakeEvict() + return nil } // Ready marks a peer as ready, broadcasting status updates to subscribers. 
The @@ -606,8 +860,7 @@ func (m *PeerManager) Ready(peerID NodeID) { m.mtx.Lock() defer m.mtx.Unlock() - connected := m.connected[peerID] - if connected { + if m.connected[peerID] { m.broadcast(PeerUpdate{ PeerID: peerID, Status: PeerStatusUp, @@ -622,30 +875,58 @@ func (m *PeerManager) Disconnected(peerID NodeID) error { defer m.mtx.Unlock() delete(m.connected, peerID) + delete(m.upgrading, peerID) + delete(m.evict, peerID) delete(m.evicting, peerID) m.broadcast(PeerUpdate{ PeerID: peerID, Status: PeerStatusDown, }) + m.wakeDial() return nil } -// EvictNext returns the next peer to evict (i.e. disconnect), or an empty ID if -// no peers should be evicted. The evicted peer will be a lowest-scored peer -// that is currently connected and not already being evicted. -func (m *PeerManager) EvictNext() (NodeID, error) { +// EvictNext returns the next peer to evict (i.e. disconnect). If no evictable +// peers are found, the call will block until one becomes available or the +// context is cancelled. +func (m *PeerManager) EvictNext(ctx context.Context) (NodeID, error) { + for { + id, err := m.TryEvictNext() + if err != nil || id != "" { + return id, err + } + select { + case <-m.wakeEvictCh: + case <-ctx.Done(): + return "", ctx.Err() + } + } +} + +// TryEvictNext is equivalent to EvictNext, but immediately returns an empty +// node ID if no evictable peers are found. +func (m *PeerManager) TryEvictNext() (NodeID, error) { m.mtx.Lock() defer m.mtx.Unlock() + // If any connected peers are explicitly scheduled for eviction, we return a + // random one. + for peerID := range m.evict { + delete(m.evict, peerID) + if m.connected[peerID] && !m.evicting[peerID] { + m.evicting[peerID] = true + return peerID, nil + } + } + + // If we're below capacity, we don't need to evict anything. 
if m.options.MaxConnected == 0 || len(m.connected)-len(m.evicting) <= int(m.options.MaxConnected) { return "", nil } - ranked, err := m.store.Ranked() - if err != nil { - return "", err - } + // If we're above capacity, just pick the lowest-ranked peer to evict. + ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { peer := ranked[i] if m.connected[peer.ID] && !m.evicting[peer.ID] { @@ -653,24 +934,29 @@ func (m *PeerManager) EvictNext() (NodeID, error) { return peer.ID, nil } } + return "", nil } -// peerIsUpgrade checks whether connecting to a given peer would be an -// upgrade, i.e. that there exists a lower-scored peer that is already -// connected and not scheduled for eviction, such that connecting to -// the peer would cause a lower-scored peer to be evicted if we're full. -func (m *PeerManager) peerIsUpgrade(peer *peerInfo, ranked []*peerInfo) bool { +// findUpgradeCandidate looks for a lower-scored peer that we could evict +// to make room for the given peer. Returns an empty ID if none is found. +// The caller must hold the mutex lock. +func (m *PeerManager) findUpgradeCandidate(id NodeID, score PeerScore) NodeID { + ranked := m.store.Ranked() for i := len(ranked) - 1; i >= 0; i-- { candidate := ranked[i] - if candidate.Score() >= peer.Score() { - return false - } - if m.connected[candidate.ID] && !m.evicting[candidate.ID] { - return true + switch { + case candidate.Score() >= score: + return "" // no further peers can be scored lower, due to sorting + case !m.connected[candidate.ID]: + case m.evict[candidate.ID]: + case m.evicting[candidate.ID]: + case m.upgrading[candidate.ID] != "": + default: + return candidate.ID } } - return false + return "" } // GetHeight returns a peer's height, as reported via SetHeight. If the peer @@ -681,15 +967,12 @@ func (m *PeerManager) peerIsUpgrade(peer *peerInfo, ranked []*peerInfo) bool { // consensus and mempool reactors. 
These dependencies should be removed from the // reactors, and instead query this information independently via new P2P // protocol additions. -func (m *PeerManager) GetHeight(peerID NodeID) (int64, error) { +func (m *PeerManager) GetHeight(peerID NodeID) int64 { m.mtx.Lock() defer m.mtx.Unlock() - peer, err := m.store.Get(peerID) - if err != nil || peer == nil { - return 0, err - } - return peer.Height, nil + peer, _ := m.store.Get(peerID) + return peer.Height } // SetHeight stores a peer's height, making it available via GetHeight. If the @@ -704,117 +987,226 @@ func (m *PeerManager) SetHeight(peerID NodeID, height int64) error { m.mtx.Lock() defer m.mtx.Unlock() - peer, err := m.store.Get(peerID) - if err != nil { - return err - } - if peer == nil { - peer = &peerInfo{ - ID: peerID, - Persistent: m.options.isPersistent(peerID), - } + peer, ok := m.store.Get(peerID) + if !ok { + peer = m.makePeerInfo(peerID) } peer.Height = height return m.store.Set(peer) } -// peerStore stores information about peers. It is currently a bare-bones -// in-memory store, and will be fleshed out later. +// peerStore stores information about peers. It is not thread-safe, assuming +// it is used only by PeerManager which handles concurrency control, allowing +// it to execute multiple operations atomically via its own mutex. // -// peerStore is not thread-safe, since it assumes it is only used by PeerManager -// which handles concurrency control. This allows the manager to execute multiple -// operations atomically while it holds the mutex. +// The entire set of peers is kept in memory, for performance. It is loaded +// from disk on initialization, and any changes are written back to disk +// (without fsync, since we can afford to lose recent writes). type peerStore struct { - peers map[NodeID]peerInfo + db dbm.DB + peers map[NodeID]*peerInfo + ranked []*peerInfo // cache for Ranked(), nil invalidates cache } -// newPeerStore creates a new peer store. 
-func newPeerStore() *peerStore { - return &peerStore{ - peers: map[NodeID]peerInfo{}, +// newPeerStore creates a new peer store, loading all persisted peers from the +// database into memory. +func newPeerStore(db dbm.DB) (*peerStore, error) { + store := &peerStore{ + db: db, } + if err := store.loadPeers(); err != nil { + return nil, err + } + return store, nil } -// Get fetches a peer, returning nil if not found. -func (s *peerStore) Get(id NodeID) (*peerInfo, error) { - peer, ok := s.peers[id] - if !ok { - return nil, nil - } - return &peer, nil -} +// loadPeers loads all peers from the database into memory. +func (s *peerStore) loadPeers() error { + peers := make(map[NodeID]*peerInfo) -// Set stores peer data. -func (s *peerStore) Set(peer *peerInfo) error { - if peer == nil { - return errors.New("peer cannot be nil") + start, end := keyPeerInfoRange() + iter, err := s.db.Iterator(start, end) + if err != nil { + return err } - s.peers[peer.ID] = *peer + defer iter.Close() + for ; iter.Valid(); iter.Next() { + // FIXME: We may want to tolerate failures here, by simply logging + // the errors and ignoring the faulty peer entries. + msg := new(p2pproto.PeerInfo) + if err := proto.Unmarshal(iter.Value(), msg); err != nil { + return fmt.Errorf("invalid peer Protobuf data: %w", err) + } + peer, err := peerInfoFromProto(msg) + if err != nil { + return fmt.Errorf("invalid peer data: %w", err) + } + peers[peer.ID] = peer + } + if iter.Error() != nil { + return iter.Error() + } + s.peers = peers + s.ranked = nil // invalidate cache if populated return nil } -// List retrieves all peers. -func (s *peerStore) List() ([]*peerInfo, error) { - peers := []*peerInfo{} - for _, peer := range s.peers { - peer := peer - peers = append(peers, &peer) - } - return peers, nil +// Get fetches a peer. The boolean indicates whether the peer existed or not. +// The returned peer info is a copy, and can be mutated at will. 
+func (s *peerStore) Get(id NodeID) (peerInfo, bool) { + peer, ok := s.peers[id] + return peer.Copy(), ok } -// Ranked returns a list of peers ordered by score (better peers first). -// Peers with equal scores are returned in an arbitrary order. -// -// This is used to determine which peers to connect to and which peers to evict -// in order to make room for better peers. -// -// FIXME: For now, we simply generate the list on every call, but this can get -// expensive since it's called fairly frequently. We may want to either cache -// this, or store peers in a data structure that maintains order (e.g. a heap or -// ordered map). -func (s *peerStore) Ranked() ([]*peerInfo, error) { - peers, err := s.List() - if err != nil { - return nil, err +// Set stores peer data. The input data will be copied, and can safely be reused +// by the caller. +func (s *peerStore) Set(peer peerInfo) error { + if err := peer.Validate(); err != nil { + return err } - sort.Slice(peers, func(i, j int) bool { + peer = peer.Copy() + + // FIXME: We may want to optimize this by avoiding saving to the database + // if there haven't been any changes to persisted fields. + bz, err := peer.ToProto().Marshal() + if err != nil { + return err + } + if err = s.db.Set(keyPeerInfo(peer.ID), bz); err != nil { + return err + } + + if current, ok := s.peers[peer.ID]; !ok || current.Score() != peer.Score() { + // If the peer is new, or its score changes, we invalidate the Ranked() cache. + s.peers[peer.ID] = &peer + s.ranked = nil + } else { + // Otherwise, since s.ranked contains pointers to the old data and we + // want those pointers to remain valid with the new data, we have to + // update the existing pointer address. + *current = peer + } + + return nil +} + +// Delete deletes a peer, or does nothing if it does not exist. 
+func (s *peerStore) Delete(id NodeID) error { + if _, ok := s.peers[id]; !ok { + return nil + } + if err := s.db.Delete(keyPeerInfo(id)); err != nil { + return err + } + delete(s.peers, id) + s.ranked = nil + return nil +} + +// List retrieves all peers in an arbitrary order. The returned data is a copy, +// and can be mutated at will. +func (s *peerStore) List() []peerInfo { + peers := make([]peerInfo, 0, len(s.peers)) + for _, peer := range s.peers { + peers = append(peers, peer.Copy()) + } + return peers +} + +// Ranked returns a list of peers ordered by score (better peers first). Peers +// with equal scores are returned in an arbitrary order. The returned list must +// not be mutated or accessed concurrently by the caller, since it returns +// pointers to internal peerStore data for performance. +// +// Ranked is used to determine both which peers to dial, which ones to evict, +// and which ones to delete completely. +// +// FIXME: For now, we simply maintain a cache in s.ranked which is invalidated +// by setting it to nil, but if necessary we should use a better data structure +// for this (e.g. a heap or ordered map). +// +// FIXME: The scoring logic is currently very naïve, see peerInfo.Score(). +func (s *peerStore) Ranked() []*peerInfo { + if s.ranked != nil { + return s.ranked + } + s.ranked = make([]*peerInfo, 0, len(s.peers)) + for _, peer := range s.peers { + s.ranked = append(s.ranked, peer) + } + sort.Slice(s.ranked, func(i, j int) bool { // FIXME: If necessary, consider precomputing scores before sorting, // to reduce the number of Score() calls. - return peers[i].Score() > peers[j].Score() + return s.ranked[i].Score() > s.ranked[j].Score() }) - return peers, nil + return s.ranked +} + +// Size returns the number of peers in the peer store. +func (s *peerStore) Size() int { + return len(s.peers) } // peerInfo contains peer information stored in a peerStore. 
type peerInfo struct { ID NodeID - AddressInfo []*addressInfo - Persistent bool - Height int64 + AddressInfo map[string]*peerAddressInfo LastConnected time.Time + + // These fields are ephemeral, i.e. not persisted to the database. + Persistent bool + Height int64 } -// AddAddress adds an address to a peer, unless it already exists. It does not -// validate the address. Returns true if the address was new. -func (p *peerInfo) AddAddress(address PeerAddress) bool { - if p.LookupAddressInfo(address) != nil { - return false +// peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo, +// erroring if the data is invalid. +func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { + p := &peerInfo{ + ID: NodeID(msg.ID), + AddressInfo: map[string]*peerAddressInfo{}, } - p.AddressInfo = append(p.AddressInfo, &addressInfo{Address: address}) - return true -} - -// LookupAddressInfo returns address info for an address, or nil if unknown. -func (p *peerInfo) LookupAddressInfo(address PeerAddress) *addressInfo { - // We just do a linear search for now. - addressString := address.String() - for _, info := range p.AddressInfo { - if info.Address.String() == addressString { - return info + if msg.LastConnected != nil { + p.LastConnected = *msg.LastConnected + } + for _, addr := range msg.AddressInfo { + addressInfo, err := peerAddressInfoFromProto(addr) + if err != nil { + return nil, err } + p.AddressInfo[addressInfo.Address.String()] = addressInfo } - return nil + return p, p.Validate() +} + +// ToProto converts the peerInfo to p2pproto.PeerInfo for database storage. The +// Protobuf type only contains persisted fields, while ephemeral fields are +// discarded. The returned message may contain pointers to original data, since +// it is expected to be serialized immediately. 
+func (p *peerInfo) ToProto() *p2pproto.PeerInfo { + msg := &p2pproto.PeerInfo{ + ID: string(p.ID), + LastConnected: &p.LastConnected, + } + for _, addressInfo := range p.AddressInfo { + msg.AddressInfo = append(msg.AddressInfo, addressInfo.ToProto()) + } + if msg.LastConnected.IsZero() { + msg.LastConnected = nil + } + return msg +} + +// Copy returns a deep copy of the peer info. +func (p *peerInfo) Copy() peerInfo { + if p == nil { + return peerInfo{} + } + c := *p + for i, addressInfo := range c.AddressInfo { + addressInfoCopy := addressInfo.Copy() + c.AddressInfo[i] = &addressInfoCopy + } + return c } // Score calculates a score for the peer. Higher-scored peers will be @@ -827,14 +1219,96 @@ func (p *peerInfo) Score() PeerScore { return score } -// addressInfo contains information and statistics about an address. -type addressInfo struct { +// Validate validates the peer info. +func (p *peerInfo) Validate() error { + if p.ID == "" { + return errors.New("no peer ID") + } + return nil +} + +// peerAddressInfo contains information and statistics about a peer address. +type peerAddressInfo struct { Address PeerAddress LastDialSuccess time.Time LastDialFailure time.Time DialFailures uint32 // since last successful dial } +// peerAddressInfoFromProto converts a Protobuf PeerAddressInfo message +// to a peerAddressInfo. +func peerAddressInfoFromProto(msg *p2pproto.PeerAddressInfo) (*peerAddressInfo, error) { + address, err := ParsePeerAddress(msg.Address) + if err != nil { + return nil, fmt.Errorf("invalid address %q: %w", address, err) + } + addressInfo := &peerAddressInfo{ + Address: address, + DialFailures: msg.DialFailures, + } + if msg.LastDialSuccess != nil { + addressInfo.LastDialSuccess = *msg.LastDialSuccess + } + if msg.LastDialFailure != nil { + addressInfo.LastDialFailure = *msg.LastDialFailure + } + return addressInfo, addressInfo.Validate() +} + +// ToProto converts the address into to a Protobuf message for serialization. 
+func (a *peerAddressInfo) ToProto() *p2pproto.PeerAddressInfo { + msg := &p2pproto.PeerAddressInfo{ + Address: a.Address.String(), + LastDialSuccess: &a.LastDialSuccess, + LastDialFailure: &a.LastDialFailure, + DialFailures: a.DialFailures, + } + if msg.LastDialSuccess.IsZero() { + msg.LastDialSuccess = nil + } + if msg.LastDialFailure.IsZero() { + msg.LastDialFailure = nil + } + return msg +} + +// Copy returns a copy of the address info. +func (a *peerAddressInfo) Copy() peerAddressInfo { + return *a +} + +// Validate validates the address info. +func (a *peerAddressInfo) Validate() error { + return a.Address.Validate() +} + +// These are database key prefixes. +const ( + prefixPeerInfo int64 = 1 +) + +// keyPeerInfo generates a peerInfo database key. +func keyPeerInfo(id NodeID) []byte { + key, err := orderedcode.Append(nil, prefixPeerInfo, string(id)) + if err != nil { + panic(err) + } + return key +} + +// keyPeerInfoPrefix generates start/end keys for the entire peerInfo key range. +func keyPeerInfoRange() ([]byte, []byte) { + start, err := orderedcode.Append(nil, prefixPeerInfo, "") + if err != nil { + panic(err) + } + end, err := orderedcode.Append(nil, prefixPeerInfo, orderedcode.Infinity) + if err != nil { + panic(err) + } + return start, end +} + // ============================================================================ // Types and business logic below may be deprecated. 
// diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 20b1c848b..b241f44ee 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -285,9 +285,9 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { r.SendAddrs(src, r.book.GetSelection()) } - case *tmp2p.PexAddrs: + case *tmp2p.PexResponse: // If we asked for addresses, add them to the book - addrs, err := p2p.NetAddressesFromProto(msg.Addrs) + addrs, err := p2p.NetAddressesFromProto(msg.Addresses) if err != nil { r.Switch.StopPeerForError(src, err) r.book.MarkBad(src.SocketAddr(), defaultBanTime) @@ -409,7 +409,7 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // SendAddrs sends addrs to the peer. func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)})) + p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: p2p.NetAddressesToProto(netAddrs)})) } // SetEnsurePeersPeriod sets period to ensure peers connected. 
@@ -773,12 +773,12 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { // mustEncode proto encodes a tmp2p.Message func mustEncode(pb proto.Message) []byte { - msg := tmp2p.Message{} + msg := tmp2p.PexMessage{} switch pb := pb.(type) { case *tmp2p.PexRequest: - msg.Sum = &tmp2p.Message_PexRequest{PexRequest: pb} - case *tmp2p.PexAddrs: - msg.Sum = &tmp2p.Message_PexAddrs{PexAddrs: pb} + msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} + case *tmp2p.PexResponse: + msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} default: panic(fmt.Sprintf("Unknown message type %T", pb)) } @@ -791,7 +791,7 @@ func mustEncode(pb proto.Message) []byte { } func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.Message{} + pb := &tmp2p.PexMessage{} err := pb.Unmarshal(bz) if err != nil { @@ -799,10 +799,10 @@ func decodeMsg(bz []byte) (proto.Message, error) { } switch msg := pb.Sum.(type) { - case *tmp2p.Message_PexRequest: + case *tmp2p.PexMessage_PexRequest: return msg.PexRequest, nil - case *tmp2p.Message_PexAddrs: - return msg.PexAddrs, nil + case *tmp2p.PexMessage_PexResponse: + return msg.PexResponse, nil default: return nil, fmt.Errorf("unknown message: %T", msg) } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index a32164628..549996f12 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -94,6 +94,11 @@ func TestPEXReactorRunning(t *testing.T) { }) } + for _, sw := range switches { + err := sw.Start() // start switch and reactors + require.Nil(t, err) + } + addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { addr := switches[otherSwitchIndex].NetAddress() err := books[switchIndex].AddAddress(addr, addr) @@ -104,11 +109,6 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(1, 0) addOtherNodeAddrToAddrBook(2, 1) - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - 
assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) // stop them @@ -128,7 +128,7 @@ func TestPEXReactorReceive(t *testing.T) { size := book.Size() na, err := peer.NodeInfo().NetAddress() require.NoError(t, err) - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{na.ToProto()}}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{na.ToProto()}}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) @@ -185,7 +185,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{peer.SocketAddr().ToProto()}}) // receive some addrs. should clear the request r.Receive(PexChannel, peer, msg) @@ -456,7 +456,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { size := book.Size() na, err := peer.NodeInfo().NetAddress() require.NoError(t, err) - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{na.ToProto()}}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{na.ToProto()}}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) @@ -634,7 +634,7 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } func TestPexVectors(t *testing.T) { - addr := tmp2p.NetAddress{ + addr := tmp2p.PexAddress{ ID: "1", IP: "127.0.0.1", Port: 9090, @@ -646,7 +646,7 @@ func TestPexVectors(t *testing.T) { expBytes string }{ {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, + {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, } for _, tc := range testCases { diff --git a/p2p/pex/reactor.go b/p2p/pex/reactor.go new file mode 100644 index 
000000000..c9c5b6779 --- /dev/null +++ b/p2p/pex/reactor.go @@ -0,0 +1,226 @@ +package pex + +import ( + "context" + "fmt" + "time" + + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/p2p" + protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +var ( + _ service.Service = (*ReactorV2)(nil) + _ p2p.Wrapper = (*protop2p.PexMessage)(nil) +) + +const ( + maxAddresses uint16 = 100 + resolveTimeout = 3 * time.Second +) + +// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor +// is Reactor. +// +// FIXME: Rename this when Reactor is removed, and consider moving to p2p/. +type ReactorV2 struct { + service.BaseService + + peerManager *p2p.PeerManager + pexCh *p2p.Channel + peerUpdates *p2p.PeerUpdatesCh + closeCh chan struct{} +} + +// NewReactor returns a reference to a new reactor. +func NewReactorV2( + logger log.Logger, + peerManager *p2p.PeerManager, + pexCh *p2p.Channel, + peerUpdates *p2p.PeerUpdatesCh, +) *ReactorV2 { + r := &ReactorV2{ + peerManager: peerManager, + pexCh: pexCh, + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + } + + r.BaseService = *service.NewBaseService(logger, "PEX", r) + return r +} + +// OnStart starts separate go routines for each p2p Channel and listens for +// envelopes on each. In addition, it also listens for peer updates and handles +// messages on that p2p channel accordingly. The caller must be sure to execute +// OnStop to ensure the outbound p2p Channels are closed. +func (r *ReactorV2) OnStart() error { + go r.processPexCh() + go r.processPeerUpdates() + return nil +} + +// OnStop stops the reactor by signaling to all spawned goroutines to exit and +// blocking until they all exit. +func (r *ReactorV2) OnStop() { + // Close closeCh to signal to all spawned goroutines to gracefully exit. All + // p2p Channels should execute Close(). 
+ close(r.closeCh) + + // Wait for all p2p Channels to be closed before returning. This ensures we + // can easily reason about synchronization of all p2p Channels and ensure no + // panics will occur. + <-r.pexCh.Done() + <-r.peerUpdates.Done() +} + +// handlePexMessage handles envelopes sent from peers on the PexChannel. +func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { + logger := r.Logger.With("peer", envelope.From) + + // FIXME: We may want to add DoS protection here, by rate limiting peers and + // only processing addresses we actually requested. + switch msg := envelope.Message.(type) { + case *protop2p.PexRequest: + pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses), maxAddresses) + r.pexCh.Out() <- p2p.Envelope{ + To: envelope.From, + Message: &protop2p.PexResponse{Addresses: pexAddresses}, + } + + case *protop2p.PexResponse: + for _, pexAddress := range msg.Addresses { + peerAddress, err := p2p.ParsePeerAddress( + fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) + if err != nil { + logger.Debug("invalid PEX address", "address", pexAddress, "err", err) + continue + } + if err = r.peerManager.Add(peerAddress); err != nil { + logger.Debug("failed to register PEX address", "address", peerAddress, "err", err) + } + } + + default: + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + +// resolve resolves a set of peer addresses into PEX addresses. +// +// FIXME: This is necessary because the current PEX protocol only supports +// IP/port pairs, while the P2P stack uses PeerAddress URLs. The PEX protocol +// should really use URLs too, to exchange DNS names instead of IPs and allow +// different transport protocols (e.g. QUIC and MemoryTransport). +// +// FIXME: We may want to cache and parallelize this, but for now we'll just rely +// on the operating system to cache it for us. 
+func (r *ReactorV2) resolve(addresses []p2p.PeerAddress, limit uint16) []protop2p.PexAddress { + pexAddresses := make([]protop2p.PexAddress, 0, len(addresses)) + for _, address := range addresses { + ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) + endpoints, err := address.Resolve(ctx) + cancel() + if err != nil { + r.Logger.Debug("failed to resolve address", "address", address, "err", err) + continue + } + for _, endpoint := range endpoints { + if len(pexAddresses) >= int(limit) { + return pexAddresses + + } else if endpoint.IP != nil { + // PEX currently only supports IP-networked transports (as + // opposed to e.g. p2p.MemoryTransport). + pexAddresses = append(pexAddresses, protop2p.PexAddress{ + ID: string(endpoint.PeerID), + IP: endpoint.IP.String(), + Port: uint32(endpoint.Port), + }) + } + } + } + return pexAddresses +} + +// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. +// It will handle errors and any possible panics gracefully. A caller can handle +// any error returned by sending a PeerError on the respective channel. +func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic in processing message: %v", e) + } + }() + + r.Logger.Debug("received message", "peer", envelope.From) + + switch chID { + case p2p.ChannelID(PexChannel): + err = r.handlePexMessage(envelope) + + default: + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + } + + return err +} + +// processPexCh implements a blocking event loop where we listen for p2p +// Envelope messages from the pexCh. 
+func (r *ReactorV2) processPexCh() { + defer r.pexCh.Close() + + for { + select { + case envelope := <-r.pexCh.In(): + if err := r.handleMessage(r.pexCh.ID(), envelope); err != nil { + r.Logger.Error("failed to process message", "ch_id", r.pexCh.ID(), "envelope", envelope, "err", err) + r.pexCh.Error() <- p2p.PeerError{ + PeerID: envelope.From, + Err: err, + Severity: p2p.PeerErrorSeverityLow, + } + } + + case <-r.closeCh: + r.Logger.Debug("stopped listening on PEX channel; closing...") + return + } + } +} + +// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we +// send a request for addresses. +func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { + r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status) + + if peerUpdate.Status == p2p.PeerStatusUp { + r.pexCh.Out() <- p2p.Envelope{ + To: peerUpdate.PeerID, + Message: &protop2p.PexRequest{}, + } + } +} + +// processPeerUpdates initiates a blocking process where we listen for and handle +// PeerUpdate messages. When the reactor is stopped, we will catch the signal and +// close the p2p PeerUpdatesCh gracefully. +func (r *ReactorV2) processPeerUpdates() { + defer r.peerUpdates.Close() + + for { + select { + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(peerUpdate) + + case <-r.closeCh: + r.Logger.Debug("stopped listening on peer updates channel; closing...") + return + } + } +} diff --git a/p2p/router.go b/p2p/router.go index ae744a7f9..418bc004c 100644 --- a/p2p/router.go +++ b/p2p/router.go @@ -2,6 +2,7 @@ package p2p import ( "context" + "errors" "fmt" "io" "sync" @@ -230,21 +231,16 @@ func (r *Router) routeChannel(channel *Channel) { // acceptPeers accepts inbound connections from peers on the given transport. 
func (r *Router) acceptPeers(transport Transport) { + ctx := r.stopCtx() for { - select { - case <-r.stopCh: - return - default: - } - // FIXME: We may need transports to enforce some sort of rate limiting // here (e.g. by IP address), or alternatively have PeerManager.Accepted() // do it for us. - conn, err := transport.Accept(context.Background()) + conn, err := transport.Accept(ctx) switch err { case nil: - case ErrTransportClosed{}, io.EOF: - r.logger.Info("transport closed; stopping accept routine", "transport", transport) + case ErrTransportClosed{}, io.EOF, context.Canceled: + r.logger.Debug("stopping accept routine", "transport", transport) return default: r.logger.Error("failed to accept connection", "transport", transport, "err", err) @@ -285,31 +281,25 @@ func (r *Router) acceptPeers(transport Transport) { // dialPeers maintains outbound connections to peers. func (r *Router) dialPeers() { + ctx := r.stopCtx() for { - select { - case <-r.stopCh: + peerID, address, err := r.peerManager.DialNext(ctx) + switch err { + case nil: + case context.Canceled: + r.logger.Debug("stopping dial routine") return default: - } - - peerID, address, err := r.peerManager.DialNext() - if err != nil { r.logger.Error("failed to find next peer to dial", "err", err) return - } else if peerID == "" { - r.logger.Debug("no eligible peers, sleeping") - select { - case <-time.After(time.Second): - continue - case <-r.stopCh: - return - } } go func() { - conn, err := r.dialPeer(address) - if err != nil { - r.logger.Error("failed to dial peer, will retry", "peer", peerID) + conn, err := r.dialPeer(ctx, address) + if errors.Is(err, context.Canceled) { + return + } else if err != nil { + r.logger.Error("failed to dial peer", "peer", peerID) if err = r.peerManager.DialFailed(peerID, address); err != nil { r.logger.Error("failed to report dial failure", "peer", peerID, "err", err) } @@ -344,9 +334,7 @@ func (r *Router) dialPeers() { } // dialPeer attempts to connect to a peer. 
-func (r *Router) dialPeer(address PeerAddress) (Connection, error) { - ctx := context.Background() - +func (r *Router) dialPeer(ctx context.Context, address PeerAddress) (Connection, error) { resolveCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -367,11 +355,18 @@ func (r *Router) dialPeer(address PeerAddress) (Connection, error) { dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() + // FIXME: When we dial and handshake the peer, we should pass it + // appropriate address(es) it can use to dial us back. It can't use our + // remote endpoint, since TCP uses different port numbers for outbound + // connections than it does for inbound. Also, we may need to vary this + // by the peer's endpoint, since e.g. a peer on 192.168.0.0 can reach us + // on a private address on this endpoint, but a peer on the public + // Internet can't and needs a different public address. conn, err := t.Dial(dialCtx, endpoint) if err != nil { - r.logger.Error("failed to dial endpoint", "endpoint", endpoint) + r.logger.Error("failed to dial endpoint", "endpoint", endpoint, "err", err) } else { - r.logger.Info("connected to peer", "peer", address.NodeID(), "endpoint", endpoint) + r.logger.Info("connected to peer", "peer", address.ID, "endpoint", endpoint) return conn, nil } } @@ -481,34 +476,25 @@ func (r *Router) sendPeer(peerID NodeID, conn Connection, queue queue) error { // evictPeers evicts connected peers as requested by the peer manager. 
func (r *Router) evictPeers() { + ctx := r.stopCtx() for { - select { - case <-r.stopCh: + peerID, err := r.peerManager.EvictNext(ctx) + switch err { + case nil: + case context.Canceled: + r.logger.Debug("stopping evict routine") return default: - } - - peerID, err := r.peerManager.EvictNext() - if err != nil { r.logger.Error("failed to find next peer to evict", "err", err) return - } else if peerID == "" { - r.logger.Debug("no evictable peers, sleeping") - select { - case <-time.After(time.Second): - continue - case <-r.stopCh: - return - } } r.logger.Info("evicting peer", "peer", peerID) r.peerMtx.RLock() - queue, ok := r.peerQueues[peerID] - r.peerMtx.RUnlock() - if ok { + if queue, ok := r.peerQueues[peerID]; ok { queue.close() } + r.peerMtx.RUnlock() } } @@ -544,3 +530,13 @@ func (r *Router) OnStop() { <-q.closed() } } + +// stopCtx returns a context that is cancelled when the router stops. +func (r *Router) stopCtx() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-r.stopCh + cancel() + }() + return ctx +} diff --git a/p2p/router_test.go b/p2p/router_test.go index 1df66a87f..5436ee02b 100644 --- a/p2p/router_test.go +++ b/p2p/router_test.go @@ -4,9 +4,11 @@ import ( "errors" "testing" + "github.com/fortytw2/leaktest" gogotypes "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -29,19 +31,25 @@ func echoReactor(channel *p2p.Channel) { } func TestRouter(t *testing.T) { + defer leaktest.Check(t)() + logger := log.TestingLogger() network := p2p.NewMemoryNetwork(logger) transport := network.GenerateTransport() + defer transport.Close() chID := p2p.ChannelID(1) // Start some other in-memory network nodes to communicate with, running // a simple echo reactor that returns received messages. 
peers := []p2p.PeerAddress{} for i := 0; i < 3; i++ { + peerManager, err := p2p.NewPeerManager(dbm.NewMemDB(), p2p.PeerManagerOptions{}) + require.NoError(t, err) peerTransport := network.GenerateTransport() + defer peerTransport.Close() peerRouter := p2p.NewRouter( logger.With("peerID", i), - p2p.NewPeerManager(p2p.PeerManagerOptions{}), + peerManager, map[p2p.Protocol]p2p.Transport{ p2p.MemoryProtocol: peerTransport, }, @@ -59,7 +67,9 @@ func TestRouter(t *testing.T) { } // Start the main router and connect it to the peers above. - peerManager := p2p.NewPeerManager(p2p.PeerManagerOptions{}) + peerManager, err := p2p.NewPeerManager(dbm.NewMemDB(), p2p.PeerManagerOptions{}) + require.NoError(t, err) + defer peerManager.Close() for _, address := range peers { err := peerManager.Add(address) require.NoError(t, err) @@ -70,11 +80,18 @@ func TestRouter(t *testing.T) { router := p2p.NewRouter(logger, peerManager, map[p2p.Protocol]p2p.Transport{ p2p.MemoryProtocol: transport, }) + channel, err := router.OpenChannel(chID, &TestMessage{}) require.NoError(t, err) + defer channel.Close() + err = router.Start() require.NoError(t, err) defer func() { + // Since earlier defers are closed after this, and we have to make sure + // we close channels and subscriptions before the router, we explicitly + // close them here to. + peerUpdates.Close() channel.Close() require.NoError(t, router.Stop()) }() @@ -97,13 +114,13 @@ func TestRouter(t *testing.T) { // We then submit an error for a peer, and watch it get disconnected. 
channel.Error() <- p2p.PeerError{ - PeerID: peers[0].NodeID(), + PeerID: peers[0].ID, Err: errors.New("test error"), Severity: p2p.PeerErrorSeverityCritical, } peerUpdate := <-peerUpdates.Updates() require.Equal(t, p2p.PeerUpdate{ - PeerID: peers[0].NodeID(), + PeerID: peers[0].ID, Status: p2p.PeerStatusDown, }, peerUpdate) @@ -114,7 +131,7 @@ func TestRouter(t *testing.T) { } for i := 0; i < len(peers)-1; i++ { envelope := <-channel.In() - require.NotEqual(t, peers[0].NodeID(), envelope.From) + require.NotEqual(t, peers[0].ID, envelope.From) require.Equal(t, &TestMessage{Value: "broadcast"}, envelope.Message) } select { diff --git a/p2p/transport.go b/p2p/transport.go index 5e15444fd..2e98c5d16 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "net" - "net/url" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p/conn" @@ -66,27 +65,23 @@ type Endpoint struct { Port uint16 } -// PeerAddress converts the endpoint into a peer address URL. +// PeerAddress converts the endpoint into a peer address. func (e Endpoint) PeerAddress() PeerAddress { - u := &url.URL{ - Scheme: string(e.Protocol), - User: url.User(string(e.PeerID)), + address := PeerAddress{ + ID: e.PeerID, + Protocol: e.Protocol, + Path: e.Path, } if e.IP != nil { - u.Host = e.IP.String() - if e.Port > 0 { - u.Host = net.JoinHostPort(u.Host, fmt.Sprintf("%v", e.Port)) - } - u.Path = e.Path - } else { - u.Opaque = e.Path + address.Hostname = e.IP.String() + address.Port = e.Port } - return PeerAddress{URL: u} + return address } // String formats an endpoint as a URL string. func (e Endpoint) String() string { - return e.PeerAddress().URL.String() + return e.PeerAddress().String() } // Validate validates an endpoint. 
@@ -96,8 +91,6 @@ func (e Endpoint) Validate() error { return errors.New("endpoint has no peer ID") case e.Protocol == "": return errors.New("endpoint has no protocol") - case len(e.IP) == 0 && len(e.Path) == 0: - return errors.New("endpoint must have either IP or path") case e.Port > 0 && len(e.IP) == 0: return fmt.Errorf("endpoint has port %v but no IP", e.Port) default: diff --git a/p2p/transport_mconn.go b/p2p/transport_mconn.go index 186162ca5..928c63adf 100644 --- a/p2p/transport_mconn.go +++ b/p2p/transport_mconn.go @@ -238,7 +238,7 @@ func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { case <-m.chClose: return nil, ErrTransportClosed{} case <-ctx.Done(): - return nil, nil + return nil, ctx.Err() } } diff --git a/p2p/transport_memory.go b/p2p/transport_memory.go index 6768ff186..b47ff6919 100644 --- a/p2p/transport_memory.go +++ b/p2p/transport_memory.go @@ -153,17 +153,14 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti if endpoint.Protocol != MemoryProtocol { return nil, fmt.Errorf("invalid protocol %q", endpoint.Protocol) } - if endpoint.Path == "" { - return nil, errors.New("no path") - } if endpoint.PeerID == "" { return nil, errors.New("no peer ID") } t.logger.Info("dialing peer", "remote", endpoint) - peerTransport := t.network.GetTransport(NodeID(endpoint.Path)) + peerTransport := t.network.GetTransport(endpoint.PeerID) if peerTransport == nil { - return nil, fmt.Errorf("unknown peer %q", endpoint.Path) + return nil, fmt.Errorf("unknown peer %q", endpoint.PeerID) } inCh := make(chan memoryMessage, 1) outCh := make(chan memoryMessage, 1) @@ -241,7 +238,6 @@ func (t *MemoryTransport) Endpoints() []Endpoint { return []Endpoint{{ Protocol: MemoryProtocol, PeerID: t.nodeInfo.NodeID, - Path: string(t.nodeInfo.NodeID), }} } } @@ -365,7 +361,6 @@ func (c *MemoryConnection) LocalEndpoint() Endpoint { return Endpoint{ PeerID: c.local.nodeInfo.NodeID, Protocol: MemoryProtocol, - Path: 
string(c.local.nodeInfo.NodeID), } } @@ -374,7 +369,6 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { return Endpoint{ PeerID: c.remote.nodeInfo.NodeID, Protocol: MemoryProtocol, - Path: string(c.remote.nodeInfo.NodeID), } } diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go new file mode 100644 index 000000000..bd57ae651 --- /dev/null +++ b/proto/tendermint/p2p/pex.go @@ -0,0 +1,33 @@ +package p2p + +import ( + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" +) + +// Wrap implements the p2p Wrapper interface and wraps a PEX message. +func (m *PexMessage) Wrap(pb proto.Message) error { + switch msg := pb.(type) { + case *PexRequest: + m.Sum = &PexMessage_PexRequest{PexRequest: msg} + case *PexResponse: + m.Sum = &PexMessage_PexResponse{PexResponse: msg} + default: + return fmt.Errorf("unknown message: %T", msg) + } + return nil +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped PEX +// message. +func (m *PexMessage) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *PexMessage_PexRequest: + return msg.PexRequest, nil + case *PexMessage_PexResponse: + return msg.PexResponse, nil + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index b4a39fe98..fff1b5db8 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -23,6 +23,66 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type PexAddress struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` +} + +func (m *PexAddress) Reset() { *m = PexAddress{} } +func (m *PexAddress) String() string { return proto.CompactTextString(m) } +func (*PexAddress) ProtoMessage() {} +func (*PexAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{0} +} +func (m *PexAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PexAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PexAddress.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PexAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_PexAddress.Merge(m, src) +} +func (m *PexAddress) XXX_Size() int { + return m.Size() +} +func (m *PexAddress) XXX_DiscardUnknown() { + xxx_messageInfo_PexAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_PexAddress proto.InternalMessageInfo + +func (m *PexAddress) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *PexAddress) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + +func (m *PexAddress) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + type PexRequest struct { } @@ -30,7 +90,7 @@ func (m *PexRequest) Reset() { *m = PexRequest{} } func (m *PexRequest) String() string { return proto.CompactTextString(m) } func (*PexRequest) ProtoMessage() {} func (*PexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{0} + return fileDescriptor_81c2f011fd13be57, []int{1} } func (m *PexRequest) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -59,22 +119,22 @@ func (m *PexRequest) XXX_DiscardUnknown() { var xxx_messageInfo_PexRequest proto.InternalMessageInfo -type PexAddrs struct { - Addrs []NetAddress `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs"` +type PexResponse struct { + Addresses []PexAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` } -func (m *PexAddrs) Reset() { *m = PexAddrs{} } -func (m *PexAddrs) String() string { return proto.CompactTextString(m) } -func (*PexAddrs) ProtoMessage() {} -func (*PexAddrs) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{1} +func (m *PexResponse) Reset() { *m = PexResponse{} } +func (m *PexResponse) String() string { return proto.CompactTextString(m) } +func (*PexResponse) ProtoMessage() {} +func (*PexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{2} } -func (m *PexAddrs) XXX_Unmarshal(b []byte) error { +func (m *PexResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PexAddrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_PexAddrs.Marshal(b, m, deterministic) + return xxx_messageInfo_PexResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -84,44 +144,44 @@ func (m *PexAddrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *PexAddrs) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddrs.Merge(m, src) +func (m *PexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PexResponse.Merge(m, src) } -func (m *PexAddrs) XXX_Size() int { +func (m *PexResponse) XXX_Size() int { return m.Size() } -func (m *PexAddrs) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddrs.DiscardUnknown(m) +func (m *PexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PexResponse.DiscardUnknown(m) } -var 
xxx_messageInfo_PexAddrs proto.InternalMessageInfo +var xxx_messageInfo_PexResponse proto.InternalMessageInfo -func (m *PexAddrs) GetAddrs() []NetAddress { +func (m *PexResponse) GetAddresses() []PexAddress { if m != nil { - return m.Addrs + return m.Addresses } return nil } -type Message struct { +type PexMessage struct { // Types that are valid to be assigned to Sum: - // *Message_PexRequest - // *Message_PexAddrs - Sum isMessage_Sum `protobuf_oneof:"sum"` + // *PexMessage_PexRequest + // *PexMessage_PexResponse + Sum isPexMessage_Sum `protobuf_oneof:"sum"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{2} +func (m *PexMessage) Reset() { *m = PexMessage{} } +func (m *PexMessage) String() string { return proto.CompactTextString(m) } +func (*PexMessage) ProtoMessage() {} +func (*PexMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{3} } -func (m *Message) XXX_Unmarshal(b []byte) error { +func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PexMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) + return xxx_messageInfo_PexMessage.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -131,90 +191,136 @@ func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) +func (m *PexMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_PexMessage.Merge(m, src) } -func (m *Message) XXX_Size() int { +func (m *PexMessage) XXX_Size() int { return m.Size() } -func (m 
*Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) +func (m *PexMessage) XXX_DiscardUnknown() { + xxx_messageInfo_PexMessage.DiscardUnknown(m) } -var xxx_messageInfo_Message proto.InternalMessageInfo +var xxx_messageInfo_PexMessage proto.InternalMessageInfo -type isMessage_Sum interface { - isMessage_Sum() +type isPexMessage_Sum interface { + isPexMessage_Sum() MarshalTo([]byte) (int, error) Size() int } -type Message_PexRequest struct { +type PexMessage_PexRequest struct { PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } -type Message_PexAddrs struct { - PexAddrs *PexAddrs `protobuf:"bytes,2,opt,name=pex_addrs,json=pexAddrs,proto3,oneof" json:"pex_addrs,omitempty"` +type PexMessage_PexResponse struct { + PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` } -func (*Message_PexRequest) isMessage_Sum() {} -func (*Message_PexAddrs) isMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (m *Message) GetSum() isMessage_Sum { +func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { return m.Sum } return nil } -func (m *Message) GetPexRequest() *PexRequest { - if x, ok := m.GetSum().(*Message_PexRequest); ok { +func (m *PexMessage) GetPexRequest() *PexRequest { + if x, ok := m.GetSum().(*PexMessage_PexRequest); ok { return x.PexRequest } return nil } -func (m *Message) GetPexAddrs() *PexAddrs { - if x, ok := m.GetSum().(*Message_PexAddrs); ok { - return x.PexAddrs +func (m *PexMessage) GetPexResponse() *PexResponse { + if x, ok := m.GetSum().(*PexMessage_PexResponse); ok { + return x.PexResponse } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Message) XXX_OneofWrappers() []interface{} { +func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ - (*Message_PexRequest)(nil), - (*Message_PexAddrs)(nil), + (*PexMessage_PexRequest)(nil), + (*PexMessage_PexResponse)(nil), } } func init() { + proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") - proto.RegisterType((*PexAddrs)(nil), "tendermint.p2p.PexAddrs") - proto.RegisterType((*Message)(nil), "tendermint.p2p.Message") + proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") + proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 268 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, - 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x49, 0xa1, 0xa9, - 0x2c, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0xa8, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, - 0x41, 0x2c, 0x88, 0xa8, 0x12, 0x0f, 0x17, 0x57, 0x40, 0x6a, 0x45, 0x50, 0x6a, 0x61, 0x69, 0x6a, - 0x71, 0x89, 0x92, 0x13, 0x17, 0x47, 0x40, 0x6a, 0x85, 0x63, 0x4a, 0x4a, 0x51, 0xb1, 0x90, 0x19, - 0x17, 0x6b, 0x22, 0x88, 0x21, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, 0xa5, 0x87, 0x6a, 0x97, - 0x9e, 0x5f, 0x6a, 0x09, 0x48, 0x61, 0x6a, 0x71, 0xb1, 0x13, 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, - 0x10, 0xe5, 0x4a, 0x1d, 0x8c, 0x5c, 0xec, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0xb6, - 0x5c, 0xdc, 0x05, 0xa9, 0x15, 0xf1, 0x45, 0x10, 0xe3, 0x25, 0x18, 0x15, 0x18, 0xb1, 0x99, 0x84, - 0x70, 0x80, 0x07, 0x43, 0x10, 0x57, 0x01, 0x9c, 0x27, 0x64, 0xce, 0xc5, 0x09, 0xd2, 0x0e, 0x71, - 0x06, 0x13, 0x58, 0xb3, 0x04, 0x16, 0xcd, 
0x60, 0xf7, 0x7a, 0x30, 0x04, 0x71, 0x14, 0x40, 0xd9, - 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, - 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, - 0x2c, 0xc7, 0x10, 0x65, 0x9a, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, - 0x14, 0x66, 0xc8, 0xc1, 0x07, 0x0e, 0x29, 0xd4, 0xf0, 0x4c, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x0b, 0xcb, 0x40, 0x92, 0x01, 0x00, 0x00, + // 310 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x31, 0x4b, 0xc3, 0x40, + 0x18, 0xbd, 0x4b, 0x6b, 0xa1, 0x97, 0xea, 0x70, 0x88, 0x84, 0x0a, 0xd7, 0x92, 0xa9, 0x53, 0x02, + 0x11, 0x47, 0x45, 0x83, 0x43, 0x1d, 0x8a, 0xe5, 0x46, 0x17, 0x69, 0xcd, 0x47, 0xcc, 0xd0, 0xde, + 0x67, 0xee, 0x0a, 0xfd, 0x19, 0x0e, 0xfe, 0xa8, 0x8e, 0x1d, 0x9d, 0x8a, 0xa4, 0x7f, 0x44, 0xbc, + 0x13, 0x93, 0x42, 0xb7, 0x7b, 0xef, 0xfb, 0xde, 0xfb, 0xde, 0xf1, 0x58, 0x60, 0x60, 0x99, 0x41, + 0xb9, 0x28, 0x96, 0x26, 0xc6, 0x04, 0x63, 0x84, 0x75, 0x84, 0xa5, 0x32, 0x8a, 0x9f, 0xd5, 0x93, + 0x08, 0x13, 0xec, 0x9f, 0xe7, 0x2a, 0x57, 0x76, 0x14, 0xff, 0xbe, 0xdc, 0x56, 0x38, 0x65, 0x6c, + 0x0a, 0xeb, 0xfb, 0x2c, 0x2b, 0x41, 0x6b, 0x7e, 0xc1, 0xbc, 0x22, 0x0b, 0xe8, 0x90, 0x8e, 0xba, + 0x69, 0xa7, 0xda, 0x0d, 0xbc, 0xc7, 0x07, 0xe9, 0x15, 0x99, 0xe5, 0x31, 0xf0, 0x1a, 0xfc, 0x54, + 0x7a, 0x05, 0x72, 0xce, 0xda, 0xa8, 0x4a, 0x13, 0xb4, 0x86, 0x74, 0x74, 0x2a, 0xed, 0x3b, 0xec, + 0x59, 0x47, 0x09, 0xef, 0x2b, 0xd0, 0x26, 0x9c, 0x30, 0xdf, 0x22, 0x8d, 0x6a, 0xa9, 0x81, 0xdf, + 0xb2, 0xee, 0xcc, 0xdd, 0x02, 0x1d, 0xd0, 0x61, 0x6b, 0xe4, 0x27, 0xfd, 0xe8, 0x30, 0x68, 0x54, + 0xe7, 0x49, 0xdb, 0x9b, 0xdd, 0x80, 0xc8, 0x5a, 0x12, 0x7e, 0x52, 0xeb, 0x3e, 0x01, 0xad, 0x67, + 0x39, 0xf0, 0x1b, 0xe6, 0x23, 0xac, 0x5f, 0x4a, 0x77, 0xcc, 0x06, 0x3f, 0x6e, 0xf8, 0x17, 0x67, + 0x4c, 0x24, 0xc3, 0x7f, 0xc4, 0xef, 0x58, 0xcf, 0xc9, 
0x5d, 0x3a, 0xfb, 0x41, 0x3f, 0xb9, 0x3c, + 0xaa, 0x77, 0x2b, 0x63, 0x22, 0x7d, 0xac, 0x61, 0x7a, 0xc2, 0x5a, 0x7a, 0xb5, 0x48, 0x9f, 0x36, + 0x95, 0xa0, 0xdb, 0x4a, 0xd0, 0xef, 0x4a, 0xd0, 0x8f, 0xbd, 0x20, 0xdb, 0xbd, 0x20, 0x5f, 0x7b, + 0x41, 0x9e, 0xaf, 0xf3, 0xc2, 0xbc, 0xad, 0xe6, 0xd1, 0xab, 0x5a, 0xc4, 0x8d, 0xaa, 0x9a, 0xad, + 0xd9, 0x4a, 0x0e, 0x6b, 0x9c, 0x77, 0x2c, 0x7b, 0xf5, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xa1, + 0x59, 0x3c, 0xdf, 0x01, 0x00, 0x00, +} + +func (m *PexAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PexAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != 0 { + i = encodeVarintPex(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x12 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *PexRequest) Marshal() (dAtA []byte, err error) { @@ -240,7 +346,7 @@ func (m *PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddrs) Marshal() (dAtA []byte, err error) { +func (m *PexResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -250,20 +356,20 @@ func (m *PexAddrs) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PexAddrs) MarshalTo(dAtA []byte) (int, error) { +func (m *PexResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PexAddrs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Addrs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -277,7 +383,7 @@ func (m *PexAddrs) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Message) Marshal() (dAtA []byte, err error) { +func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -287,12 +393,12 @@ func (m *Message) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Message) MarshalTo(dAtA []byte) (int, error) { +func (m *PexMessage) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PexMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -309,12 +415,12 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Message_PexRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *PexMessage_PexRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Message_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.PexRequest != nil { { @@ -330,16 +436,16 @@ func (m *Message_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Message_PexAddrs) 
MarshalTo(dAtA []byte) (int, error) { +func (m *PexMessage_PexResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Message_PexAddrs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.PexAddrs != nil { + if m.PexResponse != nil { { - size, err := m.PexAddrs.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PexResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -362,6 +468,26 @@ func encodeVarintPex(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *PexAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovPex(uint64(l)) + } + l = len(m.IP) + if l > 0 { + n += 1 + l + sovPex(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovPex(uint64(m.Port)) + } + return n +} + func (m *PexRequest) Size() (n int) { if m == nil { return 0 @@ -371,14 +497,14 @@ func (m *PexRequest) Size() (n int) { return n } -func (m *PexAddrs) Size() (n int) { +func (m *PexResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Addrs) > 0 { - for _, e := range m.Addrs { + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { l = e.Size() n += 1 + l + sovPex(uint64(l)) } @@ -386,7 +512,7 @@ func (m *PexAddrs) Size() (n int) { return n } -func (m *Message) Size() (n int) { +func (m *PexMessage) Size() (n int) { if m == nil { return 0 } @@ -398,7 +524,7 @@ func (m *Message) Size() (n int) { return n } -func (m *Message_PexRequest) Size() (n int) { +func (m *PexMessage_PexRequest) Size() (n int) { if m == nil { return 0 } @@ -410,14 +536,14 @@ func (m *Message_PexRequest) Size() (n int) { } return n } -func (m *Message_PexAddrs) Size() (n int) { +func (m *PexMessage_PexResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.PexAddrs != nil { - l = 
m.PexAddrs.Size() + if m.PexResponse != nil { + l = m.PexResponse.Size() n += 1 + l + sovPex(uint64(l)) } return n @@ -429,6 +555,139 @@ func sovPex(x uint64) (n int) { func sozPex(x uint64) (n int) { return sovPex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (m *PexAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PexAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PexAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthPex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPex(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PexRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -479,7 +738,7 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *PexAddrs) Unmarshal(dAtA []byte) error { +func (m *PexResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -502,15 +761,15 @@ func (m *PexAddrs) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PexAddrs: wiretype end group for non-group") + return fmt.Errorf("proto: PexResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddrs: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PexResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } var msglen int for shift := uint(0); 
; shift += 7 { @@ -537,8 +796,8 @@ func (m *PexAddrs) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addrs = append(m.Addrs, NetAddress{}) - if err := m.Addrs[len(m.Addrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Addresses = append(m.Addresses, PexAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -563,7 +822,7 @@ func (m *PexAddrs) Unmarshal(dAtA []byte) error { } return nil } -func (m *Message) Unmarshal(dAtA []byte) error { +func (m *PexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -586,10 +845,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") + return fmt.Errorf("proto: PexMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -625,11 +884,11 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Sum = &Message_PexRequest{v} + m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexAddrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -656,11 +915,11 @@ func (m *Message) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &PexAddrs{} + v := &PexResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Sum = &Message_PexAddrs{v} + m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex default: iNdEx = preIndex diff 
--git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto index dfe238dbe..48e1cfce3 100644 --- a/proto/tendermint/p2p/pex.proto +++ b/proto/tendermint/p2p/pex.proto @@ -3,18 +3,23 @@ package tendermint.p2p; option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; -import "tendermint/p2p/types.proto"; import "gogoproto/gogo.proto"; +message PexAddress { + string id = 1 [(gogoproto.customname) = "ID"]; + string ip = 2 [(gogoproto.customname) = "IP"]; + uint32 port = 3; +} + message PexRequest {} -message PexAddrs { - repeated NetAddress addrs = 1 [(gogoproto.nullable) = false]; +message PexResponse { + repeated PexAddress addresses = 1 [(gogoproto.nullable) = false]; } -message Message { +message PexMessage { oneof sum { - PexRequest pex_request = 1; - PexAddrs pex_addrs = 2; + PexRequest pex_request = 1; + PexResponse pex_response = 2; } } diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index 0a888f501..bffa6884f 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -7,15 +7,19 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -23,66 +27,6 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type NetAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` -} - -func (m *NetAddress) Reset() { *m = NetAddress{} } -func (m *NetAddress) String() string { return proto.CompactTextString(m) } -func (*NetAddress) ProtoMessage() {} -func (*NetAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{0} -} -func (m *NetAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NetAddress.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NetAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetAddress.Merge(m, src) -} -func (m *NetAddress) XXX_Size() int { - return m.Size() -} -func (m *NetAddress) XXX_DiscardUnknown() { - xxx_messageInfo_NetAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_NetAddress proto.InternalMessageInfo - -func (m *NetAddress) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *NetAddress) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -func (m *NetAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type ProtocolVersion struct { P2P uint64 `protobuf:"varint,1,opt,name=p2p,proto3" json:"p2p,omitempty"` Block uint64 `protobuf:"varint,2,opt,name=block,proto3" json:"block,omitempty"` @@ -93,7 +37,7 @@ func (m *ProtocolVersion) Reset() { *m = ProtocolVersion{} } func (m *ProtocolVersion) String() string { return proto.CompactTextString(m) } func (*ProtocolVersion) ProtoMessage() {} func (*ProtocolVersion) Descriptor() 
([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{1} + return fileDescriptor_c8a29e659aeca578, []int{0} } func (m *ProtocolVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +102,7 @@ func (m *NodeInfo) Reset() { *m = NodeInfo{} } func (m *NodeInfo) String() string { return proto.CompactTextString(m) } func (*NodeInfo) ProtoMessage() {} func (*NodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{2} + return fileDescriptor_c8a29e659aeca578, []int{1} } func (m *NodeInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -252,7 +196,7 @@ func (m *NodeInfoOther) Reset() { *m = NodeInfoOther{} } func (m *NodeInfoOther) String() string { return proto.CompactTextString(m) } func (*NodeInfoOther) ProtoMessage() {} func (*NodeInfoOther) Descriptor() ([]byte, []int) { - return fileDescriptor_c8a29e659aeca578, []int{3} + return fileDescriptor_c8a29e659aeca578, []int{2} } func (m *NodeInfoOther) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -295,89 +239,185 @@ func (m *NodeInfoOther) GetRPCAddress() string { return "" } +type PeerInfo struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AddressInfo []*PeerAddressInfo `protobuf:"bytes,2,rep,name=address_info,json=addressInfo,proto3" json:"address_info,omitempty"` + LastConnected *time.Time `protobuf:"bytes,3,opt,name=last_connected,json=lastConnected,proto3,stdtime" json:"last_connected,omitempty"` +} + +func (m *PeerInfo) Reset() { *m = PeerInfo{} } +func (m *PeerInfo) String() string { return proto.CompactTextString(m) } +func (*PeerInfo) ProtoMessage() {} +func (*PeerInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c8a29e659aeca578, []int{3} +} +func (m *PeerInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerInfo.Marshal(b, m, deterministic) + } else { 
+ b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PeerInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerInfo.Merge(m, src) +} +func (m *PeerInfo) XXX_Size() int { + return m.Size() +} +func (m *PeerInfo) XXX_DiscardUnknown() { + xxx_messageInfo_PeerInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerInfo proto.InternalMessageInfo + +func (m *PeerInfo) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *PeerInfo) GetAddressInfo() []*PeerAddressInfo { + if m != nil { + return m.AddressInfo + } + return nil +} + +func (m *PeerInfo) GetLastConnected() *time.Time { + if m != nil { + return m.LastConnected + } + return nil +} + +type PeerAddressInfo struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + LastDialSuccess *time.Time `protobuf:"bytes,2,opt,name=last_dial_success,json=lastDialSuccess,proto3,stdtime" json:"last_dial_success,omitempty"` + LastDialFailure *time.Time `protobuf:"bytes,3,opt,name=last_dial_failure,json=lastDialFailure,proto3,stdtime" json:"last_dial_failure,omitempty"` + DialFailures uint32 `protobuf:"varint,4,opt,name=dial_failures,json=dialFailures,proto3" json:"dial_failures,omitempty"` +} + +func (m *PeerAddressInfo) Reset() { *m = PeerAddressInfo{} } +func (m *PeerAddressInfo) String() string { return proto.CompactTextString(m) } +func (*PeerAddressInfo) ProtoMessage() {} +func (*PeerAddressInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c8a29e659aeca578, []int{4} +} +func (m *PeerAddressInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerAddressInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerAddressInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*PeerAddressInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerAddressInfo.Merge(m, src) +} +func (m *PeerAddressInfo) XXX_Size() int { + return m.Size() +} +func (m *PeerAddressInfo) XXX_DiscardUnknown() { + xxx_messageInfo_PeerAddressInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerAddressInfo proto.InternalMessageInfo + +func (m *PeerAddressInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *PeerAddressInfo) GetLastDialSuccess() *time.Time { + if m != nil { + return m.LastDialSuccess + } + return nil +} + +func (m *PeerAddressInfo) GetLastDialFailure() *time.Time { + if m != nil { + return m.LastDialFailure + } + return nil +} + +func (m *PeerAddressInfo) GetDialFailures() uint32 { + if m != nil { + return m.DialFailures + } + return 0 +} + func init() { - proto.RegisterType((*NetAddress)(nil), "tendermint.p2p.NetAddress") proto.RegisterType((*ProtocolVersion)(nil), "tendermint.p2p.ProtocolVersion") proto.RegisterType((*NodeInfo)(nil), "tendermint.p2p.NodeInfo") proto.RegisterType((*NodeInfoOther)(nil), "tendermint.p2p.NodeInfoOther") + proto.RegisterType((*PeerInfo)(nil), "tendermint.p2p.PeerInfo") + proto.RegisterType((*PeerAddressInfo)(nil), "tendermint.p2p.PeerAddressInfo") } func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } var fileDescriptor_c8a29e659aeca578 = []byte{ - // 468 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x52, 0xcd, 0x8e, 0xda, 0x3c, - 0x14, 0x25, 0x21, 0x10, 0xe6, 0xf2, 0xcd, 0x8f, 0xac, 0xd1, 0xa7, 0x0c, 0x52, 0x13, 0x44, 0x37, - 0xac, 0x12, 0x29, 0x55, 0x17, 0x5d, 0x96, 0xce, 0x86, 0xcd, 0x4c, 0x64, 0x55, 0x5d, 0xb4, 0x0b, - 0x04, 0xb1, 0x0b, 0x16, 0x60, 0x5b, 0x8e, 0xdb, 0xd2, 0xb7, 0xe8, 0x5b, 0x75, 0x96, 0xb3, 0xec, - 0x2a, 0xaa, 0xc2, 0x8b, 0x54, 0x76, 0x42, 0x0b, 0xec, 0xee, 0xb9, 0xc7, 0xf7, 0xe7, 0x5c, 0x1f, - 0x18, 0x68, 0xca, 0x09, 0x55, 0x5b, 0xc6, 0x75, 0x22, 0x53, 
0x99, 0xe8, 0xef, 0x92, 0x16, 0xb1, - 0x54, 0x42, 0x0b, 0x74, 0xf5, 0x8f, 0x8b, 0x65, 0x2a, 0x07, 0xb7, 0x4b, 0xb1, 0x14, 0x96, 0x4a, - 0x4c, 0x54, 0xbf, 0x1a, 0x65, 0x00, 0x0f, 0x54, 0xbf, 0x25, 0x44, 0xd1, 0xa2, 0x40, 0xff, 0x83, - 0xcb, 0x48, 0xe0, 0x0c, 0x9d, 0xf1, 0xc5, 0xa4, 0x5b, 0x95, 0x91, 0x3b, 0xbd, 0xc7, 0x2e, 0x23, - 0x36, 0x2f, 0x03, 0xf7, 0x28, 0x9f, 0x61, 0x97, 0x49, 0x84, 0xc0, 0x93, 0x42, 0xe9, 0xa0, 0x3d, - 0x74, 0xc6, 0x97, 0xd8, 0xc6, 0xa3, 0xf7, 0x70, 0x9d, 0x99, 0xd6, 0xb9, 0xd8, 0x7c, 0xa0, 0xaa, - 0x60, 0x82, 0xa3, 0x3b, 0x68, 0xcb, 0x54, 0xda, 0xbe, 0xde, 0xc4, 0xaf, 0xca, 0xa8, 0x9d, 0xa5, - 0x19, 0x36, 0x39, 0x74, 0x0b, 0x9d, 0xc5, 0x46, 0xe4, 0x6b, 0xdb, 0xdc, 0xc3, 0x35, 0x40, 0x37, - 0xd0, 0x9e, 0x4b, 0x69, 0xdb, 0x7a, 0xd8, 0x84, 0xa3, 0x9f, 0x2e, 0xf4, 0x1e, 0x04, 0xa1, 0x53, - 0xfe, 0x59, 0xa0, 0x0c, 0x6e, 0x64, 0x33, 0x62, 0xf6, 0xb5, 0x9e, 0x61, 0x9b, 0xf7, 0xd3, 0x28, - 0x3e, 0x55, 0x1d, 0x9f, 0xad, 0x32, 0xf1, 0x9e, 0xca, 0xa8, 0x85, 0xaf, 0xe5, 0xd9, 0x86, 0x2f, - 0xc1, 0xe7, 0x82, 0xd0, 0x19, 0x23, 0x8d, 0x4a, 0xa8, 0xca, 0xa8, 0x6b, 0x07, 0xde, 0xe3, 0xae, - 0xa1, 0xa6, 0x04, 0x45, 0xd0, 0xdf, 0xb0, 0x42, 0x53, 0x3e, 0x9b, 0x13, 0xa2, 0xec, 0x76, 0x17, - 0x18, 0xea, 0x94, 0xb9, 0x20, 0x0a, 0xc0, 0xe7, 0x54, 0x7f, 0x13, 0x6a, 0x1d, 0x78, 0x96, 0x3c, - 0x40, 0xc3, 0x1c, 0x16, 0xed, 0xd4, 0x4c, 0x03, 0xd1, 0x00, 0x7a, 0xf9, 0x6a, 0xce, 0x39, 0xdd, - 0x14, 0x41, 0x77, 0xe8, 0x8c, 0xff, 0xc3, 0x7f, 0xb1, 0xa9, 0xda, 0x0a, 0xce, 0xd6, 0x54, 0x05, - 0x7e, 0x5d, 0xd5, 0x40, 0xf4, 0x06, 0x3a, 0x42, 0xaf, 0xa8, 0x0a, 0x7a, 0x56, 0xf6, 0x8b, 0x73, - 0xd9, 0x87, 0x53, 0x3d, 0x9a, 0x47, 0x8d, 0xe8, 0xba, 0x62, 0xf4, 0x09, 0x2e, 0x4f, 0x58, 0x74, - 0x07, 0x3d, 0xbd, 0x9b, 0x31, 0x4e, 0xe8, 0xae, 0xfe, 0x7a, 0xec, 0xeb, 0xdd, 0xd4, 0x40, 0x94, - 0x40, 0x5f, 0xc9, 0xdc, 0xca, 0xa5, 0x45, 0xd1, 0x9c, 0xe6, 0xaa, 0x2a, 0x23, 0xc0, 0xd9, 0xbb, - 0xc6, 0x34, 0x18, 0x94, 0xcc, 0x9b, 0x78, 0xf2, 0xf8, 0x54, 0x85, 0xce, 0x73, 0x15, 0x3a, 0xbf, - 
0xab, 0xd0, 0xf9, 0xb1, 0x0f, 0x5b, 0xcf, 0xfb, 0xb0, 0xf5, 0x6b, 0x1f, 0xb6, 0x3e, 0xbe, 0x5e, - 0x32, 0xbd, 0xfa, 0xb2, 0x88, 0x73, 0xb1, 0x4d, 0x8e, 0x5c, 0x7b, 0x6c, 0x60, 0xeb, 0xcd, 0x53, - 0x47, 0x2f, 0xba, 0x36, 0xfb, 0xea, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x32, 0x6b, 0x65, - 0xea, 0x02, 0x00, 0x00, -} - -func (m *NetAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetAddress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintTypes(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + // 610 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x4e, 0x1b, 0x3d, + 0x14, 0xcd, 0x24, 0x21, 0x09, 0x37, 0x84, 0xf0, 0x59, 0xe8, 0xd3, 0x10, 0xa9, 0x19, 0x14, 0x36, + 0xac, 0x26, 0x52, 0xaa, 0x2e, 0xba, 0x64, 0x40, 0xad, 0x22, 0x55, 0x25, 0x9a, 0xa2, 0x2e, 0xda, + 0xc5, 0x68, 0x32, 0x76, 0x82, 0xc5, 0xc4, 0xb6, 0x3c, 0x4e, 0x4b, 0xdf, 0x82, 0x37, 0xe9, 0x63, + 0x94, 0x25, 0xcb, 0xae, 0xd2, 0x6a, 0xd8, 0xf6, 0x21, 0x2a, 0xdb, 0x33, 0x40, 0xa2, 0x2e, 0xd8, + 0xf9, 0xdc, 0xe3, 0x73, 0xee, 0x8f, 0xad, 0x0b, 0x3d, 0x45, 0x18, 0x26, 0x72, 0x41, 0x99, 0x1a, + 0x8a, 0x91, 0x18, 0xaa, 0x6f, 0x82, 0x64, 0xbe, 0x90, 0x5c, 0x71, 0xb4, 0xfb, 0xc8, 0xf9, 0x62, + 0x24, 0x7a, 0xfb, 0x73, 0x3e, 0xe7, 0x86, 0x1a, 0xea, 0x93, 0xbd, 0xd5, 
0xf3, 0xe6, 0x9c, 0xcf, + 0x53, 0x32, 0x34, 0x68, 0xba, 0x9c, 0x0d, 0x15, 0x5d, 0x90, 0x4c, 0xc5, 0x0b, 0x61, 0x2f, 0x0c, + 0x2e, 0xa0, 0x3b, 0xd1, 0x87, 0x84, 0xa7, 0x1f, 0x89, 0xcc, 0x28, 0x67, 0xe8, 0x00, 0x6a, 0x62, + 0x24, 0x5c, 0xe7, 0xd0, 0x39, 0xae, 0x07, 0xcd, 0x7c, 0xe5, 0xd5, 0x26, 0xa3, 0x49, 0xa8, 0x63, + 0x68, 0x1f, 0xb6, 0xa6, 0x29, 0x4f, 0xae, 0xdc, 0xaa, 0x26, 0x43, 0x0b, 0xd0, 0x1e, 0xd4, 0x62, + 0x21, 0xdc, 0x9a, 0x89, 0xe9, 0xe3, 0xe0, 0x47, 0x15, 0x5a, 0xef, 0x39, 0x26, 0x63, 0x36, 0xe3, + 0x68, 0x02, 0x7b, 0xa2, 0x48, 0x11, 0x7d, 0xb1, 0x39, 0x8c, 0x79, 0x7b, 0xe4, 0xf9, 0xeb, 0x4d, + 0xf8, 0x1b, 0xa5, 0x04, 0xf5, 0xdb, 0x95, 0x57, 0x09, 0xbb, 0x62, 0xa3, 0xc2, 0x23, 0x68, 0x32, + 0x8e, 0x49, 0x44, 0xb1, 0x29, 0x64, 0x3b, 0x80, 0x7c, 0xe5, 0x35, 0x4c, 0xc2, 0xb3, 0xb0, 0xa1, + 0xa9, 0x31, 0x46, 0x1e, 0xb4, 0x53, 0x9a, 0x29, 0xc2, 0xa2, 0x18, 0x63, 0x69, 0xaa, 0xdb, 0x0e, + 0xc1, 0x86, 0x4e, 0x30, 0x96, 0xc8, 0x85, 0x26, 0x23, 0xea, 0x2b, 0x97, 0x57, 0x6e, 0xdd, 0x90, + 0x25, 0xd4, 0x4c, 0x59, 0xe8, 0x96, 0x65, 0x0a, 0x88, 0x7a, 0xd0, 0x4a, 0x2e, 0x63, 0xc6, 0x48, + 0x9a, 0xb9, 0x8d, 0x43, 0xe7, 0x78, 0x27, 0x7c, 0xc0, 0x5a, 0xb5, 0xe0, 0x8c, 0x5e, 0x11, 0xe9, + 0x36, 0xad, 0xaa, 0x80, 0xe8, 0x35, 0x6c, 0x71, 0x75, 0x49, 0xa4, 0xdb, 0x32, 0x6d, 0xbf, 0xd8, + 0x6c, 0xbb, 0x1c, 0xd5, 0xb9, 0xbe, 0x54, 0x34, 0x6d, 0x15, 0x83, 0xcf, 0xd0, 0x59, 0x63, 0xd1, + 0x01, 0xb4, 0xd4, 0x75, 0x44, 0x19, 0x26, 0xd7, 0x66, 0x8a, 0xdb, 0x61, 0x53, 0x5d, 0x8f, 0x35, + 0x44, 0x43, 0x68, 0x4b, 0x91, 0x98, 0x76, 0x49, 0x96, 0x15, 0xa3, 0xd9, 0xcd, 0x57, 0x1e, 0x84, + 0x93, 0xd3, 0x13, 0x1b, 0x0d, 0x41, 0x8a, 0xa4, 0x38, 0x0f, 0xbe, 0x3b, 0xd0, 0x9a, 0x10, 0x22, + 0xcd, 0x33, 0xfd, 0x0f, 0x55, 0x8a, 0xad, 0x65, 0xd0, 0xc8, 0x57, 0x5e, 0x75, 0x7c, 0x16, 0x56, + 0x29, 0x46, 0x01, 0xec, 0x14, 0x8e, 0x11, 0x65, 0x33, 0xee, 0x56, 0x0f, 0x6b, 0xff, 0x7c, 0x3a, + 0x42, 0x64, 0xe1, 0xab, 0xed, 0xc2, 0x76, 0xfc, 0x08, 0xd0, 0x5b, 0xd8, 0x4d, 0xe3, 0x4c, 0x45, + 0x09, 0x67, 
0x8c, 0x24, 0x8a, 0x60, 0xf3, 0x1c, 0xed, 0x51, 0xcf, 0xb7, 0xff, 0xd3, 0x2f, 0xff, + 0xa7, 0x7f, 0x51, 0xfe, 0xcf, 0xa0, 0x7e, 0xf3, 0xcb, 0x73, 0xc2, 0x8e, 0xd6, 0x9d, 0x96, 0xb2, + 0xc1, 0x1f, 0x07, 0xba, 0x1b, 0x99, 0xf4, 0xdc, 0xcb, 0x96, 0x8b, 0x81, 0x14, 0x10, 0xbd, 0x83, + 0xff, 0x4c, 0x5a, 0x4c, 0xe3, 0x34, 0xca, 0x96, 0x49, 0x52, 0x8e, 0xe5, 0x39, 0x99, 0xbb, 0x5a, + 0x7a, 0x46, 0xe3, 0xf4, 0x83, 0x15, 0xae, 0xbb, 0xcd, 0x62, 0x9a, 0x2e, 0x25, 0x79, 0x76, 0x1f, + 0x0f, 0x6e, 0x6f, 0xac, 0x10, 0x1d, 0x41, 0xe7, 0xa9, 0x51, 0x66, 0xfe, 0x60, 0x27, 0xdc, 0xc1, + 0x8f, 0x77, 0xb2, 0xe0, 0xfc, 0x36, 0xef, 0x3b, 0x77, 0x79, 0xdf, 0xf9, 0x9d, 0xf7, 0x9d, 0x9b, + 0xfb, 0x7e, 0xe5, 0xee, 0xbe, 0x5f, 0xf9, 0x79, 0xdf, 0xaf, 0x7c, 0x7a, 0x35, 0xa7, 0xea, 0x72, + 0x39, 0xf5, 0x13, 0xbe, 0x18, 0x3e, 0xd9, 0x12, 0x4f, 0x17, 0x86, 0xd9, 0x05, 0xeb, 0x1b, 0x64, + 0xda, 0x30, 0xd1, 0x97, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0b, 0xe9, 0x56, 0xd3, 0x5a, 0x04, + 0x00, 0x00, } func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { @@ -540,6 +580,115 @@ func (m *NodeInfoOther) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PeerInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PeerInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastConnected != nil { + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintTypes(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x1a + } + if len(m.AddressInfo) > 0 { + for iNdEx := len(m.AddressInfo) - 
1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AddressInfo[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PeerAddressInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PeerAddressInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PeerAddressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DialFailures != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.DialFailures)) + i-- + dAtA[i] = 0x20 + } + if m.LastDialFailure != nil { + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialFailure, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialFailure):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintTypes(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x1a + } + if m.LastDialSuccess != nil { + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialSuccess, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialSuccess):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintTypes(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -551,26 +700,6 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { 
dAtA[offset] = uint8(v) return base } -func (m *NetAddress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.Port != 0 { - n += 1 + sovTypes(uint64(m.Port)) - } - return n -} - func (m *ProtocolVersion) Size() (n int) { if m == nil { return 0 @@ -643,145 +772,59 @@ func (m *NodeInfoOther) Size() (n int) { return n } +func (m *PeerInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.AddressInfo) > 0 { + for _, e := range m.AddressInfo { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.LastConnected != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected) + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PeerAddressInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastDialSuccess != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialSuccess) + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastDialFailure != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialFailure) + n += 1 + l + sovTypes(uint64(l)) + } + if m.DialFailures != 0 { + n += 1 + sovTypes(uint64(m.DialFailures)) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *NetAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } 
- default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1313,6 +1356,331 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { } return nil } +func (m *PeerInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressInfo", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressInfo = append(m.AddressInfo, &PeerAddressInfo{}) + if err := m.AddressInfo[len(m.AddressInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastConnected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastConnected == nil { + m.LastConnected = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastConnected, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerAddressInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerAddressInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastDialSuccess", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastDialSuccess == nil { + m.LastDialSuccess = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastDialSuccess, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
LastDialFailure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastDialFailure == nil { + m.LastDialFailure = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastDialFailure, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DialFailures", wireType) + } + m.DialFailures = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DialFailures |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index 87f50e81b..216a6d8d0 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -4,12 +4,7 @@ package tendermint.p2p; option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; import "gogoproto/gogo.proto"; - -message NetAddress { - string id = 1 [(gogoproto.customname) = "ID"]; - string ip = 2 [(gogoproto.customname) = "IP"]; - uint32 port = 3; -} +import 
"google/protobuf/timestamp.proto"; message ProtocolVersion { uint64 p2p = 1 [(gogoproto.customname) = "P2P"]; @@ -32,3 +27,16 @@ message NodeInfoOther { string tx_index = 1; string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"]; } + +message PeerInfo { + string id = 1 [(gogoproto.customname) = "ID"]; + repeated PeerAddressInfo address_info = 2; + google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; +} + +message PeerAddressInfo { + string address = 1; + google.protobuf.Timestamp last_dial_success = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_dial_failure = 3 [(gogoproto.stdtime) = true]; + uint32 dial_failures = 4; +} diff --git a/state/execution.go b/state/execution.go index b8054fde2..3450b60dc 100644 --- a/state/execution.go +++ b/state/execution.go @@ -238,7 +238,7 @@ func (blockExec *BlockExecutor) Commit( "Committed state", "height", block.Height, "txs", len(block.Txs), - "appHash", fmt.Sprintf("%X", res.Data), + "appHash", res.Data, ) // Update mempool. 
diff --git a/statesync/syncer.go b/statesync/syncer.go index f87129c87..425705cb1 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -114,7 +114,7 @@ func (s *syncer) AddSnapshot(peerID p2p.NodeID, snapshot *snapshot) (bool, error } if added { s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, - "hash", fmt.Sprintf("%X", snapshot.Hash)) + "hash", snapshot.Hash) } return added, nil } @@ -184,18 +184,18 @@ func (s *syncer) SyncAny(discoveryTime time.Duration) (sm.State, *types.Commit, case errors.Is(err, errRetrySnapshot): chunks.RetryAll() s.logger.Info("Retrying snapshot", "height", snapshot.Height, "format", snapshot.Format, - "hash", fmt.Sprintf("%X", snapshot.Hash)) + "hash", snapshot.Hash) continue case errors.Is(err, errTimeout): s.snapshots.Reject(snapshot) s.logger.Error("Timed out waiting for snapshot chunks, rejected snapshot", - "height", snapshot.Height, "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) case errors.Is(err, errRejectSnapshot): s.snapshots.Reject(snapshot) s.logger.Info("Snapshot rejected", "height", snapshot.Height, "format", snapshot.Format, - "hash", fmt.Sprintf("%X", snapshot.Hash)) + "hash", snapshot.Hash) case errors.Is(err, errRejectFormat): s.snapshots.RejectFormat(snapshot.Format) @@ -203,7 +203,7 @@ func (s *syncer) SyncAny(discoveryTime time.Duration) (sm.State, *types.Commit, case errors.Is(err, errRejectSender): s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format, - "hash", fmt.Sprintf("%X", snapshot.Hash)) + "hash", snapshot.Hash) for _, peer := range s.snapshots.GetPeers(snapshot) { s.snapshots.RejectPeer(peer) s.logger.Info("Snapshot sender rejected", "peer", peer) @@ -280,7 +280,7 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. // Done! 
🎉 s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format, - "hash", fmt.Sprintf("%X", snapshot.Hash)) + "hash", snapshot.Hash) return state, commit, nil } @@ -289,7 +289,7 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. // response, or nil if the snapshot was accepted. func (s *syncer) offerSnapshot(snapshot *snapshot) error { s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, - "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + "format", snapshot.Format, "hash", snapshot.Hash) resp, err := s.conn.OfferSnapshotSync(context.Background(), abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, @@ -306,7 +306,7 @@ func (s *syncer) offerSnapshot(snapshot *snapshot) error { switch resp.Result { case abci.ResponseOfferSnapshot_ACCEPT: s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height, - "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + "format", snapshot.Format, "hash", snapshot.Hash) return nil case abci.ResponseOfferSnapshot_ABORT: return errAbort @@ -453,8 +453,8 @@ func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) { s.logger.Error("appHash verification failed", - "expected", fmt.Sprintf("%X", snapshot.trustedAppHash), - "actual", fmt.Sprintf("%X", resp.LastBlockAppHash)) + "expected", snapshot.trustedAppHash, + "actual", resp.LastBlockAppHash) return 0, errVerifyFailed } @@ -467,6 +467,6 @@ func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { return 0, errVerifyFailed } - s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", fmt.Sprintf("%X", snapshot.trustedAppHash)) + s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", snapshot.trustedAppHash) return resp.AppVersion, nil } diff --git a/test/README.md b/test/README.md index 0e0d666e5..230b7339c 100644 --- 
a/test/README.md +++ b/test/README.md @@ -14,3 +14,9 @@ and run the following tests in docker containers: - counter app over grpc - persistence tests - crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app + +## Fuzzing + +[Fuzzing](https://en.wikipedia.org/wiki/Fuzzing) of various system inputs. + +See `./fuzz/README.md` for more details. diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 492e362d8..c8c0d5c3c 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -50,6 +50,10 @@ type Manifest struct { // KeyType sets the curve that will be used by validators. // Options are ed25519 & secp256k1 KeyType string `toml:"key_type"` + + // LogLevel sets the log level of the entire testnet. This can be overridden + // by individual nodes. + LogLevel string `toml:"log_level"` } // ManifestNode represents a node in a testnet manifest. @@ -130,6 +134,11 @@ type ManifestNode struct { // For more information, look at the readme in the maverick folder. // A list of all behaviors can be found in ../maverick/consensus/behavior.go Misbehaviors map[string]string `toml:"misbehaviors"` + + // Log level sets the log level of the specific node i.e. "consensus:info,*:error". + // This is helpful when debugging a specific problem. This overrides the network + // level. + LogLevel string `toml:"log_level"` } // Save saves the testnet manifest to a file. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index da0890b00..5044ef39d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -60,6 +60,7 @@ type Testnet struct { ValidatorUpdates map[int64]map[*Node]int64 Nodes []*Node KeyType string + LogLevel string } // Node represents a Tendermint node in a testnet. 
@@ -84,6 +85,7 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation Misbehaviors map[int64]string + LogLevel string } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -123,6 +125,7 @@ func LoadTestnet(file string) (*Testnet, error) { ValidatorUpdates: map[int64]map[*Node]int64{}, Nodes: []*Node{}, KeyType: "ed25519", + LogLevel: manifest.LogLevel, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -159,6 +162,7 @@ func LoadTestnet(file string) (*Testnet, error) { RetainBlocks: nodeManifest.RetainBlocks, Perturbations: []Perturbation{}, Misbehaviors: make(map[int64]string), + LogLevel: manifest.LogLevel, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -188,6 +192,9 @@ func LoadTestnet(file string) (*Testnet, error) { } node.Misbehaviors[height] = misbehavior } + if nodeManifest.LogLevel != "" { + node.LogLevel = nodeManifest.LogLevel + } testnet.Nodes = append(testnet.Nodes, node) } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index d55fd95f2..bc3102bb0 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -116,6 +116,11 @@ func NewCLI() *CLI { cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, "Preserves the running of the test net after tests are completed") + cli.root.SetHelpCommand(&cobra.Command{ + Use: "no-help", + Hidden: true, + }) + cli.root.AddCommand(&cobra.Command{ Use: "setup", Short: "Generates the testnet directory and configuration", @@ -189,17 +194,26 @@ func NewCLI() *CLI { }) cli.root.AddCommand(&cobra.Command{ - Use: "logs", - Short: "Shows the testnet logs", + Use: "logs [node]", + Short: "Shows the testnet or a specefic node's logs", + Example: "runner logs valiator03", + Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + return execComposeVerbose(cli.testnet.Dir, "logs", args[0]) + } return 
execComposeVerbose(cli.testnet.Dir, "logs") }, }) cli.root.AddCommand(&cobra.Command{ - Use: "tail", - Short: "Tails the testnet logs", + Use: "tail [node]", + Short: "Tails the testnet or a specific node's logs", + Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + return execComposeVerbose(cli.testnet.Dir, "logs", "--follow", args[0]) + } return execComposeVerbose(cli.testnet.Dir, "logs", "--follow") }, }) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index ceb7f29d9..1b2d8e36a 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -122,17 +122,24 @@ func Setup(testnet *e2e.Testnet) error { func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { // Must use version 2 Docker Compose format, to support IPv6. tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ - "misbehaviorsToString": func(misbehaviors map[int64]string) string { - str := "" + "startCommands": func(misbehaviors map[int64]string, logLevel string) string { + command := "start" + misbehaviorString := "" for height, misbehavior := range misbehaviors { // after the first behavior set, a comma must be prepended - if str != "" { - str += "," + if misbehaviorString != "" { + misbehaviorString += "," } heightString := strconv.Itoa(int(height)) - str += misbehavior + "," + heightString + misbehaviorString += misbehavior + "," + heightString } - return str + if misbehaviorString != "" { + command += " --misbehaviors " + misbehaviorString + } + if logLevel != "" && logLevel != config.DefaultPackageLogLevels() { + command += " --log-level " + logLevel + } + return command }, }).Parse(`version: '2.4' @@ -160,7 +167,9 @@ services: entrypoint: /usr/bin/entrypoint-builtin {{- else if .Misbehaviors }} entrypoint: /usr/bin/entrypoint-maverick - command: ["start", "--misbehaviors", "{{ misbehaviorsToString .Misbehaviors }}"] +{{- end }} +{{- if ne .ABCIProtocol "builtin"}} + command: {{ startCommands 
.Misbehaviors .LogLevel }} {{- end }} init: true ports: @@ -227,6 +236,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg := config.DefaultConfig() cfg.Moniker = node.Name cfg.ProxyApp = AppAddressTCP + if node.LogLevel != "" { + cfg.LogLevel = node.LogLevel + } cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) cfg.P2P.AddrBookStrict = false diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile new file mode 100644 index 000000000..96b332dcf --- /dev/null +++ b/test/fuzz/Makefile @@ -0,0 +1,39 @@ +#!/usr/bin/make -f + +.PHONY: fuzz-mempool +fuzz-mempool: + cd mempool && \ + rm -f *-fuzz.zip && \ + go-fuzz-build && \ + go-fuzz + +.PHONY: fuzz-p2p-addrbook +fuzz-p2p-addrbook: + cd p2p/addrbook && \ + rm -f *-fuzz.zip && \ + go run ./init-corpus/main.go && \ + go-fuzz-build && \ + go-fuzz + +.PHONY: fuzz-p2p-pex +fuzz-p2p-pex: + cd p2p/pex && \ + rm -f *-fuzz.zip && \ + go run ./init-corpus/main.go && \ + go-fuzz-build && \ + go-fuzz + +.PHONY: fuzz-p2p-sc +fuzz-p2p-sc: + cd p2p/secret_connection && \ + rm -f *-fuzz.zip && \ + go run ./init-corpus/main.go && \ + go-fuzz-build && \ + go-fuzz + +.PHONY: fuzz-rpc-server +fuzz-rpc-server: + cd rpc/jsonrpc/server && \ + rm -f *-fuzz.zip && \ + go-fuzz-build && \ + go-fuzz diff --git a/test/fuzz/README.md b/test/fuzz/README.md new file mode 100644 index 000000000..707217afd --- /dev/null +++ b/test/fuzz/README.md @@ -0,0 +1,72 @@ +# fuzz + +Fuzzing for various packages in Tendermint using [go-fuzz](https://github.com/dvyukov/go-fuzz) library. + +Inputs: + +- mempool `CheckTx` (using kvstore in-process ABCI app) +- p2p `Addrbook#AddAddress` +- p2p `pex.Reactor#Receive` +- p2p `SecretConnection#Read` and `SecretConnection#Write` +- rpc jsonrpc server + +## Directory structure + +``` +| test +| |- corpus/ +| |- crashers/ +| |- init-corpus/ +| |- suppressions/ +| |- testdata/ +| |- .go +``` + +`/corpus` directory contains corpus data. 
The idea is to help the fuzzier to +understand what bytes sequences are semantically valid (e.g. if we're testing +PNG decoder, then we would put black-white PNG into corpus directory; with +blockchain reactor - we would put blockchain messages into corpus). + +`/init-corpus` (if present) contains a script for generating corpus data. + +`/testdata` directory may contain an additional data (like `addrbook.json`). + +Upon running the fuzzier, `/crashers` and `/suppressions` dirs will be created, +along with .zip archive. `/crashers` will show any inputs, which have +lead to panics (plus a trace). `/suppressions` will show any suppressed inputs. + +## Running + +```sh +make fuzz-mempool +make fuzz-p2p-addrbook +make fuzz-p2p-pex +make fuzz-p2p-sc +make fuzz-rpc-server +``` + +Each command will create corpus data (if needed), generate a fuzz archive and +call `go-fuzz` executable. + +Then watch out for the respective outputs in the fuzzer output to announce new +crashers which can be found in the directory `crashers`. + +For example if we find + +```sh +ls crashers/ +61bde465f47c93254d64d643c3b2480e0a54666e +61bde465f47c93254d64d643c3b2480e0a54666e.output +61bde465f47c93254d64d643c3b2480e0a54666e.quoted +da39a3ee5e6b4b0d3255bfef95601890afd80709 +da39a3ee5e6b4b0d3255bfef95601890afd80709.output +da39a3ee5e6b4b0d3255bfef95601890afd80709.quoted +``` + +the crashing bytes generated by the fuzzer will be in +`61bde465f47c93254d64d643c3b2480e0a54666e` the respective crash report in +`61bde465f47c93254d64d643c3b2480e0a54666e.output` + +and the bug report can be created by retrieving the bytes in +`61bde465f47c93254d64d643c3b2480e0a54666e` and feeding those back into the +`Fuzz` function. 
diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go new file mode 100644 index 000000000..3193b169d --- /dev/null +++ b/test/fuzz/mempool/checktx.go @@ -0,0 +1,34 @@ +package checktx + +import ( + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" +) + +var mempool mempl.Mempool + +func init() { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) + appConnMem, _ := cc.NewABCIClient() + err := appConnMem.Start() + if err != nil { + panic(err) + } + + cfg := config.DefaultMempoolConfig() + cfg.Broadcast = false + + mempool = mempl.NewCListMempool(cfg, appConnMem, 0) +} + +func Fuzz(data []byte) int { + err := mempool.CheckTx(data, nil, mempl.TxInfo{}) + if err != nil { + return 0 + } + + return 1 +} diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go new file mode 100644 index 000000000..f2799ef04 --- /dev/null +++ b/test/fuzz/p2p/addrbook/fuzz.go @@ -0,0 +1,35 @@ +// nolint: gosec +package addr + +import ( + "encoding/json" + "fmt" + "math/rand" + + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/pex" +) + +var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) + +func Fuzz(data []byte) int { + addr := new(p2p.NetAddress) + if err := json.Unmarshal(data, addr); err != nil { + return -1 + } + + // Fuzz AddAddress. + err := addrBook.AddAddress(addr, addr) + if err != nil { + return 0 + } + + // Also, make sure PickAddress always returns a non-nil address. 
+ bias := rand.Intn(100) + if p := addrBook.PickAddress(bias); p == nil { + panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", + bias, addrBook.Size())) + } + + return 1 +} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go new file mode 100644 index 000000000..ff9dd9107 --- /dev/null +++ b/test/fuzz/p2p/addrbook/init-corpus/main.go @@ -0,0 +1,58 @@ +// nolint: gosec +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/p2p" +) + +func main() { + baseDir := flag.String("base", ".", `where the "corpus" directory will live`) + flag.Parse() + + initCorpus(*baseDir) +} + +func initCorpus(baseDir string) { + log.SetFlags(0) + + // create "corpus" directory + corpusDir := filepath.Join(baseDir, "corpus") + if err := os.MkdirAll(corpusDir, 0755); err != nil { + log.Fatalf("Creating %q err: %v", corpusDir, err) + } + + // create corpus + privKey := ed25519.GenPrivKey() + addrs := []*p2p.NetAddress{ + {ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, + {ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, + {ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, + {ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, + {ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, + } + + for i, addr := range addrs { + filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) + + bz, err := json.Marshal(addr) + if err != nil { + log.Fatalf("can't marshal %v: %v", addr, err) + } + + if err := ioutil.WriteFile(filename, bz, 0644); err != nil { + log.Fatalf("can't write %v to %q: %v", addr, filename, err) + } + + log.Printf("wrote %q", filename) + } +} diff --git 
a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go new file mode 100644 index 000000000..7d574bd5a --- /dev/null +++ b/test/fuzz/p2p/pex/init-corpus/main.go @@ -0,0 +1,82 @@ +// nolint: gosec +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "os" + "path/filepath" + + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/p2p" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +func main() { + baseDir := flag.String("base", ".", `where the "corpus" directory will live`) + flag.Parse() + + initCorpus(*baseDir) +} + +func initCorpus(rootDir string) { + log.SetFlags(0) + + corpusDir := filepath.Join(rootDir, "corpus") + if err := os.MkdirAll(corpusDir, 0755); err != nil { + log.Fatalf("Creating %q err: %v", corpusDir, err) + } + sizes := []int{0, 1, 2, 17, 5, 31} + + // Make the PRNG predictable + rand.Seed(10) + + for _, n := range sizes { + var addrs []*p2p.NetAddress + + // IPv4 addresses + for i := 0; i < n; i++ { + privKey := ed25519.GenPrivKey() + addr := fmt.Sprintf( + "%s@%v.%v.%v.%v:26656", + p2p.NodeIDFromPubKey(privKey.PubKey()), + rand.Int()%256, + rand.Int()%256, + rand.Int()%256, + rand.Int()%256, + ) + netAddr, _ := p2p.NewNetAddressString(addr) + addrs = append(addrs, netAddr) + } + + // IPv6 addresses + privKey := ed25519.GenPrivKey() + ipv6a, err := p2p.NewNetAddressString( + fmt.Sprintf("%s@[ff02::1:114]:26656", p2p.NodeIDFromPubKey(privKey.PubKey()))) + if err != nil { + log.Fatalf("can't create a new netaddress: %v", err) + } + addrs = append(addrs, ipv6a) + + msg := tmp2p.PexMessage{ + Sum: &tmp2p.PexMessage_PexResponse{ + PexResponse: &tmp2p.PexResponse{Addresses: p2p.NetAddressesToProto(addrs)}, + }, + } + bz, err := msg.Marshal() + if err != nil { + log.Fatalf("unable to marshal: %v", err) + } + + filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) + + if err := ioutil.WriteFile(filename, bz, 0644); err != nil { + 
log.Fatalf("can't write %X to %q: %v", bz, filename, err) + } + + log.Printf("wrote %q", filename) + } +} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go new file mode 100644 index 000000000..37853810f --- /dev/null +++ b/test/fuzz/p2p/pex/reactor_receive.go @@ -0,0 +1,86 @@ +package pex + +import ( + "net" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/pex" + "github.com/tendermint/tendermint/version" +) + +var ( + pexR *pex.Reactor + peer p2p.Peer +) + +func init() { + addrB := pex.NewAddrBook("./testdata/addrbook1", false) + pexR = pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) + if pexR == nil { + panic("NewReactor returned nil") + } + pexR.SetLogger(log.NewNopLogger()) + peer = newFuzzPeer() + pexR.AddPeer(peer) + +} + +func Fuzz(data []byte) int { + // MakeSwitch uses log.TestingLogger which can't be executed in init() + cfg := config.DefaultP2PConfig() + cfg.PexReactor = true + sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + return sw + }) + pexR.SetSwitch(sw) + + pexR.Receive(pex.PexChannel, peer, data) + return 1 +} + +type fuzzPeer struct { + *service.BaseService + m map[string]interface{} +} + +var _ p2p.Peer = (*fuzzPeer)(nil) + +func newFuzzPeer() *fuzzPeer { + fp := &fuzzPeer{m: make(map[string]interface{})} + fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) + return fp +} + +var privKey = ed25519.GenPrivKey() +var nodeID = p2p.NodeIDFromPubKey(privKey.PubKey()) +var defaultNodeInfo = p2p.NodeInfo{ + ProtocolVersion: p2p.NewProtocolVersion( + version.P2PProtocol, + version.BlockProtocol, + 0, + ), + NodeID: nodeID, + ListenAddr: "0.0.0.0:98992", + Moniker: "foo1", +} + +func (fp *fuzzPeer) FlushStop() {} +func 
(fp *fuzzPeer) ID() p2p.NodeID { return nodeID } +func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(0, 0, 0, 0) } +func (fp *fuzzPeer) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: fp.RemoteIP(), Port: 98991, Zone: ""} +} +func (fp *fuzzPeer) IsOutbound() bool { return false } +func (fp *fuzzPeer) IsPersistent() bool { return false } +func (fp *fuzzPeer) CloseConn() error { return nil } +func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo } +func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } +func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { return p2p.NewNetAddress(fp.ID(), fp.RemoteAddr()) } +func (fp *fuzzPeer) Send(byte, []byte) bool { return true } +func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } +func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } +func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 new file mode 100644 index 000000000..acf3e721d --- /dev/null +++ b/test/fuzz/p2p/pex/testdata/addrbook1 @@ -0,0 +1,1705 @@ +{ + "Key": "badd73ebd4eeafbaefc01e0c", + "Addrs": [ + { + "Addr": { + "IP": "233.174.138.192", + "Port": 48186 + }, + "Src": { + "IP": "198.37.90.115", + "Port": 29492 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692278-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 55 + ] + }, + { + "Addr": { + "IP": "181.28.96.104", + "Port": 26776 + }, + "Src": { + "IP": "183.12.35.241", + "Port": 26794 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692289-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 18 + ] + }, + { + "Addr": { + "IP": "141.85.194.118", + "Port": 39768 + }, + "Src": { + "IP": "120.130.90.63", + "Port": 61750 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692383-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", 
+ "BucketType": 1, + "Buckets": [ + 185 + ] + }, + { + "Addr": { + "IP": "167.72.9.155", + "Port": 9542 + }, + "Src": { + "IP": "95.158.40.108", + "Port": 14929 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692604-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 250 + ] + }, + { + "Addr": { + "IP": "124.118.94.27", + "Port": 50333 + }, + "Src": { + "IP": "208.169.57.96", + "Port": 19754 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692046-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 0 + ] + }, + { + "Addr": { + "IP": "158.197.4.226", + "Port": 25979 + }, + "Src": { + "IP": "3.129.219.107", + "Port": 50374 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692211-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 174 + ] + }, + { + "Addr": { + "IP": "170.42.135.37", + "Port": 34524 + }, + "Src": { + "IP": "73.125.53.212", + "Port": 49691 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692241-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 14 + ] + }, + { + "Addr": { + "IP": "234.69.254.147", + "Port": 31885 + }, + "Src": { + "IP": "167.106.61.34", + "Port": 22187 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692609-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 213 + ] + }, + { + "Addr": { + "IP": "32.176.173.90", + "Port": 17250 + }, + "Src": { + "IP": "118.91.243.12", + "Port": 26781 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692273-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 35 + ] + }, + { + "Addr": { + "IP": "162.154.114.145", + "Port": 13875 + }, + "Src": { + "IP": "198.178.108.166", + "Port": 59623 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692373-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 216 + ] + 
}, + { + "Addr": { + "IP": "67.128.167.93", + "Port": 50513 + }, + "Src": { + "IP": "104.93.115.28", + "Port": 48298 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692399-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 18 + ] + }, + { + "Addr": { + "IP": "132.175.221.206", + "Port": 61037 + }, + "Src": { + "IP": "112.49.189.65", + "Port": 56186 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692422-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 31 + ] + }, + { + "Addr": { + "IP": "155.49.24.238", + "Port": 26261 + }, + "Src": { + "IP": "97.10.121.246", + "Port": 8694 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692473-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 23 + ] + }, + { + "Addr": { + "IP": "22.215.7.233", + "Port": 32487 + }, + "Src": { + "IP": "214.236.105.23", + "Port": 26870 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692572-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 255 + ] + }, + { + "Addr": { + "IP": "253.170.228.231", + "Port": 5002 + }, + "Src": { + "IP": "225.49.137.209", + "Port": 16908 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692619-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 161 + ] + }, + { + "Addr": { + "IP": "162.126.204.39", + "Port": 62618 + }, + "Src": { + "IP": "250.214.168.131", + "Port": 3237 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.69203-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 7 + ] + }, + { + "Addr": { + "IP": "83.154.228.215", + "Port": 23508 + }, + "Src": { + "IP": "66.33.77.170", + "Port": 52207 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692153-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 8 + ] + }, + { + "Addr": { + "IP": "132.49.63.65", + 
"Port": 53651 + }, + "Src": { + "IP": "250.164.163.212", + "Port": 8612 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692253-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 98 + ] + }, + { + "Addr": { + "IP": "200.168.34.12", + "Port": 61901 + }, + "Src": { + "IP": "133.185.186.115", + "Port": 14186 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692488-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 214 + ] + }, + { + "Addr": { + "IP": "31.93.45.219", + "Port": 61036 + }, + "Src": { + "IP": "176.191.214.170", + "Port": 33402 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692024-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 68 + ] + }, + { + "Addr": { + "IP": "250.189.27.93", + "Port": 51665 + }, + "Src": { + "IP": "93.161.116.107", + "Port": 53482 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692196-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 42 + ] + }, + { + "Addr": { + "IP": "50.7.17.126", + "Port": 64300 + }, + "Src": { + "IP": "233.234.64.214", + "Port": 61061 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692444-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 13 + ] + }, + { + "Addr": { + "IP": "88.85.81.64", + "Port": 34834 + }, + "Src": { + "IP": "4.240.150.250", + "Port": 63064 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692248-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 195 + ] + }, + { + "Addr": { + "IP": "242.117.244.198", + "Port": 4363 + }, + "Src": { + "IP": "149.29.34.42", + "Port": 62567 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692263-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 174 + ] + }, + { + "Addr": { + "IP": "245.155.175.114", + "Port": 37262 + }, + "Src": { + "IP": 
"75.85.36.49", + "Port": 7101 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692313-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 135 + ] + }, + { + "Addr": { + "IP": "224.184.241.26", + "Port": 55870 + }, + "Src": { + "IP": "52.15.194.216", + "Port": 4733 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692327-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 74 + ] + }, + { + "Addr": { + "IP": "43.178.26.188", + "Port": 55914 + }, + "Src": { + "IP": "103.250.250.35", + "Port": 1566 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692577-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 65 + ] + }, + { + "Addr": { + "IP": "102.117.172.117", + "Port": 35855 + }, + "Src": { + "IP": "114.152.204.187", + "Port": 21156 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692158-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 80 + ] + }, + { + "Addr": { + "IP": "39.33.41.199", + "Port": 51600 + }, + "Src": { + "IP": "119.65.88.38", + "Port": 41239 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692188-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 24 + ] + }, + { + "Addr": { + "IP": "63.164.56.227", + "Port": 1660 + }, + "Src": { + "IP": "169.54.47.92", + "Port": 2818 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692227-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 10 + ] + }, + { + "Addr": { + "IP": "50.183.223.115", + "Port": 26910 + }, + "Src": { + "IP": "115.98.199.4", + "Port": 8767 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692201-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 65 + ] + }, + { + "Addr": { + "IP": "132.94.203.167", + "Port": 53156 + }, + "Src": { + "IP": "17.195.234.168", + "Port": 29405 + }, + "Attempts": 0, 
+ "LastAttempt": "2017-11-07T01:11:34.692294-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 240 + ] + }, + { + "Addr": { + "IP": "135.194.230.212", + "Port": 14340 + }, + "Src": { + "IP": "160.2.241.10", + "Port": 36553 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692363-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 63 + ] + }, + { + "Addr": { + "IP": "116.53.200.25", + "Port": 27092 + }, + "Src": { + "IP": "219.104.163.247", + "Port": 50476 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692543-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 201 + ] + }, + { + "Addr": { + "IP": "125.77.44.185", + "Port": 55291 + }, + "Src": { + "IP": "77.15.232.117", + "Port": 6934 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692589-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 8 + ] + }, + { + "Addr": { + "IP": "27.221.35.172", + "Port": 26418 + }, + "Src": { + "IP": "252.18.49.70", + "Port": 9835 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692068-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 80 + ] + }, + { + "Addr": { + "IP": "133.225.167.135", + "Port": 59468 + }, + "Src": { + "IP": "110.223.163.74", + "Port": 22576 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.69213-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 164 + ] + }, + { + "Addr": { + "IP": "155.131.178.240", + "Port": 60476 + }, + "Src": { + "IP": "143.82.157.1", + "Port": 43821 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692173-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 34 + ] + }, + { + "Addr": { + "IP": "207.13.48.52", + "Port": 28549 + }, + "Src": { + "IP": "238.224.177.29", + "Port": 44100 + }, + "Attempts": 0, + "LastAttempt": 
"2017-11-07T01:11:34.692594-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 113 + ] + }, + { + "Addr": { + "IP": "91.137.2.184", + "Port": 44887 + }, + "Src": { + "IP": "72.131.70.84", + "Port": 29960 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692627-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 199 + ] + }, + { + "Addr": { + "IP": "169.59.252.76", + "Port": 57711 + }, + "Src": { + "IP": "194.132.91.119", + "Port": 18037 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692478-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 124 + ] + }, + { + "Addr": { + "IP": "25.174.143.229", + "Port": 41540 + }, + "Src": { + "IP": "58.215.132.148", + "Port": 64950 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692534-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 88 + ] + }, + { + "Addr": { + "IP": "71.239.78.239", + "Port": 46938 + }, + "Src": { + "IP": "156.98.186.169", + "Port": 32046 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692116-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 154 + ] + }, + { + "Addr": { + "IP": "94.137.107.61", + "Port": 20756 + }, + "Src": { + "IP": "101.201.138.179", + "Port": 22877 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692414-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 233 + ] + }, + { + "Addr": { + "IP": "216.62.174.112", + "Port": 60162 + }, + "Src": { + "IP": "225.114.119.144", + "Port": 1575 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692464-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 132 + ] + }, + { + "Addr": { + "IP": "65.183.81.125", + "Port": 17511 + }, + "Src": { + "IP": "12.96.14.61", + "Port": 42308 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692308-07:00", + 
"LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 153 + ] + }, + { + "Addr": { + "IP": "142.26.87.52", + "Port": 41967 + }, + "Src": { + "IP": "60.124.157.139", + "Port": 20727 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692321-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 29 + ] + }, + { + "Addr": { + "IP": "13.77.198.44", + "Port": 54508 + }, + "Src": { + "IP": "142.73.70.174", + "Port": 19525 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692553-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 170 + ] + }, + { + "Addr": { + "IP": "63.192.219.12", + "Port": 46603 + }, + "Src": { + "IP": "26.136.66.29", + "Port": 38924 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692558-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 203 + ] + }, + { + "Addr": { + "IP": "120.82.251.151", + "Port": 43723 + }, + "Src": { + "IP": "136.104.122.219", + "Port": 47452 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692599-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 103 + ] + }, + { + "Addr": { + "IP": "74.79.96.159", + "Port": 46646 + }, + "Src": { + "IP": "218.60.242.116", + "Port": 5361 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692145-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 121 + ] + }, + { + "Addr": { + "IP": "194.65.211.174", + "Port": 43464 + }, + "Src": { + "IP": "87.5.112.153", + "Port": 56348 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692163-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 96 + ] + }, + { + "Addr": { + "IP": "237.158.179.80", + "Port": 32231 + }, + "Src": { + "IP": "210.240.52.244", + "Port": 29142 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692183-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + 
"BucketType": 1, + "Buckets": [ + 27 + ] + }, + { + "Addr": { + "IP": "81.157.122.4", + "Port": 9917 + }, + "Src": { + "IP": "213.226.144.152", + "Port": 29950 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692614-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 33 + ] + }, + { + "Addr": { + "IP": "180.147.73.220", + "Port": 367 + }, + "Src": { + "IP": "32.229.253.215", + "Port": 62165 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692529-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 240 + ] + }, + { + "Addr": { + "IP": "83.110.235.17", + "Port": 33231 + }, + "Src": { + "IP": "230.54.162.85", + "Port": 51569 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692563-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 234 + ] + }, + { + "Addr": { + "IP": "100.252.20.2", + "Port": 1633 + }, + "Src": { + "IP": "52.136.47.198", + "Port": 31916 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692644-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 254 + ] + }, + { + "Addr": { + "IP": "74.5.247.79", + "Port": 18703 + }, + "Src": { + "IP": "200.247.68.128", + "Port": 55844 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692378-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 160 + ] + }, + { + "Addr": { + "IP": "17.220.231.87", + "Port": 59015 + }, + "Src": { + "IP": "54.207.49.4", + "Port": 17877 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692404-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 21 + ] + }, + { + "Addr": { + "IP": "156.194.57.127", + "Port": 18944 + }, + "Src": { + "IP": "154.94.235.84", + "Port": 61610 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692439-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 32 + ] + }, + { + 
"Addr": { + "IP": "137.57.172.158", + "Port": 32031 + }, + "Src": { + "IP": "144.160.225.126", + "Port": 43225 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692568-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 4 + ] + }, + { + "Addr": { + "IP": "101.220.101.200", + "Port": 26480 + }, + "Src": { + "IP": "130.225.42.1", + "Port": 2522 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692637-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 173 + ] + }, + { + "Addr": { + "IP": "136.233.185.164", + "Port": 34011 + }, + "Src": { + "IP": "112.127.216.43", + "Port": 55317 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692649-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 106 + ] + }, + { + "Addr": { + "IP": "101.189.107.148", + "Port": 28671 + }, + "Src": { + "IP": "213.55.140.235", + "Port": 2547 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692178-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 72 + ] + }, + { + "Addr": { + "IP": "61.190.60.64", + "Port": 58467 + }, + "Src": { + "IP": "206.86.120.31", + "Port": 54422 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692358-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 191 + ] + }, + { + "Addr": { + "IP": "227.51.127.223", + "Port": 52754 + }, + "Src": { + "IP": "124.24.12.47", + "Port": 59878 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692393-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 122 + ] + }, + { + "Addr": { + "IP": "101.19.152.238", + "Port": 47491 + }, + "Src": { + "IP": "211.30.216.184", + "Port": 17610 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692135-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 251 + ] + }, + { + "Addr": { + "IP": "182.198.35.238", + 
"Port": 15065 + }, + "Src": { + "IP": "239.67.104.149", + "Port": 43039 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692268-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 179 + ] + }, + { + "Addr": { + "IP": "233.12.68.51", + "Port": 47544 + }, + "Src": { + "IP": "203.224.119.48", + "Port": 23337 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692454-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 51 + ] + }, + { + "Addr": { + "IP": "181.30.35.80", + "Port": 500 + }, + "Src": { + "IP": "174.200.32.161", + "Port": 10174 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692503-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 69 + ] + }, + { + "Addr": { + "IP": "49.104.89.21", + "Port": 54774 + }, + "Src": { + "IP": "245.95.238.161", + "Port": 14339 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692654-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 120 + ] + }, + { + "Addr": { + "IP": "65.150.169.199", + "Port": 11589 + }, + "Src": { + "IP": "150.110.183.207", + "Port": 17694 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692041-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 182 + ] + }, + { + "Addr": { + "IP": "84.203.198.48", + "Port": 47122 + }, + "Src": { + "IP": "141.209.147.221", + "Port": 26085 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692056-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 74 + ] + }, + { + "Addr": { + "IP": "220.10.106.180", + "Port": 27439 + }, + "Src": { + "IP": "124.170.244.46", + "Port": 5249 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692125-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 166 + ] + }, + { + "Addr": { + "IP": "120.208.32.34", + "Port": 27224 + }, + "Src": { + "IP": 
"64.194.118.103", + "Port": 24388 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.69251-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 149 + ] + }, + { + "Addr": { + "IP": "245.182.67.231", + "Port": 58067 + }, + "Src": { + "IP": "62.108.238.220", + "Port": 41851 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692522-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 201 + ] + }, + { + "Addr": { + "IP": "50.81.160.105", + "Port": 8113 + }, + "Src": { + "IP": "129.187.68.121", + "Port": 58612 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692284-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 94 + ] + }, + { + "Addr": { + "IP": "101.116.47.155", + "Port": 20287 + }, + "Src": { + "IP": "94.34.167.170", + "Port": 41821 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692299-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 134 + ] + }, + { + "Addr": { + "IP": "159.253.213.86", + "Port": 5222 + }, + "Src": { + "IP": "124.47.162.125", + "Port": 45742 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692429-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 25 + ] + }, + { + "Addr": { + "IP": "124.72.81.213", + "Port": 35723 + }, + "Src": { + "IP": "201.65.186.55", + "Port": 26602 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692493-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 199 + ] + }, + { + "Addr": { + "IP": "77.216.197.130", + "Port": 49129 + }, + "Src": { + "IP": "245.160.14.27", + "Port": 38908 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692517-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 58 + ] + }, + { + "Addr": { + "IP": "175.46.154.0", + "Port": 15297 + }, + "Src": { + "IP": "6.10.7.13", + "Port": 9657 + }, + 
"Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692583-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 89 + ] + }, + { + "Addr": { + "IP": "176.71.131.235", + "Port": 14342 + }, + "Src": { + "IP": "1.36.215.198", + "Port": 21709 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692206-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 135 + ] + }, + { + "Addr": { + "IP": "34.211.134.186", + "Port": 31608 + }, + "Src": { + "IP": "187.87.12.183", + "Port": 32977 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692221-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 71 + ] + }, + { + "Addr": { + "IP": "238.63.227.107", + "Port": 49502 + }, + "Src": { + "IP": "185.51.127.143", + "Port": 22728 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692483-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 61 + ] + }, + { + "Addr": { + "IP": "160.65.76.45", + "Port": 27307 + }, + "Src": { + "IP": "170.175.198.16", + "Port": 44759 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692051-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 36 + ] + }, + { + "Addr": { + "IP": "152.22.79.90", + "Port": 25861 + }, + "Src": { + "IP": "216.183.31.190", + "Port": 9185 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692409-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 163 + ] + }, + { + "Addr": { + "IP": "200.2.175.37", + "Port": 57270 + }, + "Src": { + "IP": "108.20.254.94", + "Port": 32812 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692434-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 96 + ] + }, + { + "Addr": { + "IP": "111.16.237.10", + "Port": 45200 + }, + "Src": { + "IP": "215.82.246.115", + "Port": 42333 + }, + "Attempts": 0, + "LastAttempt": 
"2017-11-07T01:11:34.692469-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 21 + ] + }, + { + "Addr": { + "IP": "166.217.195.221", + "Port": 4579 + }, + "Src": { + "IP": "148.153.131.183", + "Port": 13848 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692498-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 78 + ] + }, + { + "Addr": { + "IP": "1.226.156.147", + "Port": 61660 + }, + "Src": { + "IP": "169.138.16.69", + "Port": 23455 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692548-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 121 + ] + }, + { + "Addr": { + "IP": "108.209.27.58", + "Port": 59102 + }, + "Src": { + "IP": "140.27.139.90", + "Port": 52154 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692014-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 53 + ] + }, + { + "Addr": { + "IP": "221.244.202.95", + "Port": 5032 + }, + "Src": { + "IP": "230.152.141.80", + "Port": 19457 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692168-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 106 + ] + }, + { + "Addr": { + "IP": "55.87.1.138", + "Port": 39686 + }, + "Src": { + "IP": "55.22.167.132", + "Port": 35663 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692258-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 174 + ] + }, + { + "Addr": { + "IP": "209.53.148.74", + "Port": 18502 + }, + "Src": { + "IP": "195.108.121.25", + "Port": 16730 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692304-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 180 + ] + }, + { + "Addr": { + "IP": "21.66.206.236", + "Port": 10771 + }, + "Src": { + "IP": "236.195.50.16", + "Port": 30697 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692368-07:00", + 
"LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 22 + ] + }, + { + "Addr": { + "IP": "190.87.236.91", + "Port": 58378 + }, + "Src": { + "IP": "72.224.218.34", + "Port": 44817 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692459-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 127 + ] + }, + { + "Addr": { + "IP": "197.172.79.170", + "Port": 24958 + }, + "Src": { + "IP": "71.22.4.12", + "Port": 28558 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692036-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 251 + ] + }, + { + "Addr": { + "IP": "160.176.234.94", + "Port": 47013 + }, + "Src": { + "IP": "212.172.24.59", + "Port": 29594 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692062-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 99 + ] + }, + { + "Addr": { + "IP": "170.206.180.18", + "Port": 26212 + }, + "Src": { + "IP": "228.135.62.18", + "Port": 26164 + }, + "Attempts": 0, + "LastAttempt": "2017-11-07T01:11:34.692234-07:00", + "LastSuccess": "0001-01-01T00:00:00Z", + "BucketType": 1, + "Buckets": [ + 34 + ] + } + ] +} diff --git a/test/fuzz/p2p/secret_connection/init-corpus/main.go b/test/fuzz/p2p/secret_connection/init-corpus/main.go new file mode 100644 index 000000000..635f2d99f --- /dev/null +++ b/test/fuzz/p2p/secret_connection/init-corpus/main.go @@ -0,0 +1,48 @@ +// nolint: gosec +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" +) + +func main() { + baseDir := flag.String("base", ".", `where the "corpus" directory will live`) + flag.Parse() + + initCorpus(*baseDir) +} + +func initCorpus(baseDir string) { + log.SetFlags(0) + + corpusDir := filepath.Join(baseDir, "corpus") + if err := os.MkdirAll(corpusDir, 0755); err != nil { + log.Fatal(err) + } + + data := []string{ + "dadc04c2-cfb1-4aa9-a92a-c0bf780ec8b6", + "", + " ", + " a ", + `{"a": 12, 
"tsp": 999, k: "blue"}`, + `9999.999`, + `""`, + `Tendermint fuzzing`, + } + + for i, datum := range data { + filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) + + if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { + log.Fatalf("can't write %v to %q: %v", datum, filename, err) + } + + log.Printf("wrote %q", filename) + } +} diff --git a/test/fuzz/p2p/secret_connection/read_write.go b/test/fuzz/p2p/secret_connection/read_write.go new file mode 100644 index 000000000..632790002 --- /dev/null +++ b/test/fuzz/p2p/secret_connection/read_write.go @@ -0,0 +1,107 @@ +package secretconnection + +import ( + "bytes" + "fmt" + "io" + "log" + + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/libs/async" + sc "github.com/tendermint/tendermint/p2p/conn" +) + +func Fuzz(data []byte) int { + if len(data) == 0 { + return -1 + } + + fooConn, barConn := makeSecretConnPair() + n, err := fooConn.Write(data) + if err != nil { + panic(err) + } + dataRead := make([]byte, n) + m, err := barConn.Read(dataRead) + if err != nil { + panic(err) + } + if !bytes.Equal(data[:n], dataRead[:m]) { + panic(fmt.Sprintf("bytes written %X != read %X", data[:n], dataRead[:m])) + } + return 1 +} + +type kvstoreConn struct { + *io.PipeReader + *io.PipeWriter +} + +func (drw kvstoreConn) Close() (err error) { + err2 := drw.PipeWriter.CloseWithError(io.EOF) + err1 := drw.PipeReader.Close() + if err2 != nil { + return err + } + return err1 +} + +// Each returned ReadWriteCloser is akin to a net.Connection +func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { + barReader, fooWriter := io.Pipe() + fooReader, barWriter := io.Pipe() + return kvstoreConn{fooReader, fooWriter}, kvstoreConn{barReader, barWriter} +} + +func makeSecretConnPair() (fooSecConn, barSecConn *sc.SecretConnection) { + var ( + fooConn, barConn = makeKVStoreConnPair() + fooPrvKey = ed25519.GenPrivKey() + fooPubKey = fooPrvKey.PubKey() + barPrvKey = ed25519.GenPrivKey() + 
barPubKey = barPrvKey.PubKey() + ) + + // Make connections from both sides in parallel. + var trs, ok = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + fooSecConn, err = sc.MakeSecretConnection(fooConn, fooPrvKey) + if err != nil { + log.Printf("failed to establish SecretConnection for foo: %v", err) + return nil, true, err + } + remotePubBytes := fooSecConn.RemotePubKey() + if !remotePubBytes.Equals(barPubKey) { + err = fmt.Errorf("unexpected fooSecConn.RemotePubKey. Expected %v, got %v", + barPubKey, fooSecConn.RemotePubKey()) + log.Print(err) + return nil, true, err + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + barSecConn, err = sc.MakeSecretConnection(barConn, barPrvKey) + if barSecConn == nil { + log.Printf("failed to establish SecretConnection for bar: %v", err) + return nil, true, err + } + remotePubBytes := barSecConn.RemotePubKey() + if !remotePubBytes.Equals(fooPubKey) { + err = fmt.Errorf("unexpected barSecConn.RemotePubKey. 
Expected %v, got %v", + fooPubKey, barSecConn.RemotePubKey()) + log.Print(err) + return nil, true, err + } + return nil, false, nil + }, + ) + + if trs.FirstError() != nil { + log.Fatalf("unexpected error: %v", trs.FirstError()) + } + if !ok { + log.Fatal("Unexpected task abortion") + } + + return fooSecConn, barSecConn +} diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go new file mode 100644 index 000000000..98c75d511 --- /dev/null +++ b/test/fuzz/rpc/jsonrpc/server/handler.go @@ -0,0 +1,44 @@ +package handler + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + + "github.com/tendermint/tendermint/libs/log" + rs "github.com/tendermint/tendermint/rpc/jsonrpc/server" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" +) + +var rpcFuncMap = map[string]*rs.RPCFunc{ + "c": rs.NewRPCFunc(func(s string, i int) (string, int) { return "foo", 200 }, "s,i"), +} +var mux *http.ServeMux + +func init() { + mux := http.NewServeMux() + buf := new(bytes.Buffer) + lgr := log.NewTMLogger(buf) + rs.RegisterRPCFuncs(mux, rpcFuncMap, lgr) +} + +func Fuzz(data []byte) int { + req, _ := http.NewRequest("POST", "http://localhost/", bytes.NewReader(data)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + panic(err) + } + if err := res.Body.Close(); err != nil { + panic(err) + } + recv := new(types.RPCResponse) + if err := json.Unmarshal(blob, recv); err != nil { + panic(err) + } + return 1 +} diff --git a/test/maverick/consensus/replay.go b/test/maverick/consensus/replay.go index 9e393fbda..6f4dabdcb 100644 --- a/test/maverick/consensus/replay.go +++ b/test/maverick/consensus/replay.go @@ -255,7 +255,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { h.logger.Info("ABCI Handshake App Info", "height", blockHeight, - "hash", fmt.Sprintf("%X", appHash), + "hash", appHash, "software-version", 
res.Version, "protocol-version", res.AppVersion, ) @@ -272,7 +272,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", - "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + "appHeight", blockHeight, "appHash", appHash) // TODO: (on restart) replay mempool diff --git a/test/maverick/main.go b/test/maverick/main.go index 5a6dcfaf4..2da4a4c90 100644 --- a/test/maverick/main.go +++ b/test/maverick/main.go @@ -35,7 +35,7 @@ func init() { } func registerFlagsRootCmd(command *cobra.Command) { - command.PersistentFlags().String("log_level", config.LogLevel, "Log level") + command.PersistentFlags().String("log-level", config.LogLevel, "Log level") } func ParseConfig() (*cfg.Config, error) { diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 14937a8ce..14f8896ae 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -795,8 +795,11 @@ func NewNode(config *cfg.Config, logNodeStartupInfo(state, pubKey, logger, consensusLogger) // TODO: Fetch and provide real options and do proper p2p bootstrapping. - peerMgr := p2p.NewPeerManager(p2p.PeerManagerOptions{}) - + // TODO: Use a persistent peer database. + peerMgr, err := p2p.NewPeerManager(dbm.NewMemDB(), p2p.PeerManagerOptions{}) + if err != nil { + return nil, err + } csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) mpReactorShim, mpReactor, mempool := createMempoolReactor(config, proxyApp, state, memplMetrics, peerMgr, logger)